Dataset schema: each record pairs a `focal_method` column (string, length 13 to 60.9k characters)
with its corresponding `test_case` column (string, length 25 to 109k characters).
/**
 * Creates the Dynamic Router endpoint for the given URI.
 *
 * @param uri        the full endpoint URI
 * @param remaining  the URI path after the scheme; used as the router channel name (must be non-empty)
 * @param parameters endpoint query parameters, bound onto the endpoint via {@code setProperties}
 * @return the configured {@link DynamicRouterEndpoint}
 * @throws IllegalArgumentException if no channel is supplied
 * @throws Exception if endpoint creation or property binding fails
 */
@Override
protected Endpoint createEndpoint(final String uri, final String remaining, final Map<String, Object> parameters) throws Exception {
    // The channel is mandatory: it scopes filters and subscriptions for this router instance.
    if (ObjectHelper.isEmpty(remaining)) {
        throw new IllegalArgumentException("You must provide a channel for the Dynamic Router");
    }
    DynamicRouterConfiguration configuration = new DynamicRouterConfiguration();
    configuration.setChannel(remaining);
    // Prepare the channel's filter collection before the endpoint exists.
    filterService.initializeChannelFilters(configuration.getChannel());
    // Endpoint construction is delegated to supplier-provided factories so tests can substitute doubles.
    DynamicRouterEndpoint endpoint = endpointFactorySupplier.get()
            .getInstance(uri, this, configuration, processorFactorySupplier, producerFactorySupplier,
                    recipientListSupplier, filterService);
    // Bind remaining URI query parameters onto the endpoint's bean properties.
    setProperties(endpoint, parameters);
    return endpoint;
}
// Verifies that createEndpoint() returns the endpoint produced by the (test-supplied) factory.
@Test
void testCreateEndpoint() throws Exception {
    component.setCamelContext(context);
    Endpoint actualEndpoint = component.createEndpoint("dynamic-router:testname", "remaining", Collections.emptyMap());
    assertEquals(endpoint, actualEndpoint);
}
/**
 * Finds the id of the backend whose host resolves to the same IP as {@code host}
 * and whose starlet port equals {@code starletPort}.
 *
 * @param host        host name or address to match (compared by IP)
 * @param starletPort starlet port to match
 * @return the matching backend's id, or -1 when no backend matches
 */
public long getBackendIdWithStarletPort(String host, int starletPort) {
    return idToBackendRef.values().stream()
            .filter(candidate -> NetUtils.isSameIP(candidate.getHost(), host))
            .filter(candidate -> candidate.getStarletPort() == starletPort)
            .map(Backend::getId)
            .findFirst()
            .orElse(-1L);
}
// Registers a backend with a known starlet port and verifies the host+port lookup returns its id.
@Test
public void testGetBackendIdWithStarletPort() throws Exception {
    Backend be = new Backend(10001, "newHost", 1000);
    be.setStarletPort(10001);
    service.addBackend(be);
    long backendId = service.getBackendIdWithStarletPort("newHost", 10001);
    Assert.assertEquals(be.getId(), backendId);
}
/** Returns the function that converts a {@code T} instance into a Beam {@link Row}. */
public SerializableFunction<T, Row> getToRowFunction() {
    return toRowFunction;
}
// Verifies the to-Row function converts a well-known-types proto message to the expected Row.
@Test
public void testWktProtoToRow() throws InvalidProtocolBufferException {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(WktMessage.getDescriptor());
    SerializableFunction<DynamicMessage, Row> toRow = schemaProvider.getToRowFunction();
    assertEquals(WKT_MESSAGE_ROW, toRow.apply(toDynamic(WKT_MESSAGE_PROTO)));
}
/**
 * Parses {@code xmlString} (decoded as UTF-8) and wraps the document's root element
 * in an {@link XmlNode}. Any parse or I/O failure is rethrown unchecked.
 *
 * @param xmlString the XML document text
 * @return a node wrapping the document element
 * @throws RuntimeException if the input cannot be parsed
 */
static XmlNode create(String xmlString) {
    try (InputStream stream = new ByteArrayInputStream(xmlString.getBytes(UTF_8))) {
        // Namespace-aware factory so qualified element names resolve correctly.
        Document parsed = XmlUtil.getNsAwareDocumentBuilderFactory()
                .newDocumentBuilder()
                .parse(stream);
        return new XmlNode(parsed.getDocumentElement());
    } catch (Exception e) {
        // Callers treat malformed XML as a programming error, hence unchecked.
        throw new RuntimeException(e);
    }
}
// Verifies that malformed XML input is surfaced as an unchecked RuntimeException.
@Test(expected = RuntimeException.class)
public void parseError() {
    // given
    String xml = "malformed-xml";
    // when
    XmlNode.create(xml);
    // then
    // throws exception
}
/**
 * Returns the REST endpoint URL template for subtask metrics:
 * {@code /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/metrics}.
 */
@Override
public String getTargetRestEndpointURL() {
    StringBuilder url = new StringBuilder("/jobs/:");
    url.append(JobIDPathParameter.KEY)
            .append("/vertices/:").append(JobVertexIdPathParameter.KEY)
            .append("/subtasks/:").append(SubtaskIndexPathParameter.KEY)
            .append("/metrics");
    return url.toString();
}
// Pins the exact URL template exposed by the subtask-metrics headers.
@Test
void testUrl() {
    assertThat(subtaskMetricsHeaders.getTargetRestEndpointURL())
            .isEqualTo(
                    "/jobs/:"
                            + JobIDPathParameter.KEY
                            + "/vertices/:"
                            + JobVertexIdPathParameter.KEY
                            + "/subtasks/:"
                            + SubtaskIndexPathParameter.KEY
                            + "/metrics");
}
/**
 * Replaces this group's provider list with the given list.
 * NOTE(review): the list is stored by reference, not copied — callers share mutations.
 *
 * @param providerInfos the new provider list
 */
public void setProviderInfos(List<ProviderInfo> providerInfos) {
    this.providerInfos = providerInfos;
}
// Verifies that setProviderInfos replaces the list by reference (no defensive copy).
@Test
public void setProviderInfos() throws Exception {
    ProviderGroup pg = new ProviderGroup("xxx", null);
    List list = pg.getProviderInfos();
    Assert.assertNotNull(list);
    Assert.assertTrue(list.size() == 0);
    List<ProviderInfo> newps = new ArrayList<ProviderInfo>();
    pg.setProviderInfos(newps);
    Assert.assertNotNull(list);
    // The old list object is detached; the new list is stored by identity.
    Assert.assertTrue(list != pg.getProviderInfos());
    Assert.assertTrue(newps == pg.getProviderInfos());
    newps = Arrays.asList(ProviderHelper.toProviderInfo("127.0.0.1:12200"));
    pg.setProviderInfos(newps);
    Assert.assertNotNull(list);
    Assert.assertTrue(newps == pg.getProviderInfos());
    Assert.assertTrue(pg.getProviderInfos().size() == 1);
}
/**
 * Purges/modifies records in the given segment using the purger and modifier factories
 * from the shared minion context, and returns the conversion result.
 *
 * @param pinotTaskConfig task configuration (carries table name, segment name, task type)
 * @param indexDir        input segment directory
 * @param workingDir      scratch directory for the purged segment
 * @return the conversion result pointing at the purged segment (or the original if nothing changed)
 * @throws Exception if purging fails
 */
@Override
protected SegmentConversionResult convert(PinotTaskConfig pinotTaskConfig, File indexDir, File workingDir)
    throws Exception {
  Map<String, String> configs = pinotTaskConfig.getConfigs();
  String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY);
  String rawTableName = TableNameBuilder.extractRawTableName(tableNameWithType);
  String taskType = pinotTaskConfig.getTaskType();
  // Factories come from the shared minion context; either may be absent (null).
  SegmentPurger.RecordPurgerFactory recordPurgerFactory = MINION_CONTEXT.getRecordPurgerFactory();
  TableConfig tableConfig = getTableConfig(tableNameWithType);
  Schema schema = getSchema(tableNameWithType);
  SegmentPurger.RecordPurger recordPurger =
      recordPurgerFactory != null ? recordPurgerFactory.getRecordPurger(pinotTaskConfig, tableConfig, schema) : null;
  SegmentPurger.RecordModifierFactory recordModifierFactory = MINION_CONTEXT.getRecordModifierFactory();
  SegmentPurger.RecordModifier recordModifier =
      recordModifierFactory != null ? recordModifierFactory.getRecordModifier(rawTableName) : null;
  _eventObserver.notifyProgress(pinotTaskConfig, "Purging segment: " + indexDir);
  SegmentPurger segmentPurger =
      new SegmentPurger(indexDir, workingDir, tableConfig, schema, recordPurger, recordModifier);
  // Measure thread CPU time (not wall time) spent purging and report it as a table metric.
  long purgeTaskStartTimeNs = MX_BEAN.getCurrentThreadCpuTime();
  File purgedSegmentFile = segmentPurger.purgeSegment();
  long purgeTaskEndTimeNs = MX_BEAN.getCurrentThreadCpuTime();
  _minionMetrics.addTimedTableValue(tableNameWithType, taskType, MinionTimer.TASK_THREAD_CPU_TIME_NS,
      purgeTaskEndTimeNs - purgeTaskStartTimeNs, TimeUnit.NANOSECONDS);
  // A null result means nothing was purged; fall back to the original segment directory.
  if (purgedSegmentFile == null) {
    purgedSegmentFile = indexDir;
  }
  return new SegmentConversionResult.Builder().setFile(purgedSegmentFile).setTableNameWithType(tableNameWithType)
      .setSegmentName(configs.get(MinionConstants.SEGMENT_NAME_KEY))
      .setCustomProperty(RECORD_PURGER_KEY, segmentPurger.getRecordPurger())
      .setCustomProperty(RECORD_MODIFIER_KEY, segmentPurger.getRecordModifier())
      .setCustomProperty(NUM_RECORDS_PURGED_KEY, segmentPurger.getNumRecordsPurged())
      .setCustomProperty(NUM_RECORDS_MODIFIED_KEY, segmentPurger.getNumRecordsModified()).build();
}
// Runs the purge task over a prepared segment and counts remaining/modified rows by reading it back.
@Test
public void testConvert() throws Exception {
    PurgeTaskExecutor purgeTaskExecutor = new PurgeTaskExecutor();
    purgeTaskExecutor.setMinionEventObserver(new MinionProgressObserver());
    PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.PurgeTask.TASK_TYPE, Collections
        .singletonMap(MinionConstants.TABLE_NAME_KEY, TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME)));
    File purgedIndexDir = purgeTaskExecutor.convert(pinotTaskConfig, _originalIndexDir, PURGED_SEGMENT_DIR).getFile();
    try (PinotSegmentRecordReader pinotSegmentRecordReader = new PinotSegmentRecordReader(purgedIndexDir)) {
        int numRecordsRemaining = 0;
        int numRecordsModified = 0;
        GenericRow row = new GenericRow();
        while (pinotSegmentRecordReader.hasNext()) {
            row = pinotSegmentRecordReader.next(row);
            numRecordsRemaining++;
            // Modified rows are marked by D1 == Integer.MAX_VALUE in this fixture.
            if (row.getValue(D1).equals(Integer.MAX_VALUE)) {
                numRecordsModified++;
            }
        }
        Assert.assertEquals(numRecordsRemaining, NUM_ROWS - 1);
        Assert.assertEquals(numRecordsModified, NUM_ROWS - 1);
    }
}
@Override public <KEY> URIMappingResult<KEY> mapUris(List<URIKeyPair<KEY>> requestUriKeyPairs) throws ServiceUnavailableException { if (requestUriKeyPairs == null || requestUriKeyPairs.isEmpty()) { return new URIMappingResult<>(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); } // API assumes that all requests will be made to the same service, just use the first request to get the service name and act as sample uri URI sampleURI = requestUriKeyPairs.get(0).getRequestUri(); String serviceName = LoadBalancerUtil.getServiceNameFromUri(sampleURI); // To achieve scatter-gather, we require the following information PartitionAccessor accessor = _partitionInfoProvider.getPartitionAccessor(serviceName); Map<Integer, Ring<URI>> rings = _hashRingProvider.getRings(sampleURI); HashFunction<Request> hashFunction = _hashRingProvider.getRequestHashFunction(serviceName); Map<Integer, Set<KEY>> unmapped = new HashMap<>(); // Pass One Map<Integer, List<URIKeyPair<KEY>>> requestsByPartition = distributeToPartitions(requestUriKeyPairs, accessor, unmapped); // Pass Two Map<URI, Integer> hostToParitionId = new HashMap<>(); Map<URI, Set<KEY>> hostToKeySet = distributeToHosts(requestsByPartition, rings, hashFunction, hostToParitionId, unmapped); return new URIMappingResult<>(hostToKeySet, unmapped, hostToParitionId); }
// Verifies that URIMapper resolves each request to the same host a manual
// partition-lookup + ring-hash computation would choose (sticky routing parity).
@Test
public void testUniversalStickiness() throws ServiceUnavailableException, URISyntaxException {
    int partitionCount = 4;
    int totalHostCount = 200;
    HashRingProvider ringProvider = createStaticHashRingProvider(totalHostCount, partitionCount, getHashFunction(true));
    HashFunction<Request> hashFunction = ringProvider.getRequestHashFunction(TEST_SERVICE);
    PartitionInfoProvider infoProvider = createRangeBasedPartitionInfoProvider(partitionCount);
    URIMapper mapper = new RingBasedUriMapper(ringProvider, infoProvider);
    URIKeyPair<Integer> request1 = new URIKeyPair<>(1, new URI("d2://testService/1")); // no partition, will be unmapped
    URIKeyPair<Integer> request2 = new URIKeyPair<>(2, new URI("d2://testService/2?partition=0")); // partition 0
    URIKeyPair<Integer> request3 = new URIKeyPair<>(3, new URI("d2://testService/3?partition=1")); // partition 1
    URIKeyPair<Integer> request4 = new URIKeyPair<>(4, new URI("d2://testService/4?partition=2")); // partition 2
    URIKeyPair<Integer> request5 = new URIKeyPair<>(5, new URI("d2://testService/5?partition=3")); // partition 3
    URIKeyPair<Integer> request6 = new URIKeyPair<>(6, new URI("d2://testService/6?partition=0")); // partition 0 with different sticky key
    URIKeyPair<Integer> request7 = new URIKeyPair<>(7, new URI("d2://testService/7?partition=1")); // partition 1 with different sticky key
    URIKeyPair<Integer> request8 = new URIKeyPair<>(8, new URI("d2://testService/8?partition=2")); // partition 2 with different sticky key
    URIKeyPair<Integer> request9 = new URIKeyPair<>(9, new URI("d2://testService/9?partition=3")); // partition 3 with different sticky key
    URIKeyPair<Integer> request10 = new URIKeyPair<>(10, new URI("d2://testService/10?partition=0&uuid=1"));// with extra parameters
    List<URIKeyPair<Integer>> requests = Arrays.asList(request1, request2, request3, request4, request5, request6,
        request7, request8, request9, request10);
    // uriMapper mapping
    URIMappingResult<Integer> uriMapperResult = mapper.mapUris(requests);
    // normal mapping: recompute each request's host by hand for comparison
    Map<Integer, Set<Integer>> normalUnmapped = new HashMap<>();
    Map<URI, Set<Integer>> normalHostToKeySet = new HashMap<>();
    for (URIKeyPair<Integer> request : requests) {
        int partitionId = 0;
        try {
            partitionId = infoProvider.getPartitionAccessor(TEST_SERVICE).getPartitionId(request.getRequestUri());
        } catch (PartitionAccessException e) {
            // Requests without a resolvable partition are collected under the sentinel key -1.
            normalUnmapped.computeIfAbsent(-1, k -> new HashSet<>()).add(request.getKey());
        }
        Ring<URI> ring = ringProvider.getRings(request.getRequestUri()).get(partitionId);
        URI uri = ring.get(hashFunction.hash(new URIRequest(request.getRequestUri())));
        normalHostToKeySet.computeIfAbsent(uri, k -> new HashSet<>());
        normalHostToKeySet.get(uri).add(request.getKey());
    }
    // they should have the same results
    Assert.assertEquals(uriMapperResult.getUnmappedKeys(), normalUnmapped);
    for (Map.Entry<URI, Set<Integer>> resolvedKeys : uriMapperResult.getMappedKeys().entrySet()) {
        Set<Integer> uriMapperKeySet = resolvedKeys.getValue();
        Assert.assertTrue(normalHostToKeySet.containsKey(resolvedKeys.getKey()));
        Set<Integer> normalKeySet = normalHostToKeySet.get(resolvedKeys.getKey());
        Assert.assertEquals(uriMapperKeySet, normalKeySet);
    }
}
/**
 * Asks the plugin identified by {@code pluginId} for its package configuration
 * and converts the JSON response using the version-appropriate message converter.
 *
 * @param pluginId the plugin to query
 * @return the plugin's declared package configuration
 */
public com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration getPackageConfiguration(String pluginId) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_PACKAGE_CONFIGURATION, new DefaultPluginInteractionCallback<>() {
        @Override
        public com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            // Response format differs per extension version; pick the matching converter.
            return messageConverter(resolvedExtensionVersion).responseMessageForPackageConfiguration(responseBody);
        }
    });
}
// Stubs the plugin manager to return a canned JSON configuration and verifies both the
// outbound request shape and the parsed per-property attributes (defaults applied for key-one).
@Test
public void shouldTalkToPluginToGetPackageConfiguration() throws Exception {
    String expectedRequestBody = null;
    String expectedResponseBody = "{" +
            "\"key-one\":{}," +
            "\"key-two\":{\"default-value\":\"two\",\"part-of-identity\":true,\"secure\":true,\"required\":true,\"display-name\":\"display-two\",\"display-order\":\"1\"}," +
            "\"key-three\":{\"default-value\":\"three\",\"part-of-identity\":false,\"secure\":false,\"required\":false,\"display-name\":\"display-three\",\"display-order\":\"2\"}" +
            "}";
    when(pluginManager.isPluginOfType(PACKAGE_MATERIAL_EXTENSION, PLUGIN_ID)).thenReturn(true);
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(PACKAGE_MATERIAL_EXTENSION), requestArgumentCaptor.capture())).thenReturn(DefaultGoPluginApiResponse.success(expectedResponseBody));
    com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration = extension.getPackageConfiguration(PLUGIN_ID);
    assertRequest(requestArgumentCaptor.getValue(), PACKAGE_MATERIAL_EXTENSION, "1.0", PackageRepositoryExtension.REQUEST_PACKAGE_CONFIGURATION, expectedRequestBody);
    assertPropertyConfiguration((PackageMaterialProperty) packageConfiguration.get("key-one"), "key-one", "", true, true, false, "", 0);
    assertPropertyConfiguration((PackageMaterialProperty) packageConfiguration.get("key-two"), "key-two", "two", true, true, true, "display-two", 1);
    assertPropertyConfiguration((PackageMaterialProperty) packageConfiguration.get("key-three"), "key-three", "three", false, false, false, "display-three", 2);
}
/**
 * Collects every partition of the configured topics that the given consumer can see.
 * Topics unknown to the consumer are logged and skipped rather than failing the call.
 *
 * @param consumer the Kafka consumer used to look up partition metadata
 * @return the set of all discovered topic-partitions
 */
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    Set<TopicPartition> subscribed = new HashSet<>();
    for (String topic : topics) {
        List<PartitionInfo> infos = consumer.partitionsFor(topic);
        if (infos == null) {
            // partitionsFor returns null for unknown topics; skip instead of failing.
            LOG.warn("Topic {} not found, skipping addition of the topic", topic);
            continue;
        }
        for (PartitionInfo info : infos) {
            subscribed.add(new TopicPartition(info.topic(), info.partition()));
        }
    }
    return subscribed;
}
// Verifies that an absent topic (partitionsFor returns null) is skipped while
// partitions of present topics are still returned.
@Test
public void testFilterOnAbsentTopic() {
    String presentTopic = "present";
    String absentTopic = "absent";
    NamedTopicFilter filter = new NamedTopicFilter(presentTopic, absentTopic);
    when(consumerMock.partitionsFor(presentTopic)).thenReturn(Collections.singletonList(createPartitionInfo(presentTopic, 2)));
    when(consumerMock.partitionsFor(absentTopic)).thenReturn(null);
    Set<TopicPartition> presentPartitions = filter.getAllSubscribedPartitions(consumerMock);
    assertThat("Expected filter to pass only topics which are present", presentPartitions, contains(new TopicPartition(presentTopic, 2)));
}
/**
 * Fetches the artifact: first downloads its checksum file, then the artifact itself,
 * both resolved against the remote base URL.
 * NOTE(review): baseRemoteURL() is invoked once per download — presumably cheap/idempotent; confirm.
 *
 * @param downloadAction performs the actual HTTP downloads
 * @param urlService     supplies the remote base URL
 * @throws Exception if either download fails
 */
public void fetch(DownloadAction downloadAction, URLService urlService) throws Exception {
    downloadChecksumFile(downloadAction, urlService.baseRemoteURL());
    downloadArtifact(downloadAction, urlService.baseRemoteURL());
}
// Fetches a folder artifact through a stubbed zip HTTP service and verifies it is
// unzipped into the expected destination on the agent.
@Test
public void shouldUnzipWhenFetchingFolder() throws Exception {
    ChecksumFileHandler checksumFileHandler = mock(ChecksumFileHandler.class);
    when(checksumFileHandler.handleResult(SC_OK, publisher)).thenReturn(true);
    File destOnAgent = new File("pipelines/cruise/", dest.getPath());
    FetchArtifactBuilder builder = getBuilder(new JobIdentifier("cruise", -10, "1", "dev", "1", "windows", 1L), "log", dest.getPath(), new DirHandler("log",destOnAgent), checksumFileHandler);
    builder.fetch(new DownloadAction(new StubFetchZipHttpService(), publisher, clock), new StubURLService());
    assertDownloaded(destOnAgent);
}
/**
 * Checks whether the actual argument type is compatible with the declared parameter type,
 * delegating to the three-argument overload with implicit casting disabled.
 */
public static boolean areCompatible(final SqlArgument actual, final ParamType declared) {
    return areCompatible(actual, declared, false);
}
// Verifies that mismatched argument/parameter types (including nested array, struct,
// map key/value, and lambda signature mismatches) are reported as incompatible.
@Test
public void shouldFailINonCompatibleSchemas() {
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.STRING), ParamTypes.INTEGER, false),
        is(false));
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.STRING), GenericType.of("T"), false),
        is(false));
    assertThat(
        ParamTypes.areCompatible(
            SqlArgument.of(SqlTypes.array(SqlTypes.INTEGER)), ArrayType.of(ParamTypes.STRING), false),
        is(false));
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.struct().field("a", SqlTypes.decimal(1, 1)).build()),
        StructType.builder().field("a", ParamTypes.DOUBLE).build(), false),
        is(false));
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.map(SqlTypes.STRING, SqlTypes.decimal(1, 1))),
        MapType.of(ParamTypes.STRING, ParamTypes.INTEGER), false),
        is(false));
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.map(SqlTypes.decimal(1, 1), SqlTypes.INTEGER)),
        MapType.of(ParamTypes.INTEGER, ParamTypes.INTEGER), false),
        is(false));
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.INTEGER), SqlTypes.INTEGER)),
        LambdaType.of(ImmutableList.of(ParamTypes.STRING), ParamTypes.STRING), false),
        is(false));
}
/**
 * Renders this type variable as {@code SimpleClassName{NAME}} or, when bounds should be
 * printed, {@code SimpleClassName{NAME extends Bound1 & Bound2}}.
 */
@Override
public String toString() {
    StringBuilder result = new StringBuilder(getClass().getSimpleName());
    result.append('{').append(getName());
    if (printExtendsClause()) {
        result.append(" extends ").append(joinTypeNames(upperBounds));
    }
    result.append('}');
    return result.toString();
}
// Verifies toString() of an imported type variable includes the class name and its extends clause.
@Test
public void toString_upper_bounded_by_single_bound() {
    @SuppressWarnings("unused")
    class BoundedBySingleBound<NAME extends String> {
    }
    JavaTypeVariable<JavaClass> typeVariable = new ClassFileImporter().importClass(BoundedBySingleBound.class).getTypeParameters().get(0);
    assertThat(typeVariable.toString())
        .contains(JavaTypeVariable.class.getSimpleName())
        .contains("NAME extends java.lang.String");
}
/** Returns a binary combine function computing the minimum of {@code double} values. */
public static Combine.BinaryCombineDoubleFn ofDoubles() {
    return new Min.MinDoubleFn();
}
// Verifies the double-min combiner handles infinities: negative infinity wins.
@Test
public void testMinDoubleFnInfinity() {
    testCombineFn(
        Min.ofDoubles(),
        Lists.newArrayList(Double.NEGATIVE_INFINITY, 2.0, 3.0, Double.POSITIVE_INFINITY),
        Double.NEGATIVE_INFINITY);
}
protected String getFileName(double lat, double lon) { lon = 1 + (180 + lon) / LAT_DEGREE; int lonInt = (int) lon; lat = 1 + (60 - lat) / LAT_DEGREE; int latInt = (int) lat; if (Math.abs(latInt - lat) < invPrecision / LAT_DEGREE) latInt--; // replace String.format as it seems to be slow // String.format("srtm_%02d_%02d", lonInt, latInt); String str = "srtm_"; str += lonInt < 10 ? "0" : ""; str += lonInt; str += latInt < 10 ? "_0" : "_"; str += latInt; return str; }
// Checks tile selection and elevation on either side of the 10°E tile border.
// NOTE(review): the inline comment mentions N42E011/N42E012 but the coordinates are
// 44.94°N around 10°E — comment looks stale; confirm against the tile naming scheme.
@Disabled
@Test
public void testGetEleHorizontalBorder() {
    // Border between the tiles N42E011 and N42E012
    assertEquals("srtm_38_04", instance.getFileName(44.94, 9.999999));
    assertEquals(48, instance.getEle(44.94, 9.999999), precision);
    assertEquals("srtm_39_04", instance.getFileName(44.94, 10.000001));
    assertEquals(48, instance.getEle(44.94, 10.000001), precision);
}
/** For an INSERT undo, the rows to remove are those captured in the after-image. */
@Override
protected TableRecords getUndoRows() {
    return sqlUndoLog.getAfterImage();
}
// Verifies getUndoRows() returns the undo log's after-image for an insert executor.
@Test
public void getUndoRows() {
    OracleUndoInsertExecutor executor = upperCase();
    Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getAfterImage());
}
/**
 * Creates a {@link DocumentBuilderFactory} hardened against XXE and related attacks:
 * secure processing on, DOCTYPE declarations disallowed, and external DTDs/entities disabled.
 *
 * @return a hardened factory suitable for parsing untrusted XML
 * @throws ParserConfigurationException if the underlying parser rejects a feature
 */
public static DocumentBuilderFactory newSecureDocumentBuilderFactory() throws ParserConfigurationException {
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
    // Reject any DOCTYPE declaration outright — the strongest XXE defence.
    dbf.setFeature(DISALLOW_DOCTYPE_DECL, true);
    // Belt-and-braces: also disable external DTD loading and entity expansion.
    dbf.setFeature(LOAD_EXTERNAL_DECL, false);
    dbf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
    dbf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
    dbf.setFeature(CREATE_ENTITY_REF_NODES, false);
    return dbf;
}
// Verifies that parsing a document with an external DTD fails under the hardened factory.
@Test(expected = SAXException.class)
public void testExternalDtdWithSecureDocumentBuilderFactory() throws Exception {
    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
    try (InputStream stream = getResourceStream("/xml/external-dtd.xml")) {
        Document doc = db.parse(stream);
    }
}
/** Converts this worker identity to its gRPC protobuf representation via the shared parsers. */
public alluxio.grpc.WorkerIdentity toProto() {
    return Parsers.toProto(this);
}
// Verifies version and identifier bytes survive the round-trip into the proto message.
@Test
public void parserToProto() throws Exception {
    byte[] idBytes = BufferUtils.getIncreasingByteArray(16);
    WorkerIdentity identity = new WorkerIdentity(idBytes, 1);
    alluxio.grpc.WorkerIdentity proto = WorkerIdentity.Parsers.toProto(identity);
    assertEquals(1, proto.getVersion());
    assertArrayEquals(idBytes, proto.getIdentifier().toByteArray());
}
/**
 * Returns the loaded service instances registered for the given SPI interface.
 * The cast is safe because registration keys instances by their interface type.
 *
 * @param serviceInterface the SPI interface
 * @return the registered instances (possibly empty)
 */
@SuppressWarnings("unchecked")
public static <T> Collection<T> getServiceInstances(final Class<T> serviceInterface) {
    return (Collection<T>) getRegisteredSPI(serviceInterface).getServiceInstances();
}
// Verifies an SPI with no implementations yields an empty (not null) collection.
@Test
void assertGetServiceInstancesWithEmptyInstances() {
    assertTrue(ShardingSphereServiceLoader.getServiceInstances(EmptySPIFixture.class).isEmpty());
}
/**
 * Returns the cached service key, lazily building it as group/interface:version
 * on first use. Returns null when no service interface is configured.
 */
public String getServiceKey() {
    String cached = serviceKey;
    if (cached != null) {
        return cached;
    }
    String serviceInterface = getServiceInterface();
    if (serviceInterface == null) {
        // No interface configured — nothing to key on; do not cache.
        return null;
    }
    serviceKey = buildKey(serviceInterface, getGroup(), getVersion());
    return serviceKey;
}
// Exercises service-key construction with and without group/version,
// and contrasts it with path-key construction.
@Test
void testGetServiceKey() {
    URL url1 = URL.valueOf("10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName");
    assertURLStrDecoder(url1);
    Assertions.assertEquals("org.apache.dubbo.test.interfaceName", url1.getServiceKey());
    URL url2 = URL.valueOf(
        "10.20.130.230:20880/org.apache.dubbo.test.interfaceName?interface=org.apache.dubbo.test.interfaceName");
    assertURLStrDecoder(url2);
    Assertions.assertEquals("org.apache.dubbo.test.interfaceName", url2.getServiceKey());
    URL url3 = URL.valueOf(
        "10.20.130.230:20880/org.apache.dubbo.test.interfaceName?interface=org.apache.dubbo.test.interfaceName&group=group1&version=1.0.0");
    assertURLStrDecoder(url3);
    // With group and version present the key becomes group/interface:version.
    Assertions.assertEquals("group1/org.apache.dubbo.test.interfaceName:1.0.0", url3.getServiceKey());
    URL url4 = URL.valueOf("10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName");
    assertURLStrDecoder(url4);
    Assertions.assertEquals("context/path", url4.getPathKey());
    URL url5 = URL.valueOf(
        "10.20.130.230:20880/context/path?interface=org.apache.dubbo.test.interfaceName&group=group1&version=1.0.0");
    assertURLStrDecoder(url5);
    Assertions.assertEquals("group1/context/path:1.0.0", url5.getPathKey());
}
/**
 * Returns a copy of the given semantic properties with every source-field index
 * shifted by {@code offset}; target-field indices are left unchanged.
 *
 * @param props          the properties to offset
 * @param numInputFields number of source fields to scan for forwarded fields
 * @param offset         amount added to each source-field index
 * @return new properties with offset read and forwarded source fields
 */
public static SingleInputSemanticProperties addSourceFieldOffset(
        SingleInputSemanticProperties props, int numInputFields, int offset) {
    SingleInputSemanticProperties offsetProps = new SingleInputSemanticProperties();
    if (props.getReadFields(0) != null) {
        // FieldSet is immutable; addField returns a new set each time.
        FieldSet offsetReadFields = new FieldSet();
        for (int r : props.getReadFields(0)) {
            offsetReadFields = offsetReadFields.addField(r + offset);
        }
        offsetProps.addReadFields(offsetReadFields);
    }
    // Re-register every forwarded field with its source index shifted by the offset.
    for (int s = 0; s < numInputFields; s++) {
        FieldSet targetFields = props.getForwardingTargetFields(0, s);
        for (int t : targetFields) {
            offsetProps.addForwardedField(s + offset, t);
        }
    }
    return offsetProps;
}
// Verifies addSourceFieldOffset shifts source indices (offsets 0, 3 and 2) while keeping
// target indices and read-field semantics intact, including an empty-properties case.
@Test
void testAddSourceFieldOffset() {
    SingleInputSemanticProperties semProps = new SingleInputSemanticProperties();
    semProps.addForwardedField(0, 1);
    semProps.addForwardedField(0, 4);
    semProps.addForwardedField(2, 0);
    semProps.addForwardedField(4, 3);
    semProps.addReadFields(new FieldSet(0, 3));
    // Offset 0: an identity transformation.
    SemanticProperties offsetProps = SemanticPropUtil.addSourceFieldOffset(semProps, 5, 0);
    assertThat(offsetProps.getForwardingTargetFields(0, 0)).containsExactly(4, 1);
    assertThat(offsetProps.getForwardingTargetFields(0, 1)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 2)).containsExactly(0);
    assertThat(offsetProps.getForwardingTargetFields(0, 3)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 4)).containsExactly(3);
    assertThat(offsetProps.getReadFields(0)).containsExactly(0, 3);
    // Offset 3: every source index (and read field) shifts by 3.
    offsetProps = SemanticPropUtil.addSourceFieldOffset(semProps, 5, 3);
    assertThat(offsetProps.getForwardingTargetFields(0, 0)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 1)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 2)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 3)).containsExactly(4, 1);
    assertThat(offsetProps.getForwardingTargetFields(0, 4)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 5)).containsExactly(0);
    assertThat(offsetProps.getForwardingTargetFields(0, 6)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 7)).containsExactly(3);
    assertThat(offsetProps.getReadFields(0)).containsExactly(6, 3);
    // Empty properties should not fail.
    semProps = new SingleInputSemanticProperties();
    SemanticPropUtil.addSourceFieldOffset(semProps, 1, 0);
    semProps = new SingleInputSemanticProperties();
    semProps.addForwardedField(0, 0);
    semProps.addForwardedField(1, 2);
    semProps.addForwardedField(2, 4);
    offsetProps = SemanticPropUtil.addSourceFieldOffset(semProps, 3, 2);
    assertThat(offsetProps.getForwardingTargetFields(0, 0)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 1)).isEmpty();
    assertThat(offsetProps.getForwardingTargetFields(0, 2)).containsExactly(0);
    assertThat(offsetProps.getForwardingTargetFields(0, 3)).containsExactly(2);
    assertThat(offsetProps.getForwardingTargetFields(0, 4)).containsExactly(4);
}
/**
 * Initializes the wrapped interceptor with the NAR's class loader installed as the
 * thread context class loader, so the interceptor's classes and resources resolve,
 * then always restores the caller's context loader.
 *
 * @param pulsarService the Pulsar service passed through to the delegate
 * @throws Exception if the delegate's initialization fails
 */
@Override
public void initialize(PulsarService pulsarService) throws Exception {
    Thread currentThread = Thread.currentThread();
    ClassLoader callerClassLoader = currentThread.getContextClassLoader();
    currentThread.setContextClassLoader(narClassLoader);
    try {
        this.interceptor.initialize(pulsarService);
    } finally {
        // Restore no matter what the delegate did.
        currentThread.setContextClassLoader(callerClassLoader);
    }
}
// Verifies the wrapper forwards initialize() exactly once to the wrapped interceptor.
@Test
public void testWrapper() throws Exception {
    BrokerInterceptor h = mock(BrokerInterceptor.class);
    NarClassLoader loader = mock(NarClassLoader.class);
    BrokerInterceptorWithClassLoader wrapper = new BrokerInterceptorWithClassLoader(h, loader);
    PulsarService pulsarService = mock(PulsarService.class);
    wrapper.initialize(pulsarService);
    verify(h, times(1)).initialize(same(pulsarService));
}
/**
 * Schedules the CE-queue cleaning task with a fixed delay (in minutes) using the
 * initial-delay and delay values from the CE configuration.
 */
@Override
public void startScheduling() {
    executorService.scheduleWithFixedDelay(this::cleanCeQueue,
        ceConfiguration.getCleanTasksInitialDelay(),
        ceConfiguration.getCleanTasksDelay(), MINUTES);
}
// Uses a hand-rolled executor adapter to capture scheduleWithFixedDelay arguments and
// verify the cleaning task is scheduled exactly once with configuration-derived delays.
@Test
public void startScheduling_calls_cleaning_methods_of_internalCeQueue_at_fixed_rate_with_value_from_CeConfiguration() {
    InternalCeQueue mockedInternalCeQueue = mock(InternalCeQueue.class);
    long wornOutInitialDelay = 10L;
    long wornOutDelay = 20L;
    long unknownWorkerInitialDelay = 11L;
    long unknownWorkerDelay = 21L;
    CeConfiguration mockedCeConfiguration = mockCeConfiguration(wornOutInitialDelay, wornOutDelay);
    CeCleaningAdapter executorService = new CeCleaningAdapter() {
        @Override
        public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initDelay, long period, TimeUnit unit) {
            schedulerCounter++;
            switch(schedulerCounter) {
                case 1:
                    assertThat(initDelay).isEqualTo(wornOutInitialDelay);
                    assertThat(period).isEqualTo(wornOutDelay);
                    assertThat(unit).isEqualTo(TimeUnit.MINUTES);
                    break;
                case 2:
                    assertThat(initDelay).isEqualTo(unknownWorkerInitialDelay);
                    assertThat(period).isEqualTo(unknownWorkerDelay);
                    assertThat(unit).isEqualTo(TimeUnit.MINUTES);
                    break;
                default:
                    fail("Unknown call of scheduleWithFixedDelay");
            }
            // synchronously execute command
            command.run();
            return null;
        }
    };
    CeCleaningSchedulerImpl underTest = new CeCleaningSchedulerImpl(executorService, mockedCeConfiguration,
        mockedInternalCeQueue, mockCeDistributedInformation(jobLock));
    underTest.startScheduling();
    assertThat(executorService.schedulerCounter).isOne();
}
/**
 * Creates an L3 modification instruction that rewrites the ARP source protocol address.
 *
 * @param addr the new ARP SPA; must not be null
 * @return the modification instruction
 */
public static L3ModificationInstruction modArpSpa(IpAddress addr) {
    checkNotNull(addr, "Src l3 ARP IP address cannot be null");
    return new ModArpIPInstruction(L3SubType.ARP_SPA, addr);
}
// Verifies modArpSpa produces an ARP_SPA instruction carrying the given IP address.
@Test
public void testModArpSpaMethod() {
    final Instruction instruction = Instructions.modArpSpa(ip41);
    final L3ModificationInstruction.ModArpIPInstruction modArpIPInstruction =
        checkAndConvert(instruction, Instruction.Type.L3MODIFICATION, L3ModificationInstruction.ModArpIPInstruction.class);
    assertThat(modArpIPInstruction.subtype(), is(L3ModificationInstruction.L3SubType.ARP_SPA));
    assertThat(modArpIPInstruction.ip(), is(ip41));
}
/**
 * Escapes Lucene query metacharacters in the given text.
 *
 * @param text raw query text; may be null
 * @return the escaped query string, or null when {@code text} is null
 */
public static String escapeLuceneQuery(final CharSequence text) {
    if (text == null) {
        return null;
    }
    // Worst case every character needs a backslash, so presize to twice the input length.
    final StringBuilder escaped = new StringBuilder(text.length() << 1);
    appendEscapedLuceneQuery(escaped, text);
    return escaped.toString();
}
// Verifies each Lucene query metacharacter gets a backslash escape.
@Test
public void testEscapeLuceneQuery() {
    CharSequence text = "test encoding + - & | ! ( ) { } [ ] ^ \" ~ * ? : \\";
    String expResult = "test encoding \\+ \\- \\& \\| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\\" \\~ \\* \\? \\: \\\\";
    String result = LuceneUtils.escapeLuceneQuery(text);
    assertEquals(expResult, result);
}
/**
 * Asserts that the actual value is an instance of the given class.
 * Fails with an actual/expected report when it is not; a null actual always fails.
 *
 * @param clazz the expected class; must not be null
 * @throws NullPointerException if {@code clazz} is null
 */
public void isInstanceOf(Class<?> clazz) {
    if (clazz == null) {
        throw new NullPointerException("clazz");
    }
    if (actual == null) {
        // null is not an instance of anything; report against the expected class.
        failWithActual("expected instance of", clazz.getName());
        return;
    }
    if (!isInstanceOfType(actual, clazz)) {
        if (Platform.classMetadataUnsupported()) {
            // Without class metadata (e.g. -XdisableClassMetadata) the check is undecidable.
            throw new UnsupportedOperationException(
                actualCustomStringRepresentation()
                    + ", an instance of "
                    + actual.getClass().getName()
                    + ", may or may not be an instance of "
                    + clazz.getName()
                    + ". Under -XdisableClassMetadata, we do not have enough information to tell.");
        }
        failWithoutActual(
            fact("expected instance of", clazz.getName()),
            fact("but was instance of", actual.getClass().getName()),
            fact("with value", actualCustomStringRepresentation()));
    }
}
// Verifies that asserting instance-of on a null subject produces a failure.
@Test
public void isInstanceOfInterfaceForNull() {
    expectFailure.whenTesting().that((Object) null).isInstanceOf(CharSequence.class);
}
/**
 * Executes this subcommand against the cluster using the given admin client,
 * parsed CLI arguments, and output stream.
 *
 * @param admin the Kafka admin client
 * @param ns    parsed command-line arguments
 * @param out   stream for user-facing output
 * @throws Exception if the command fails
 */
abstract void execute(Admin admin, Namespace ns, PrintStream out) throws Exception;
// Simulates a producer with an open transaction but no mapped transactional id and
// verifies find-hanging reports it as a hanging transaction.
@Test
public void testFindHangingNoMappedTransactionalId() throws Exception {
    TopicPartition topicPartition = new TopicPartition("foo", 5);
    String[] args = new String[]{
        "--bootstrap-server",
        "localhost:9092",
        "find-hanging",
        "--topic",
        topicPartition.topic(),
        "--partition",
        String.valueOf(topicPartition.partition())
    };
    long producerId = 132L;
    short producerEpoch = 5;
    // Last produce was an hour ago — old enough to look hanging.
    long lastTimestamp = time.milliseconds() - TimeUnit.MINUTES.toMillis(60);
    int coordinatorEpoch = 19;
    long txnStartOffset = 29384L;
    expectDescribeProducers(
        topicPartition,
        producerId,
        producerEpoch,
        lastTimestamp,
        OptionalInt.of(coordinatorEpoch),
        OptionalLong.of(txnStartOffset)
    );
    // No transactional id maps to this producer id.
    expectListTransactions(
        new ListTransactionsOptions().filterProducerIds(singleton(producerId)),
        singletonMap(1, Collections.emptyList())
    );
    expectDescribeTransactions(Collections.emptyMap());
    execute(args);
    assertNormalExit();
    assertHangingTransaction(
        topicPartition,
        producerId,
        producerEpoch,
        coordinatorEpoch,
        txnStartOffset,
        lastTimestamp
    );
}
/**
 * Resolves a comma-separated list of paths against HDFS (as the given user) and
 * returns the resolved list re-joined as a single string.
 *
 * @param files comma-separated paths; may be null
 * @param conf  Hadoop configuration; may be null
 * @param user  user to resolve paths as
 * @return the resolved comma-separated list, or null when files or conf is null
 * @throws FileNotFoundException if a listed path does not exist
 */
public static String hadoopFsListAsString(String files, Configuration conf, String user)
    throws URISyntaxException, FileNotFoundException, IOException, InterruptedException {
    if (files == null || conf == null) {
        return null;
    }
    return StringUtils.arrayToString(hadoopFsListAsArray(files, conf, user));
}
// Exercises hadoopFsListAsString with null args, real temp files, and a missing path.
// NOTE(review): `tmpFile` in the first catch looks undefined in this visible scope
// (only tmpFile1/tmpFile2 are declared) — presumably a field; confirm. The created
// /tmp files are also never cleaned up.
@Test
public void testHadoopFsListAsString() {
    try {
        String tmpFileName1 = "/tmp/testHadoopFsListAsString1";
        String tmpFileName2 = "/tmp/testHadoopFsListAsString2";
        File tmpFile1 = new File(tmpFileName1);
        File tmpFile2 = new File(tmpFileName2);
        tmpFile1.createNewFile();
        tmpFile2.createNewFile();
        // Null files or null conf short-circuits to null.
        Assert.assertTrue(TempletonUtils.hadoopFsListAsString(null, null, null) == null);
        Assert.assertTrue(TempletonUtils.hadoopFsListAsString("/tmp,/usr", null, null) == null);
        Assert.assertEquals("file:" + tmpFileName1 + ",file:" + tmpFileName2,
            TempletonUtils.hadoopFsListAsString
            (tmpFileName1 + "," + tmpFileName2, new Configuration(), null));
    } catch (FileNotFoundException e) {
        Assert.fail("Couldn't find name for " + tmpFile.toURI().toString());
    } catch (Exception e) {
        // Something else is wrong
        e.printStackTrace();
    }
    try {
        TempletonUtils.hadoopFsListAsString("/scoobydoo/teddybear,joe", new Configuration(), null);
        Assert.fail("Should not have found /scoobydoo/teddybear");
    } catch (FileNotFoundException e) {
        // Should go here.
    } catch (Exception e) {
        // Something else is wrong.
        e.printStackTrace();
    }
}
/**
 * Stores all given key/value entries, blocking until the async set completes.
 *
 * @param buckets map of bucket name to value
 */
@Override
public void set(Map<String, ?> buckets) {
    commandExecutor.get(setAsync(buckets));
}
// Stores 1000 JSON objects, then bulk-updates a nested field on each via a JSONPath set.
@Test
public void testSetWithPath() {
    RJsonBuckets buckets = redisson.getJsonBuckets(new JacksonCodec<>(TestType.class));
    Map<String, Object> map = new HashMap<>();
    IntStream.range(0, 1000).forEach(i -> {
        TestType testType = new TestType();
        testType.setName("name" + i);
        NestedType nestedType = new NestedType();
        nestedType.setValue(i);
        testType.setType(nestedType);
        map.put(testType.getName(), testType);
    });
    buckets.set(map);
    map.clear();
    IntStream.range(0, 1000).forEach(i -> {
        map.put("name" + i, i + 1);
    });
    // Path-based set updates only $.type.value inside each stored object.
    buckets.set(new IntegerCodec(), "$.type.value", map);
}
// Returns the scan ranges computed earlier (by setupScanRangeLocations);
// the maxScanRangeLength argument is ignored here.
@Override
public List<TScanRangeLocations> getScanRangeLocations(long maxScanRangeLength) {
    return result;
}
// Verifies that setupScanRangeLocations() produces HDFS scan ranges carrying
// both the data file path and its associated positional delete file.
@Test
public void testGetScanRangeLocations() throws Exception {
    List<Column> columns = Lists.newArrayList(new Column("k1", INT), new Column("k2", INT));
    IcebergTable icebergTable = new IcebergTable(1, "srTableName", "iceberg_catalog", "resource_name",
            "iceberg_db", "iceberg_table", "", columns, mockedNativeTableC, Maps.newHashMap());
    Analyzer analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext());
    DescriptorTable descTable = analyzer.getDescTbl();
    TupleDescriptor tupleDesc = descTable.createTupleDescriptor("DestTableTuple");
    tupleDesc.setTable(icebergTable);
    IcebergScanNode scanNode = new IcebergScanNode(new PlanNodeId(0), tupleDesc, "IcebergScanNode", tupleDesc);
    // Commit one data file plus one positional delete file, then scan the resulting snapshot.
    mockedNativeTableC.newRowDelta().addRows(FILE_B_1).addDeletes(FILE_C_1).commit();
    mockedNativeTableC.refresh();
    scanNode.setSnapshotId(Optional.of(mockedNativeTableC.currentSnapshot().snapshotId()));
    scanNode.setupScanRangeLocations(descTable);
    List<TScanRangeLocations> result = scanNode.getScanRangeLocations(1);
    Assert.assertTrue(result.size() > 0);
    TScanRange scanRange = result.get(0).scan_range;
    Assert.assertTrue(scanRange.isSetHdfs_scan_range());
    THdfsScanRange hdfsScanRange = scanRange.hdfs_scan_range;
    Assert.assertEquals("/path/to/data-b1.parquet", hdfsScanRange.full_path);
    Assert.assertEquals(1, hdfsScanRange.delete_files.size());
    TIcebergDeleteFile deleteFile = hdfsScanRange.delete_files.get(0);
    Assert.assertEquals("delete.orc", deleteFile.full_path);
    Assert.assertEquals(TIcebergFileContent.POSITION_DELETES, deleteFile.file_content);
}
/**
 * Returns the lowest four bytes of {@code n} in big-endian order.
 */
static byte[] longTo4ByteArray(long n) {
    byte[] full = ByteBuffer.allocate(8).putLong(n).array();
    byte[] lowFour = new byte[4];
    System.arraycopy(full, 4, lowFour, 0, 4);
    assert lowFour.length == 4 : lowFour.length;
    return lowFour;
}
// 1026 = 0x0402; expect the low four bytes, big-endian.
@Test
public void testLongToByteArray() {
    byte[] bytes = HDUtils.longTo4ByteArray(1026);
    assertEquals("00000402", ByteUtils.formatHex(bytes));
}
// Deletes a tenant package after verifying it exists and that no tenant is
// currently using it.
@Override
public void deleteTenantPackage(Long id) {
    // Ensure the package exists.
    validateTenantPackageExists(id);
    // Ensure no tenant is still using this package.
    validateTenantUsed(id);
    // Perform the deletion.
    tenantPackageMapper.deleteById(id);
}
// Deleting a package that a tenant still uses must fail with TENANT_PACKAGE_USED.
@Test
public void testDeleteTenantPackage_used() {
    // Mock data: insert an existing package row.
    TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class);
    tenantPackageMapper.insert(dbTenantPackage);// @Sql: insert an existing row first
    // Prepare the argument.
    Long id = dbTenantPackage.getId();
    // Mock a tenant still using this package.
    when(tenantService.getTenantCountByPackageId(eq(id))).thenReturn(1L);
    // Call and assert the expected business exception.
    assertServiceException(() -> tenantPackageService.deleteTenantPackage(id), TENANT_PACKAGE_USED);
}
/**
 * Left-pads {@code input} with {@code padding} up to {@code targetLen} characters.
 * The result is always exactly {@code targetLen} characters long (truncating the
 * input when it is longer than the target). Returns null for a null input, a
 * null/empty padding, or a null/negative target length.
 */
@Udf
public String lpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  if (input == null
      || padding == null || padding.isEmpty()
      || targetLen == null || targetLen < 0) {
    return null;
  }
  // Number of pad characters needed before the input starts.
  final int padLength = Math.max(targetLen - input.length(), 0);
  final StringBuilder result = new StringBuilder(targetLen + padding.length());
  while (result.length() < padLength) {
    result.append(padding);
  }
  result.setLength(padLength);
  result.append(input);
  result.setLength(targetLen);
  return result.toString();
}
// Padding an empty byte buffer to length 4 yields two repetitions of the 2-byte pad.
@Test
public void shouldPadEmptyInputBytes() {
    final ByteBuffer result = udf.lpad(EMPTY_BYTES, 4, BYTES_45);
    assertThat(result, is(ByteBuffer.wrap(new byte[]{4, 5, 4, 5})));
}
// Validates that the given 16-byte array is a well-formed IPv6 address.
// Throws RuntimeException when the array is not exactly 16 bytes long.
private static boolean ipV6Check(byte[] ip) {
    if (ip.length != 16) {
        throw new RuntimeException("illegal ipv6 bytes");
    }
    // Convert to textual form and validate via commons-validator.
    InetAddressValidator validator = InetAddressValidator.getInstance();
    return validator.isValidInet6Address(ipToIPv6Str(ip));
}
// Checks internal/external IPv6 classification and byte[] -> string round-trip.
@Test
public void testIPv6Check() throws UnknownHostException {
    InetAddress nonInternal = InetAddress.getByName("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A");
    InetAddress internal = InetAddress.getByName("FE80:0000:0000:0000:0000:0000:0000:FFFF");
    assertThat(UtilAll.isInternalV6IP(nonInternal)).isFalse();
    // The FE80:: address is expected to be classified as internal.
    assertThat(UtilAll.isInternalV6IP(internal)).isTrue();
    assertThat(UtilAll.ipToIPv6Str(nonInternal.getAddress()).toUpperCase()).isEqualTo("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A");
}
// Delegates task execution to the wrapped internal executor.
@Override
public void execute(Runnable command) {
    internalExecutor.execute(command);
}
// The service must hand the task to its internal executor and run it to completion.
@Test
public void testExecuteInternal() {
    TestRunnable runnable = new TestRunnable();
    executionService.execute(runnable);
    runnable.await();
}
/**
 * Sends a rate-limit challenge push notification to the account's primary device.
 * The notification is marked urgent, so a result is always expected.
 *
 * @throws NotPushRegisteredException if the device has no usable push token
 */
public CompletableFuture<SendPushNotificationResult> sendRateLimitChallengeNotification(
    final Account destination, final String challengeToken) throws NotPushRegisteredException {
  final Device device = destination.getPrimaryDevice();
  final Pair<String, PushNotification.TokenType> tokenAndType = getToken(device);
  return sendNotification(new PushNotification(tokenAndType.first(), tokenAndType.second(),
      PushNotification.NotificationType.RATE_LIMIT_CHALLENGE, challengeToken, destination, device, true))
      // Urgent notifications always produce a response; absence is a programming error.
      .thenApply(maybeResponse -> maybeResponse.orElseThrow(() -> new AssertionError("Responses must be present for urgent notifications")));
}
// A rate-limit challenge must be sent as an urgent APN notification to the primary device.
@Test
void sendRateLimitChallengeNotification() throws NotPushRegisteredException {
    final Account account = mock(Account.class);
    final Device device = mock(Device.class);
    final String deviceToken = "token";
    final String challengeToken = "challenge";
    when(device.getId()).thenReturn(Device.PRIMARY_ID);
    when(device.getApnId()).thenReturn(deviceToken);
    when(account.getPrimaryDevice()).thenReturn(device);
    when(apnSender.sendNotification(any()))
        .thenReturn(CompletableFuture.completedFuture(new SendPushNotificationResult(true, Optional.empty(), false, Optional.empty())));
    pushNotificationManager.sendRateLimitChallengeNotification(account, challengeToken);
    // The final boolean (urgent) must be true for rate-limit challenges.
    verify(apnSender).sendNotification(new PushNotification(deviceToken, PushNotification.TokenType.APN,
        PushNotification.NotificationType.RATE_LIMIT_CHALLENGE, challengeToken, account, device, true));
}
// Polls the command topic for new commands, pairing each one with any pending
// status future registered under its command id.
@Override
public List<QueuedCommand> getNewCommands(final Duration timeout) {
    completeSatisfiedSequenceNumberFutures();
    final List<QueuedCommand> commands = Lists.newArrayList();
    final Iterable<ConsumerRecord<byte[], byte[]>> records = commandTopic.getNewCommands(timeout);
    for (ConsumerRecord<byte[], byte[]> record : records) {
        // Records with null values (tombstones) are skipped.
        if (record.value() != null) {
            Optional<CommandStatusFuture> commandStatusFuture = Optional.empty();
            try {
                final CommandId commandId = commandIdDeserializer.deserialize(commandTopicName, record.key());
                // Remove-and-take: the future is completed by the command processor downstream.
                commandStatusFuture = Optional.ofNullable(commandStatusMap.remove(commandId));
            } catch (Exception e) {
                // A malformed key must not prevent the command itself from being queued.
                LOG.warn(
                    "Error while attempting to fetch from commandStatusMap for key {}",
                    record.key(),
                    e);
            }
            commands.add(new QueuedCommand(
                record.key(),
                record.value(),
                commandStatusFuture,
                record.offset()));
        }
    }
    return commands;
}
// Sequence-number futures must be completed up to (position - 1) before polling
// the command topic for new commands.
@Test
public void shouldCompleteFuturesWhenGettingNewCommands() {
    // Given:
    when(commandTopic.getCommandTopicConsumerPosition()).thenReturn(22L);
    // When:
    commandStore.getNewCommands(NEW_CMDS_TIMEOUT);
    // Then:
    final InOrder inOrder = inOrder(sequenceNumberFutureStore, commandTopic);
    inOrder.verify(sequenceNumberFutureStore)
        .completeFuturesUpToAndIncludingSequenceNumber(eq(21L));
    inOrder.verify(commandTopic).getNewCommands(any());
}
/**
 * Returns the larger of two {@link Short} values; when they are equal, {@code a}
 * is returned. Both arguments are dereferenced, so a null argument results in a
 * {@link NullPointerException}, as with the original unboxing comparison.
 */
public static Short max(Short a, Short b) {
    if (a.compareTo(b) >= 0) {
        return a;
    }
    return b;
}
// max must handle negatives, Short.MIN_VALUE, nulls in the input, and an empty summary.
@Test
void testMax() {
    assertThat(summarize(-1000, 0, 1, 50, 999, 1001).getMax().shortValue())
        .isEqualTo((short) 1001);
    assertThat(summarize((int) Short.MIN_VALUE, -1000, 0).getMax().shortValue()).isZero();
    assertThat(summarize(1, 8, 7, 6, 9, 10, 2, 3, 5, 0, 11, -2, 3).getMax().shortValue())
        .isEqualTo((short) 11);
    // Null entries must be ignored by the aggregation.
    assertThat(
        summarize(1, 8, 7, 6, 9, null, 10, 2, 3, 5, null, 0, 11, -2, 3)
            .getMax()
            .shortValue())
        .isEqualTo((short) 11);
    // An empty summary has no max.
    assertThat(summarize().getMax()).isNull();
}
// Looks up the converter registered for the given type. User-defined types in
// the OBJECT family are normalized to plain OBJECT first.
@Nonnull
public static ToConverter getToConverter(QueryDataType type) {
    if (type.getTypeFamily() == QueryDataTypeFamily.OBJECT) {
        // User-defined types are subject to the same conversion rules as ordinary OBJECT.
        type = QueryDataType.OBJECT;
    }
    return Objects.requireNonNull(CONVERTERS.get(type), "missing converter for " + type);
}
// Converting an OffsetDateTime through the zoned-timestamp converter must
// yield the equivalent ZonedDateTime.
@Test
public void test_zonedDateTimeConversion() {
    OffsetDateTime time = OffsetDateTime.of(2020, 9, 8, 11, 4, 0, 0, UTC);
    Object converted = getToConverter(TIMESTAMP_WITH_TZ_ZONED_DATE_TIME).convert(time);
    assertThat(converted).isEqualTo(time.toZonedDateTime());
}
/**
 * Converts every column of the schema into a {@code FieldInfo} entity.
 *
 * @throws IllegalArgumentException if the schema has no columns
 */
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) {
    final List<FieldInfo> allFields = schema.columns().stream()
        .map(EntityUtil::toFieldInfo)
        .collect(Collectors.toList());
    if (allFields.isEmpty()) {
        // An empty root schema indicates a caller bug, not a valid source.
        throw new IllegalArgumentException("Root schema should contain columns: " + schema);
    }
    return allFields;
}
// Only the declared value column should be exposed; no meta columns appear.
@Test
public void shouldNotExposeMetaColumns() {
    // Given:
    final LogicalSchema schema = LogicalSchema.builder()
        .valueColumn(ColumnName.of("bob"), SqlTypes.STRING)
        .build();
    // When:
    final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema);
    // Then:
    assertThat(fields, hasSize(1));
}
// Reads the OpenAPI definition for the given class using the resolved
// application path and default (empty) parent context.
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null,
        new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
// Overridden generic resource methods must keep a stable operationId (ticket 3426).
@Test(description = "overridden generic resource methods operationId")
public void testTicket3426() {
    Reader reader = new Reader(new OpenAPI());
    OpenAPI openAPI = reader.read(Ticket3426Resource.class);
    String yaml = "openapi: 3.0.1\n" +
        "paths:\n" +
        "  /inheritExample/{input}:\n" +
        "    get:\n" +
        "      operationId: get\n" +
        "      parameters:\n" +
        "      - name: input\n" +
        "        in: path\n" +
        "        required: true\n" +
        "        schema:\n" +
        "          type: string\n" +
        "      responses:\n" +
        "        default:\n" +
        "          description: default response\n" +
        "          content:\n" +
        "            '*/*':\n" +
        "              schema:\n" +
        "                type: string";
    SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
// Returns the fixed authentication method name for this provider.
@Override
public String getAuthMethodName() {
    return AUTH_METHOD_NAME;
}
// The token-based provider must report "token" as its auth method name.
@Test
public void testGetAuthMethodName() {
    assertEquals(this.auth.getAuthMethodName(), "token");
}
/**
 * Opens the socket connection to the Graphite server and prepares the writer.
 *
 * @throws IllegalStateException if already connected
 * @throws UnknownHostException if the hostname cannot be resolved
 * @throws IOException on connection failure
 */
@Override
public void connect() throws IllegalStateException, IOException {
    if (isConnected()) {
        throw new IllegalStateException("Already connected");
    }
    InetSocketAddress address = this.address;
    // the previous dns retry logic did not work, as address.getAddress would always return the cached value
    // this version of the simplified logic will always cause a dns request if hostname has been supplied.
    // InetAddress.getByName forces the dns lookup
    // if an InetSocketAddress was supplied at create time that will take precedence.
    if (address == null || address.getHostName() == null && hostname != null) {
        address = new InetSocketAddress(hostname, port);
    }
    if (address.getAddress() == null) {
        throw new UnknownHostException(address.getHostName());
    }
    this.socket = socketFactory.createSocket(address.getAddress(), address.getPort());
    this.writer = new BufferedWriter(new OutputStreamWriter(socket.getOutputStream(), charset));
}
// Connecting with an explicit InetSocketAddress must create the socket from
// that address's resolved host and port.
@Test
public void connectsToGraphiteWithInetSocketAddress() throws Exception {
    try (Graphite graphite = new Graphite(address, socketFactory)) {
        graphite.connect();
    }
    verify(socketFactory).createSocket(address.getAddress(), address.getPort());
}
// Returns true while items remain either in the currently loaded page or in a
// further page that can still be fetched.
@Override
public boolean hasNext() {
    // Unconsumed items remain in the current page.
    if (currentIndex < currentData.size()) {
        return true;
    }
    // No further page to load.
    if (currentPageable == null) {
        return false;
    }
    // Fetch the next page and restart consumption from its beginning.
    currentData = loadData();
    currentIndex = 0;
    return !currentData.isEmpty();
}
// The iterator must load the first page from the lister so hasNext() is true.
@Test
@SuppressWarnings("unchecked")
void testConstructor_loadsData() {
    Page<Extension> page = mock(Page.class);
    when(page.getContent()).thenReturn(List.of(mock(Extension.class)));
    when(page.hasNext()).thenReturn(true);
    when(page.nextPageable()).thenReturn(
        PageRequest.of(1, DefaultExtensionIterator.DEFAULT_PAGE_SIZE, Sort.by("name")));
    when(lister.list(any())).thenReturn(page);
    var iterator = new DefaultExtensionIterator<>(lister);
    assertThat(iterator.hasNext()).isTrue();
}
// Returns the configured TCP backlog for the server socket.
public int getServerSocketBacklog() {
    return serverSocketBacklog;
}
// The server config must pick up the socket backlog set via system property.
@Test
public void testChangeConfigBySystemProperty() {
    System.setProperty(NettySystemConfig.COM_ROCKETMQ_REMOTING_SOCKET_BACKLOG, "65535");
    // NOTE(review): the static field is refreshed manually because it is
    // presumably initialized from the property at class-load time — confirm.
    NettySystemConfig.socketBacklog = Integer.parseInt(
        System.getProperty(NettySystemConfig.COM_ROCKETMQ_REMOTING_SOCKET_BACKLOG, "1024"));
    NettyServerConfig changedConfig = new NettyServerConfig();
    assertThat(changedConfig.getServerSocketBacklog()).isEqualTo(65535);
}
/**
 * Materializes a store using the given DSL store suppliers.
 *
 * @throws NullPointerException if {@code storeSuppliers} is null
 */
public static <K, V, S extends StateStore> Materialized<K, V, S> as(final DslStoreSuppliers storeSuppliers) {
    if (storeSuppliers == null) {
        throw new NullPointerException("store type can't be null");
    }
    return new Materialized<>(storeSuppliers);
}
// Passing a null session store supplier must fail fast with a descriptive NPE.
@Test
public void shouldThrowNullPointerIfSessionBytesStoreSupplierIsNull() {
    final NullPointerException e = assertThrows(NullPointerException.class,
        () -> Materialized.as((SessionBytesStoreSupplier) null));
    assertEquals(e.getMessage(), "supplier can't be null");
}
// Returns the varargs arguments directly as a typed array. @SafeVarargs is
// valid because the array is never written to.
@SafeVarargs
public static <T> T[] create(T... args) {
    return args;
}
// create(...) must return the varargs exactly as a typed array.
@Test
public void testCreate() {
    assertArrayEquals(create("a"), (new String[] {"a"}));
    assertArrayEquals(create(""), (new String[] {""}));
    assertArrayEquals(create("a", "b"), (new String[] {"a", "b"}));
}
// Sets whether metadata metrics are enabled; returns this builder for chaining.
public MetricsBuilder enableMetadata(Boolean enableMetadata) {
    this.enableMetadata = enableMetadata;
    return getThis();
}
// enableMetadata(true) must be reflected in the built configuration object.
@Test
void enableMetadata() {
    MetricsBuilder builder = MetricsBuilder.newBuilder();
    builder.enableMetadata(true);
    Assertions.assertTrue(builder.build().getEnableMetadata());
}
/**
 * Readiness endpoint: replies 400 while operators are still starting, 403 when
 * the RBAC self-check fails, and 200 once everything is ready.
 *
 * <p>Bug fix: the original fell through after sending an error response and
 * then also sent the 200 "started" message, attempting a second response on
 * the same exchange. Each error branch now returns immediately.
 */
@Override
public void handle(HttpExchange httpExchange) throws IOException {
    Optional<Boolean> operatorsAreReady = areOperatorsStarted(operators);
    if (operatorsAreReady.isEmpty() || !operatorsAreReady.get()) {
        sendMessage(httpExchange, HTTP_BAD_REQUEST, "spark operators are not ready yet");
        return;
    }
    if (!passRbacCheck()) {
        sendMessage(httpExchange, HTTP_FORBIDDEN, "required rbac test failed, operators are not ready");
        return;
    }
    sendMessage(httpExchange, HTTP_OK, "started");
}
// When all operators are started and RBAC passes, the probe answers HTTP 200 "started".
@Test
void testHandleSucceed() throws IOException {
    Operator operator = mock(Operator.class);
    Operator sparkConfMonitor = mock(Operator.class);
    RuntimeInfo runtimeInfo = mock(RuntimeInfo.class);
    RuntimeInfo sparkConfMonitorRuntimeInfo = mock(RuntimeInfo.class);
    when(operator.getRuntimeInfo()).thenReturn(runtimeInfo);
    when(runtimeInfo.isStarted()).thenReturn(true);
    when(sparkConfMonitor.getRuntimeInfo()).thenReturn(sparkConfMonitorRuntimeInfo);
    when(sparkConfMonitorRuntimeInfo.isStarted()).thenReturn(true);
    when(sparkConfMonitor.getKubernetesClient()).thenReturn(client);
    ReadinessProbe readinessProbe = new ReadinessProbe(Arrays.asList(operator));
    // Static-mock the message sink so the response can be verified directly.
    try (var mockedStatic = Mockito.mockStatic(ProbeUtil.class)) {
        readinessProbe.handle(httpExchange);
        mockedStatic.verify(() -> ProbeUtil.sendMessage(httpExchange, HTTP_OK, "started"));
    }
}
// Returns an iterator over the result entries, or an empty iterator when the
// result set was created without entries.
@Override
public Iterator iterator() {
    if (entries == null) {
        return Collections.emptyIterator();
    }
    return new ResultIterator();
}
// With IterationType.KEY the iterator must yield only the entry keys.
@Test
public void testIterator_whenNotEmpty_IterationType_Key() {
    List<Map.Entry> entries = new ArrayList<>();
    MapEntrySimple entry = new MapEntrySimple("key", "value");
    entries.add(entry);
    ResultSet resultSet = new ResultSet(entries, IterationType.KEY);
    Iterator iterator = resultSet.iterator();
    assertTrue(iterator.hasNext());
    assertEquals("key", iterator.next());
}
/**
 * Fallback converter that bridges Saxon types (NodeInfo, lists of NodeInfo,
 * NodeOverNodeInfo) to target types that already have converters registered
 * from DOM Node or NodeList. Returns null when no applicable conversion exists.
 */
@Converter(fallback = true)
public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) {
    if (NodeInfo.class.isAssignableFrom(value.getClass())) {
        // use a fallback type converter so we can convert the embedded body if the value is NodeInfo
        NodeInfo ni = (NodeInfo) value;
        // first try to find a Converter for Node
        TypeConverter tc = registry.lookup(type, Node.class);
        if (tc != null) {
            Node node = NodeOverNodeInfo.wrap(ni);
            return tc.convertTo(type, exchange, node);
        }
        // if this does not exist we can also try NodeList (there are some type converters for that) as
        // the default Xerces Node implementation also implements NodeList.
        tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<NodeInfo> nil = new LinkedList<>();
            nil.add(ni);
            return tc.convertTo(type, exchange, toDOMNodeList(nil));
        }
    } else if (List.class.isAssignableFrom(value.getClass())) {
        TypeConverter tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            // Only NodeInfo elements are collected; other list elements are ignored.
            List<NodeInfo> lion = new LinkedList<>();
            for (Object o : (List<?>) value) {
                if (o instanceof NodeInfo) {
                    lion.add((NodeInfo) o);
                }
            }
            if (!lion.isEmpty()) {
                NodeList nl = toDOMNodeList(lion);
                return tc.convertTo(type, exchange, nl);
            }
        }
    } else if (NodeOverNodeInfo.class.isAssignableFrom(value.getClass())) {
        // NodeOverNode info is a read-only Node implementation from Saxon. In contrast to the JDK
        // com.sun.org.apache.xerces.internal.dom.NodeImpl class it does not implement NodeList, but
        // many Camel type converters are based on that interface. Therefore we convert to NodeList and
        // try type conversion in the fallback type converter.
        TypeConverter tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<Node> domNodeList = new LinkedList<>();
            domNodeList.add((NodeOverNodeInfo) value);
            return tc.convertTo(type, exchange, new DOMNodeList(domNodeList));
        }
    }
    return null;
}
// Round-trip: document -> DOM Node -> byte[] -> String must preserve the content.
@Test
public void convertToNodeAndByteArray() {
    Node node = context.getTypeConverter().convertTo(Node.class, exchange, doc);
    assertNotNull(node);
    byte[] ba = context.getTypeConverter().convertTo(byte[].class, exchange, node);
    assertNotNull(ba);
    String string = context.getTypeConverter().convertTo(String.class, exchange, ba);
    assertEquals(CONTENT, string);
}
/**
 * Returns the serializer registered under the given alias.
 */
public static Serializer getSerializer(String alias) {
    // Factory pattern: lookup is delegated to the ExtensionLoader registry.
    return EXTENSION_LOADER.getExtension(alias);
}
// Code 117 must resolve to the registered TestSerializer extension.
// NOTE(review): this passes a byte, while the String-alias overload is shown
// above — presumably a byte-code overload exists elsewhere; confirm.
@Test
public void getSerializer() {
    Serializer serializer = SerializerFactory.getSerializer((byte) 117);
    Assert.assertNotNull(serializer);
    Assert.assertEquals(TestSerializer.class, serializer.getClass());
}
// Builds a Kafka-backed SQL table from the table definition. Topics and
// bootstrap servers may come from the location string and/or properties; the
// "format" property selects the payload serializer (CSV is the default for
// flat schemas, and nested schemas get a NestedPayloadKafkaTable).
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
    Schema schema = table.getSchema();
    ObjectNode properties = table.getProperties();
    Optional<ParsedLocation> parsedLocation = Optional.empty();
    if (!Strings.isNullOrEmpty(table.getLocation())) {
        parsedLocation = Optional.of(parseLocation(checkArgumentNotNull(table.getLocation())));
    }
    // Location-derived values and property-derived values are merged together.
    List<String> topics = mergeParam(parsedLocation.map(loc -> loc.topic), (ArrayNode) properties.get("topics"));
    List<String> allBootstrapServers =
        mergeParam(
            parsedLocation.map(loc -> loc.brokerLocation),
            (ArrayNode) properties.get("bootstrap_servers"));
    String bootstrapServers = String.join(",", allBootstrapServers);
    Optional<String> payloadFormat =
        properties.has("format")
            ? Optional.of(properties.get("format").asText())
            : Optional.empty();
    if (Schemas.isNestedSchema(schema)) {
        Optional<PayloadSerializer> serializer =
            payloadFormat.map(
                format ->
                    PayloadSerializers.getSerializer(
                        format,
                        checkArgumentNotNull(schema.getField(PAYLOAD_FIELD).getType().getRowSchema()),
                        TableUtils.convertNode2Map(properties)));
        return new NestedPayloadKafkaTable(schema, bootstrapServers, topics, serializer);
    } else {
        /*
         * CSV is handled separately because multiple rows can be produced from a single message, which
         * adds complexity to payload extraction. It remains here and as the default because it is the
         * historical default, but it will not be extended to support attaching extended attributes to
         * rows.
         */
        if (payloadFormat.orElse("csv").equals("csv")) {
            return new BeamKafkaCSVTable(schema, bootstrapServers, topics);
        }
        PayloadSerializer serializer =
            PayloadSerializers.getSerializer(
                payloadFormat.get(), schema, TableUtils.convertNode2Map(properties));
        return new PayloadSerializerKafkaTable(schema, bootstrapServers, topics, serializer);
    }
}
// A nested thrift-format table definition must build a NestedPayloadKafkaTable
// with brokers and topic taken from the location.
@Test
public void testBuildBeamSqlNestedThriftTable() {
    Table table = mockNestedThriftTable("hello", SimpleThriftMessage.class, TCompactProtocol.Factory.class);
    BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
    assertNotNull(sqlTable);
    assertTrue(sqlTable instanceof NestedPayloadKafkaTable);
    BeamKafkaTable kafkaTable = (BeamKafkaTable) sqlTable;
    assertEquals(LOCATION_BROKER, kafkaTable.getBootstrapServers());
    assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
// Returns the stored name.
public String name() {
    return name;
}
// name() must return the name supplied at construction.
@Test
void name() {
    LocalPredictionId retrieved = new LocalPredictionId(fileName, name);
    assertThat(retrieved.name()).isEqualTo(name);
}
// Registers one gauge per configured ForSt property for the given column
// family, grouping either as a metric variable or as a plain named subgroup.
void registerColumnFamily(String columnFamilyName, ColumnFamilyHandle handle) {
    boolean columnFamilyAsVariable = options.isColumnFamilyAsVariable();
    // The column family is exposed either as a key/value group variable or as a nested group name.
    MetricGroup group = columnFamilyAsVariable
        ? metricGroup.addGroup(COLUMN_FAMILY_KEY, columnFamilyName)
        : metricGroup.addGroup(columnFamilyName);
    for (ForStProperty property : options.getProperties()) {
        ForStNativePropertyMetricView gauge = new ForStNativePropertyMetricView(handle, property);
        group.gauge(property.getForStProperty(), gauge);
    }
}
// Native property values are unsigned; a raw -1 must surface as a positive
// BigInteger rather than a negative long.
@Test
void testReturnsUnsigned() throws Throwable {
    ForStExtension localForStExtension = new ForStExtension();
    localForStExtension.before();
    SimpleMetricRegistry registry = new SimpleMetricRegistry();
    GenericMetricGroup group =
        new GenericMetricGroup(
            registry,
            UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(),
            OPERATOR_NAME);
    ForStNativeMetricOptions options = new ForStNativeMetricOptions();
    options.enableSizeAllMemTables();
    ForStNativeMetricMonitor monitor =
        new ForStNativeMetricMonitor(
            options,
            group,
            localForStExtension.getDB(),
            localForStExtension.getDbOptions().statistics());
    ColumnFamilyHandle handle = forStExtension.createNewColumnFamily(COLUMN_FAMILY_NAME);
    monitor.registerColumnFamily(COLUMN_FAMILY_NAME, handle);
    ForStNativeMetricMonitor.ForStNativePropertyMetricView view = registry.propertyMetrics.get(0);
    // -1 as a signed long corresponds to the maximum unsigned 64-bit value.
    view.setValue(-1);
    BigInteger result = view.getValue();
    localForStExtension.after();
    assertThat(result.signum())
        .withFailMessage("Failed to interpret ForSt result as an unsigned long")
        .isOne();
}
/**
 * Verifies that {@code value} is at least {@code min} (inclusive).
 *
 * @throws IllegalArgumentException if the value is below the minimum
 */
public static void minValueCheck(String name, Long value, long min) {
    if (value >= min) {
        return;
    }
    throw new IllegalArgumentException(name + " cannot be less than <" + min + ">!");
}
// Below-minimum values must be rejected; values at or above the minimum pass.
@Test
public void testMinValueCheck() {
    assertThrows(IllegalArgumentException.class, () -> ValueValidationUtil.minValueCheck("param1", 9L, 10L));
    ValueValidationUtil.minValueCheck("param2", 10L, 10L);
    ValueValidationUtil.minValueCheck("param3", 11L, 10L);
}
// This trigger is its own continuation; the child continuation triggers are ignored.
@Override
protected Trigger getContinuationTrigger(List<Trigger> continuationTriggers) {
    return this;
}
// The trigger's continuation must be the trigger itself.
@Test
public void testContinuation() throws Exception {
    assertEquals(underTest, underTest.getContinuationTrigger());
}
/**
 * Collapses a list of data records keyed by primary key, merging INSERT/UPDATE/
 * DELETE operations on the same key into at most one record per key. Records of
 * any other (or null) operation type are skipped, as before.
 */
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
    Map<DataRecord.Key, DataRecord> result = new HashMap<>();
    for (DataRecord each : dataRecords) {
        PipelineSQLOperationType operationType = each.getType();
        if (PipelineSQLOperationType.INSERT == operationType) {
            mergeInsert(each, result);
        } else if (PipelineSQLOperationType.UPDATE == operationType) {
            mergeUpdate(each, result);
        } else if (PipelineSQLOperationType.DELETE == operationType) {
            mergeDelete(each, result);
        }
    }
    return new ArrayList<>(result.values());
}
// An UPDATE that changes the primary key followed by a DELETE of the new key
// must collapse into a single DELETE carrying the latest metadata.
@Test
void assertUpdatePrimaryKeyBeforeDelete() {
    DataRecord beforeDataRecord = mockUpdateDataRecord(1, 2, 10, 50);
    DataRecord afterDataRecord = mockDeleteDataRecord(2, 10, 50);
    Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord));
    assertThat(actual.size(), is(1));
    DataRecord dataRecord = actual.iterator().next();
    assertThat(dataRecord.getType(), is(PipelineSQLOperationType.DELETE));
    assertThat(dataRecord.getTableName(), is("order"));
    assertThat(dataRecord.getActualTableName(), is("order_0"));
    assertThat(dataRecord.getCommitTime(), is(789L));
    assertColumnsMatched(dataRecord.getColumn(0), new Column("id", 1, null, true, true));
    assertColumnsMatched(dataRecord.getColumn(1), new Column("user_id", 10, null, true, false));
    assertColumnsMatched(dataRecord.getColumn(2), new Column("total_price", 50, null, true, false));
}
/**
 * Loads server properties from the configuration file given as the single
 * command-line argument. The file is deleted after the read attempt, whether
 * or not loading succeeded.
 *
 * @throws IllegalArgumentException if there is not exactly one argument
 * @throws IllegalStateException if the file cannot be read
 */
static Props loadPropsFromCommandLineArgs(String[] args) {
    if (args.length != 1) {
        throw new IllegalArgumentException("Only a single command-line argument is accepted "
            + "(absolute path to configuration file)");
    }

    File propertyFile = new File(args[0]);
    Properties properties = new Properties();
    // try-with-resources replaces the manual reader/IOUtils.closeQuietly dance.
    try (Reader reader = new InputStreamReader(new FileInputStream(propertyFile), StandardCharsets.UTF_8)) {
        properties.load(reader);
    } catch (Exception e) {
        throw new IllegalStateException("Could not read properties from file: " + args[0], e);
    } finally {
        deleteQuietly(propertyFile);
    }
    return new Props(properties);
}
// Properties must be loaded from the file given as the single CLI argument.
@Test
public void loadPropsFromCommandLineArgs_load_properties_from_file() throws Exception {
    File propsFile = temp.newFile();
    FileUtils.write(propsFile, "foo=bar", StandardCharsets.UTF_8);
    Props result = ConfigurationUtils.loadPropsFromCommandLineArgs(new String[] {propsFile.getAbsolutePath()});
    assertThat(result.value("foo")).isEqualTo("bar");
    assertThat(result.rawProperties()).hasSize(1);
}
// Shuts down: closes the destination and tears down the message bus.
void close() throws Exception {
    destination.close();
    mbus.destroy();
}
// With default FeederParams the driver must use dynamic throttling.
@Test
public void requireThatDynamicThrottlingIsDefault() throws Exception {
    TestDriver driver = new TestDriver(new FeederParams(), "", null);
    assertEquals(DynamicThrottlePolicy.class, getThrottlePolicy(driver).getClass());
    assertTrue(driver.close());
}
// Catalog names in privilege definitions are not supported.
@Override
public boolean supportsCatalogsInPrivilegeDefinitions() {
    return false;
}
// Metadata must report no support for catalogs in privilege definitions.
@Test
void assertSupportsCatalogsInPrivilegeDefinitions() {
    assertFalse(metaData.supportsCatalogsInPrivilegeDefinitions());
}
// Fetches all file slices for the partition from the preferred view, falling
// back to the secondary view via execute() when the preferred one fails.
@Override
public Stream<FileSlice> getAllFileSlices(String partitionPath) {
    return execute(partitionPath, preferredView::getAllFileSlices, (path) -> getSecondaryView().getAllFileSlices(path));
}
// The view must prefer the primary, lazily fall back to the secondary on
// failure, keep using the secondary afterwards, and propagate its errors.
@Test
public void testGetAllFileSlices() {
    Stream<FileSlice> actual;
    Stream<FileSlice> expected = testFileSliceStream;
    String partitionPath = "/table2";
    // 1) Primary succeeds: the secondary is never created.
    when(primary.getAllFileSlices(partitionPath)).thenReturn(testFileSliceStream);
    actual = fsView.getAllFileSlices(partitionPath);
    assertEquals(expected, actual);
    verify(secondaryViewSupplier, never()).get();
    resetMocks();
    // 2) Primary fails: fall back to the secondary view.
    when(secondaryViewSupplier.get()).thenReturn(secondary);
    when(primary.getAllFileSlices(partitionPath)).thenThrow(new RuntimeException());
    when(secondary.getAllFileSlices(partitionPath)).thenReturn(testFileSliceStream);
    actual = fsView.getAllFileSlices(partitionPath);
    assertEquals(expected, actual);
    resetMocks();
    // 3) Once failed over, the secondary continues to serve.
    when(secondary.getAllFileSlices(partitionPath)).thenReturn(testFileSliceStream);
    actual = fsView.getAllFileSlices(partitionPath);
    assertEquals(expected, actual);
    resetMocks();
    // 4) A secondary failure is propagated to the caller.
    when(secondary.getAllFileSlices(partitionPath)).thenThrow(new RuntimeException());
    assertThrows(RuntimeException.class, () -> {
        fsView.getAllFileSlices(partitionPath);
    });
}
/**
 * Parses a string into a schema/value pair: null maps to the NULL constant,
 * the empty string stays a plain STRING, anything else goes through the parser.
 */
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    ValueParser parser = new ValueParser(new Parser(value));
    return parser.parse(false);
}
// Integers beyond the long range must parse as Decimal (scale 0) backed by BigDecimal.
@Test
public void shouldParseBigIntegerAsDecimalWithZeroScale() {
    // Just above Long.MAX_VALUE.
    BigInteger value = BigInteger.valueOf(Long.MAX_VALUE).add(new BigInteger("1"));
    SchemaAndValue schemaAndValue = Values.parseString(
        String.valueOf(value)
    );
    assertEquals(Decimal.schema(0), schemaAndValue.schema());
    assertInstanceOf(BigDecimal.class, schemaAndValue.value());
    assertEquals(value, ((BigDecimal) schemaAndValue.value()).unscaledValue());
    // Just below Long.MIN_VALUE.
    value = BigInteger.valueOf(Long.MIN_VALUE).subtract(new BigInteger("1"));
    schemaAndValue = Values.parseString(
        String.valueOf(value)
    );
    assertEquals(Decimal.schema(0), schemaAndValue.schema());
    assertInstanceOf(BigDecimal.class, schemaAndValue.value());
    assertEquals(value, ((BigDecimal) schemaAndValue.value()).unscaledValue());
}
/**
 * Synchronizes table meta (and colocation info) for a cloud-native table or
 * materialized view.
 *
 * @throws DdlException if the database or table does not exist, or the table
 *     is not a cloud-native table/materialized view
 */
public void syncTableMeta(String dbName, String tableName, boolean forceDeleteData) throws DdlException {
    Database db = GlobalStateMgr.getCurrentState().getDb(dbName);
    if (db == null) {
        throw new DdlException(String.format("db %s does not exist.", dbName));
    }
    Table table = db.getTable(tableName);
    if (table == null) {
        throw new DdlException(String.format("table %s does not exist.", tableName));
    }
    if (!table.isCloudNativeTableOrMaterializedView()) {
        throw new DdlException("only support cloud table or cloud mv.");
    }
    syncTableMetaAndColocationInfoInternal(db, (OlapTable) table, forceDeleteData);
}
// Syncing meta for a table that does not exist must raise a DdlException.
@Test
public void testSyncTableMetaTableNotExist() throws Exception {
    new MockUp<GlobalStateMgr>() {
        @Mock
        public Database getDb(String dbName) {
            return new Database(100, dbName);
        }
    };
    // The database resolves but the table lookup returns null.
    new MockUp<Database>() {
        @Mock
        public Table getTable(String tableName) {
            return null;
        }
    };
    Exception exception = Assertions.assertThrows(DdlException.class, () -> {
        starMgrMetaSyncer.syncTableMeta("db", "table", true);
    });
}
// Renders the webapp dependencies report: a back link, then a table with one
// row per jar (artifact, name, Maven id, licence) and a trailing count.
@Override
void toHtml() throws IOException {
    writeBackLink();
    writeln("<br/>");
    if (dependencies.isEmpty()) {
        writeln("#Aucune_dependance#");
        return;
    }
    writeTitle("beans.png", getString("Dependencies"));
    final HtmlTable table = new HtmlTable();
    table.beginTable(getString("Dependencies"));
    // table of dependencies inspired by Jenkins "/about"
    write("<th>Artifact</th><th>#Nom#</th><th>Maven ID</th><th>#Licence#</th>");
    for (final Map.Entry<String, MavenArtifact> entry : dependencies.entrySet()) {
        final String jarFilename = entry.getKey();
        final MavenArtifact dependency = entry.getValue();
        table.nextRow();
        writeDependency(jarFilename, dependency);
    }
    table.endTable();
    writeln("<div align='right'>" + getFormattedString("nb_dependencies", dependencies.size()) + "</div>");
}
// Dependencies should be resolved from the webapp's pom and rendered as HTML.
@Test
public void testDependencies() throws IOException {
    final ServletContext context = createNiceMock(ServletContext.class);
    final String javamelodyDir = "/META-INF/maven/net.bull.javamelody/";
    final String webapp = javamelodyDir + "javamelody-test-webapp/";
    expect(context.getResourcePaths("/META-INF/maven/"))
        .andReturn(Collections.singleton(javamelodyDir)).anyTimes();
    expect(context.getResourcePaths(javamelodyDir)).andReturn(Collections.singleton(webapp))
        .anyTimes();
    final IAnswer<InputStream> answer = () -> getClass().getResourceAsStream("/pom.xml");
    expect(context.getResourceAsStream(webapp + "pom.xml")).andAnswer(answer).anyTimes();
    // One known artifact, one javamelody jar, and one unknown jar.
    final Set<String> dependencies = new LinkedHashSet<>(
        List.of("/WEB-INF/lib/jrobin-1.5.9.jar", "/WEB-INF/lib/javamelody-core-1.65.0.jar",
            "/WEB-INF/lib/nothing.jar"));
    expect(context.getResourcePaths("/WEB-INF/lib/")).andReturn(dependencies).anyTimes();
    expect(context.getMajorVersion()).andReturn(5).anyTimes();
    expect(context.getMinorVersion()).andReturn(0).anyTimes();
    replay(context);
    Parameters.initialize(context);
    final Map<String, MavenArtifact> webappDependencies = MavenArtifact.getWebappDependencies();
    assertFalse("getWebappDependencies", webappDependencies.isEmpty());
    verify(context);
    final StringWriter writer = new StringWriter();
    final HtmlDependenciesReport htmlDependenciesReport = new HtmlDependenciesReport(
        webappDependencies, writer);
    htmlDependenciesReport.toHtml();
    assertNotEmptyAndClear(writer);
}
/**
 * Folds a list of joins into a binary join tree, always keeping the existing
 * tree on the left and attaching the join's other source as a right-hand leaf
 * (flipping the join when the tree already contains its right side).
 *
 * @throws KsqlException on a circular join or a join disconnected from the tree
 */
public static Node build(final List<JoinInfo> joins) {
    Node root = null;
    for (final JoinInfo join : joins) {
        // The very first left source seeds the tree.
        if (root == null) {
            root = new Leaf(join.getLeftSource());
        }
        if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) {
            throw new KsqlException("Cannot perform circular join - both " + join.getRightSource()
                + " and " + join.getLeftJoinExpression()
                + " are already included in the current join tree: " + root.debugString(0));
        } else if (root.containsSource(join.getLeftSource())) {
            root = new Join(root, new Leaf(join.getRightSource()), join);
        } else if (root.containsSource(join.getRightSource())) {
            // The tree holds the right side, so flip the join to keep the tree on the left.
            root = new Join(root, new Leaf(join.getLeftSource()), join.flip());
        } else {
            throw new KsqlException(
                "Cannot build JOIN tree; neither source in the join is the FROM source or included "
                    + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0)
            );
        }
    }
    return root;
}
// For A-B then A-C joins with disjoint equi-join columns, the viable key
// columns must be those of the last join.
@Test
public void shouldComputeViableKeysWithoutOverlap() {
    // Given:
    when(j1.getLeftSource()).thenReturn(a);
    when(j1.getRightSource()).thenReturn(b);
    when(j2.getLeftSource()).thenReturn(a);
    when(j2.getRightSource()).thenReturn(c);
    when(j1.getLeftJoinExpression()).thenReturn(col1);
    when(j1.getRightJoinExpression()).thenReturn(col2);
    when(j2.getLeftJoinExpression()).thenReturn(col3);
    when(j2.getRightJoinExpression()).thenReturn(col4);
    final List<JoinInfo> joins = ImmutableList.of(j1, j2);
    final Node root = JoinTree.build(joins);
    // When:
    final List<?> keys = root.viableKeyColumns();
    // Then:
    assertThat(keys, contains(col3, col4));
}
/**
 * Returns a uniformly distributed random int in the inclusive range
 * {@code [min, max]}.
 *
 * @param min lower bound (inclusive)
 * @param max upper bound (inclusive); must be {@code >= min}
 * @return a random value in {@code [min, max]}
 */
public static int rand(int min, int max) {
    // nextInt(bound) is uniform on [0, bound). The previous form
    // "nextInt(max) % (max - min + 1) + min" was biased because max is
    // generally not a multiple of the range width.
    return RANDOM.nextInt(max - min + 1) + min;
}
@Test
public void testRand() {
    // Fixed-length random strings have the requested length.
    Assert.assertEquals(8, StringKit.rand(8).length());
    Assert.assertEquals(10, StringKit.rand(10).length());
    // Repeated draws of rand(1, 10) must stay within the inclusive bounds.
    for (int i = 0; i < 100; i++) {
        int num = StringKit.rand(1, 10);
        Assert.assertTrue(num < 11);
        Assert.assertTrue(num > 0);
    }
}
/**
 * Writes the complete HTML report: header, core report body, then footer.
 *
 * @throws IOException on write failure
 */
@Override
void toHtml() throws IOException {
    writeHtmlHeader();
    htmlCoreReport.toHtml();
    writeHtmlFooter();
}
@Test
public void testDoubleJavaInformations() throws IOException {
    // Rendering a report with two JavaInformations entries (as with two
    // monitored JVMs) must still produce non-empty output.
    final List<JavaInformations> myJavaInformationsList = List
            .of(new JavaInformations(null, true), new JavaInformations(null, true));
    final HtmlReport htmlReport = new HtmlReport(collector, null, myJavaInformationsList,
            Period.TOUT, writer);
    htmlReport.toHtml(null, null);
    assertNotEmptyAndClear(writer);
}
/**
 * Retrieves the current-interval delay measurement statistics for a DM
 * session on the device hosting the given MEP.
 *
 * @param mdName maintenance domain of the MEP
 * @param maName maintenance association of the MEP
 * @param mepId  the MEP owning the DM session
 * @param dmId   the delay measurement session id
 * @return the current-interval DM statistics from the device driver
 * @throws CfmConfigException if the MEP or its device does not exist, or the
 *         device does not support {@code SoamDmProgrammable}
 * @throws SoamConfigException on SOAM configuration errors from the driver
 */
@Override
public DelayMeasurementStatCurrent getDmCurrentStat(MdId mdName, MaIdShort maName,
        MepId mepId, SoamId dmId) throws CfmConfigException, SoamConfigException {
    MepEntry mep = cfmMepService.getMep(mdName, maName, mepId);
    if (mep == null || mep.deviceId() == null) {
        throw new CfmConfigException("MEP :" + mdName + "/" + maName + "/"
                + mepId + " does not exist");
    } else if (deviceService.getDevice(mep.deviceId()) == null) {
        throw new CfmConfigException("Device " + mep.deviceId()
                + " from MEP :" + mdName + "/" + maName + "/"
                + mepId + " does not exist");
    } else if (!deviceService.getDevice(mep.deviceId()).is(SoamDmProgrammable.class)) {
        // the device must expose the SOAM DM behaviour through its driver
        throw new CfmConfigException("Device " + mep.deviceId()
                + " from MEP :" + mdName + "/" + maName + "/"
                + mepId + " does not implement SoamDmProgrammable");
    }
    log.debug("Retrieving Current Stats for DM {} in MD {}, MA {}, MEP {} "
            + "on Device {}", dmId, mdName, maName, mepId, mep.deviceId());
    // delegate to the device-specific driver behaviour
    return deviceService.getDevice(mep.deviceId())
            .as(SoamDmProgrammable.class).getDmCurrentStat(mdName, maName, mepId, dmId);
}
@Test
public void testGetDmCurrentStat() throws CfmConfigException, SoamConfigException {
    // Wire up mocks so the MEP resolves to a device backed by the test driver.
    expect(deviceService.getDevice(DEVICE_ID1)).andReturn(device1).anyTimes();
    replay(deviceService);
    expect(mepService.getMep(MDNAME1, MANAME1, MEPID1)).andReturn(mep1).anyTimes();
    replay(mepService);
    expect(driverService.getDriver(DEVICE_ID1)).andReturn(testDriver).anyTimes();
    replay(driverService);

    DelayMeasurementStatCurrent dmCurrentStat =
            soamManager.getDmCurrentStat(MDNAME1, MANAME1, MEPID1, DMID101);
    assertNotNull(dmCurrentStat);
    // the stat's collection interval must have started in the past
    assertTrue(dmCurrentStat.startTime().isBefore(Instant.now()));
}
/**
 * Computes the next control state for a broker based on its current state and
 * the incoming heartbeat request.
 *
 * State machine summary:
 * <ul>
 *   <li>FENCED: may shut down immediately, or unfence once its metadata offset
 *       has caught up to its own registration record offset.</li>
 *   <li>UNFENCED: may be fenced, shut down immediately (no leaderships), or
 *       enter controlled shutdown (still holds leaderships).</li>
 *   <li>CONTROLLED_SHUTDOWN: completes once leaderships are moved and every
 *       active broker has replayed past this broker's shutdown offset.</li>
 * </ul>
 *
 * @param brokerId                  the broker sending the heartbeat
 * @param request                   the heartbeat request contents
 * @param registerBrokerRecordOffset offset of this broker's registration record
 * @param hasLeaderships            lazily evaluated: does the broker still lead partitions?
 * @return the current state paired with the resulting state
 */
BrokerControlStates calculateNextBrokerState(int brokerId,
                                             BrokerHeartbeatRequestData request,
                                             long registerBrokerRecordOffset,
                                             Supplier<Boolean> hasLeaderships) {
    BrokerHeartbeatState broker = heartbeatStateOrThrow(brokerId);
    BrokerControlState currentState = currentBrokerState(broker);
    switch (currentState) {
        case FENCED:
            if (request.wantShutDown()) {
                log.info("Fenced broker {} has requested and been granted an immediate " +
                    "shutdown.", brokerId);
                return new BrokerControlStates(currentState, SHUTDOWN_NOW);
            } else if (!request.wantFence()) {
                // Only unfence once the broker has replayed its own registration
                // record; otherwise it could act on stale metadata.
                if (request.currentMetadataOffset() >= registerBrokerRecordOffset) {
                    log.info("The request from broker {} to unfence has been granted " +
                        "because it has caught up with the offset of its register " +
                        "broker record {}.", brokerId, registerBrokerRecordOffset);
                    return new BrokerControlStates(currentState, UNFENCED);
                } else {
                    if (log.isDebugEnabled()) {
                        log.debug("The request from broker {} to unfence cannot yet " +
                            "be granted because it has not caught up with the offset of " +
                            "its register broker record {}. It is still at offset {}.",
                            brokerId, registerBrokerRecordOffset, request.currentMetadataOffset());
                    }
                    return new BrokerControlStates(currentState, FENCED);
                }
            }
            return new BrokerControlStates(currentState, FENCED);

        case UNFENCED:
            if (request.wantFence()) {
                if (request.wantShutDown()) {
                    log.info("Unfenced broker {} has requested and been granted an " +
                        "immediate shutdown.", brokerId);
                    return new BrokerControlStates(currentState, SHUTDOWN_NOW);
                } else {
                    log.info("Unfenced broker {} has requested and been granted " +
                        "fencing", brokerId);
                    return new BrokerControlStates(currentState, FENCED);
                }
            } else if (request.wantShutDown()) {
                // Controlled shutdown only if the broker still leads partitions;
                // otherwise there is nothing to hand off.
                if (hasLeaderships.get()) {
                    log.info("Unfenced broker {} has requested and been granted a " +
                        "controlled shutdown.", brokerId);
                    return new BrokerControlStates(currentState, CONTROLLED_SHUTDOWN);
                } else {
                    log.info("Unfenced broker {} has requested and been granted an " +
                        "immediate shutdown.", brokerId);
                    return new BrokerControlStates(currentState, SHUTDOWN_NOW);
                }
            }
            return new BrokerControlStates(currentState, UNFENCED);

        case CONTROLLED_SHUTDOWN:
            if (hasLeaderships.get()) {
                log.debug("Broker {} is in controlled shutdown state, but can not " +
                    "shut down because more leaders still need to be moved.", brokerId);
                return new BrokerControlStates(currentState, CONTROLLED_SHUTDOWN);
            }
            // All leaderships moved; wait until every active broker has replayed
            // metadata past this broker's recorded shutdown offset.
            long lowestActiveOffset = lowestActiveOffset();
            if (broker.controlledShutdownOffset <= lowestActiveOffset) {
                log.info("The request from broker {} to shut down has been granted " +
                    "since the lowest active offset {} is now greater than the " +
                    "broker's controlled shutdown offset {}.", brokerId,
                    lowestActiveOffset, broker.controlledShutdownOffset);
                return new BrokerControlStates(currentState, SHUTDOWN_NOW);
            }
            log.debug("The request from broker {} to shut down can not yet be granted " +
                "because the lowest active offset {} is not greater than the broker's " +
                "shutdown offset {}.", brokerId, lowestActiveOffset,
                broker.controlledShutdownOffset);
            return new BrokerControlStates(currentState, CONTROLLED_SHUTDOWN);

        default:
            // SHUTDOWN_NOW (or any unexpected state): remain shutting down.
            return new BrokerControlStates(currentState, SHUTDOWN_NOW);
    }
}
@Test
public void testCalculateNextBrokerState() {
    // Exercises every transition of the broker heartbeat state machine:
    // fenced shutdown/unfence, unfenced fence/shutdown (with and without
    // leaderships), and controlled-shutdown completion gating.
    BrokerHeartbeatManager manager = newBrokerHeartbeatManager();
    for (int brokerId = 0; brokerId < 6; brokerId++) {
        manager.register(brokerId, true);
    }
    manager.touch(0, true, 100);
    manager.touch(1, false, 98);
    manager.touch(2, false, 100);
    manager.touch(3, false, 100);
    manager.touch(4, true, 100);
    manager.touch(5, false, 99);
    // broker 5 enters controlled shutdown at offset 99
    manager.maybeUpdateControlledShutdownOffset(5, 99);

    assertEquals(98L, manager.lowestActiveOffset());

    // FENCED transitions
    assertEquals(new BrokerControlStates(FENCED, SHUTDOWN_NOW),
        manager.calculateNextBrokerState(0,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
    assertEquals(new BrokerControlStates(FENCED, UNFENCED),
        manager.calculateNextBrokerState(0,
            new BrokerHeartbeatRequestData().setWantFence(false).
                setCurrentMetadataOffset(100), 100, () -> false));
    // cannot unfence while behind the registration record offset
    assertEquals(new BrokerControlStates(FENCED, FENCED),
        manager.calculateNextBrokerState(0,
            new BrokerHeartbeatRequestData().setWantFence(false).
                setCurrentMetadataOffset(50), 100, () -> false));
    assertEquals(new BrokerControlStates(FENCED, FENCED),
        manager.calculateNextBrokerState(0,
            new BrokerHeartbeatRequestData().setWantFence(true), 100, () -> false));

    // UNFENCED transitions
    assertEquals(new BrokerControlStates(UNFENCED, CONTROLLED_SHUTDOWN),
        manager.calculateNextBrokerState(1,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true));
    assertEquals(new BrokerControlStates(UNFENCED, SHUTDOWN_NOW),
        manager.calculateNextBrokerState(1,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
    assertEquals(new BrokerControlStates(UNFENCED, UNFENCED),
        manager.calculateNextBrokerState(1,
            new BrokerHeartbeatRequestData().setWantFence(false), 100, () -> false));

    // CONTROLLED_SHUTDOWN: blocked while leaderships remain or while the
    // lowest active offset is still behind the shutdown offset
    assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, CONTROLLED_SHUTDOWN),
        manager.calculateNextBrokerState(5,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true));
    assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, CONTROLLED_SHUTDOWN),
        manager.calculateNextBrokerState(5,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
    // fencing broker 1 (offset 98) raises the lowest active offset past 99
    manager.fence(1);
    assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, SHUTDOWN_NOW),
        manager.calculateNextBrokerState(5,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
    assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, CONTROLLED_SHUTDOWN),
        manager.calculateNextBrokerState(5,
            new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true));

    // unregistered brokers are rejected
    assertEquals("Broker 6 is not registered.",
        assertThrows(IllegalStateException.class,
            () -> manager.calculateNextBrokerState(6,
                new BrokerHeartbeatRequestData().setWantShutDown(true),
                100, () -> true)).getMessage());
    assertEquals("Broker 7 is not registered.",
        assertThrows(IllegalStateException.class,
            () -> manager.calculateNextBrokerState(7,
                new BrokerHeartbeatRequestData().setWantShutDown(true),
                100, () -> true)).getMessage());
}
/**
 * Commits the current EOS transaction, sending the given consumer offsets as
 * part of the transaction.
 *
 * @param offsets               offsets to commit transactionally
 * @param consumerGroupMetadata group metadata forwarded to the broker
 * @throws IllegalStateException  if exactly-once is not enabled
 * @throws TaskMigratedException  if the producer was fenced
 * @throws TimeoutException       re-thrown so the caller can apply task.timeout.ms
 * @throws StreamsException       on any other commit failure
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets,
                                 final ConsumerGroupMetadata consumerGroupMetadata) {
    if (!eosEnabled()) {
        throw new IllegalStateException(formatException("Exactly-once is not enabled"));
    }
    maybeBeginTransaction();
    try {
        // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of consumer group metadata
        // Thus if we are using EOS-v1 and can't make this assumption, we must downgrade the request to include only the group id metadata
        final ConsumerGroupMetadata maybeDowngradedGroupMetadata =
            processingMode == EXACTLY_ONCE_V2
                ? consumerGroupMetadata
                : new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
        producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata);
        producer.commitTransaction();
        transactionInFlight = false;
    } catch (final ProducerFencedException | InvalidProducerEpochException
            | CommitFailedException | InvalidPidMappingException error) {
        // these errors indicate a zombie producer: hand off the task
        throw new TaskMigratedException(
            formatException("Producer got fenced trying to commit a transaction"),
            error
        );
    } catch (final TimeoutException timeoutException) {
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException error) {
        throw new StreamsException(
            formatException("Error encountered trying to commit a transaction"),
            error
        );
    }
}
@Test
public void shouldThrowStreamsExceptionOnEosSendOffsetError() {
    // Generic KafkaExceptions from sendOffsetsToTransaction must be wrapped
    // into a StreamsException carrying the original cause.
    eosAlphaMockProducer.sendOffsetsToTransactionException = new KafkaException("KABOOM!");

    final StreamsException thrown = assertThrows(
        StreamsException.class,
        // we pass in `null` to verify that `sendOffsetsToTransaction()` fails instead of `commitTransaction()`
        // `sendOffsetsToTransaction()` would throw an NPE on `null` offsets
        () -> eosAlphaStreamsProducer.commitTransaction(null, new ConsumerGroupMetadata("appId"))
    );

    assertThat(thrown.getCause(), is(eosAlphaMockProducer.sendOffsetsToTransactionException));
    assertThat(
        thrown.getMessage(),
        is("Error encountered trying to commit a transaction [test]")
    );
}
/**
 * Lists the contents of a directory in a B2 bucket, paging through results
 * with a marker until exhausted. When versioning is enabled, file versions
 * are listed as well.
 *
 * @param directory the directory (or bucket root) to list
 * @param listener  notified with each chunk of results as paging proceeds
 * @return the attributed listing of the directory
 * @throws NotfoundException   if no placeholder exists and the listing is empty
 * @throws BackgroundException on B2 API or I/O failures
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        final AttributedList<Path> objects = new AttributedList<>();
        Marker marker = new Marker(null, null);
        final String containerId = fileid.getVersionId(containerService.getContainer(directory));
        // Seen placeholders
        final Map<String, Long> revisions = new HashMap<>();
        // a bucket root is its own placeholder
        boolean hasDirectoryPlaceholder = containerService.isContainer(directory);
        do {
            if(log.isDebugEnabled()) {
                log.debug(String.format("List directory %s with marker %s", directory, marker));
            }
            final B2ListFilesResponse response;
            if(versioning.isEnabled()) {
                // In alphabetical order by file name, and by reverse of date/time uploaded for
                // versions of files with the same name.
                response = session.getClient().listFileVersions(containerId,
                        marker.nextFilename, marker.nextFileId, chunksize,
                        this.createPrefix(directory), String.valueOf(Path.DELIMITER));
            }
            else {
                response = session.getClient().listFileNames(containerId,
                        marker.nextFilename, chunksize,
                        this.createPrefix(directory), String.valueOf(Path.DELIMITER));
            }
            marker = this.parse(directory, objects, response, revisions);
            if(null == marker.nextFileId) {
                // any returned files imply the directory exists
                if(!response.getFiles().isEmpty()) {
                    hasDirectoryPlaceholder = true;
                }
            }
            listener.chunk(directory, objects);
        }
        while(marker.hasNext());
        if(!hasDirectoryPlaceholder && objects.isEmpty()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("No placeholder found for directory %s", directory));
            }
            throw new NotfoundException(directory.getAbsolute());
        }
        return objects;
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
@Test
public void testListFileNameDotDot() throws Exception {
    // A file literally named ".." must be creatable and listable without
    // being interpreted as a parent-directory reference.
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path bucket = new B2DirectoryFeature(session, fileid).mkdir(
            new Path(String.format("test-%s", new AsciiRandomStringService().random()),
                    EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path file = new B2TouchFeature(session, fileid)
            .touch(new Path(bucket, "..", EnumSet.of(Path.Type.file)), new TransferStatus());
    assertEquals(bucket, file.getParent());
    assertEquals("..", file.getName());
    assertTrue(new B2ObjectListService(session, fileid)
            .list(bucket, new DisabledListProgressListener()).contains(file));
    // clean up the test fixtures
    new B2DeleteFeature(session, fileid).delete(Arrays.asList(file, bucket),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns the cache manager instance managed by this factory bean.
 *
 * @return the {@link SpringEmbeddedCacheManager} built by this factory
 */
@Override
public SpringEmbeddedCacheManager getObject() throws Exception {
    return this.cacheManager;
}
@Test
public void testIfSpringEmbeddedCacheManagerFactoryBeanCreatesACacheManagerEvenIfNoDefaultConfigurationLocationHasBeenSet()
        throws Exception {
    // A factory built with defaults (no configuration location) must still
    // produce a usable cache manager.
    objectUnderTest = SpringEmbeddedCacheManagerFactoryBeanBuilder
            .defaultBuilder().build();

    final SpringEmbeddedCacheManager springEmbeddedCacheManager = objectUnderTest.getObject();

    assertNotNull(
            "getObject() should have returned a valid SpringEmbeddedCacheManager, even if no defaulConfigurationLocation "
                    + "has been specified. However, it returned null.",
            springEmbeddedCacheManager);
}
/**
 * Computes a short stable hash of a Secret's data so content changes can be
 * detected. Entries are concatenated in key order to make the result
 * independent of map iteration order.
 *
 * @param secret the secret to hash; must be non-null with non-empty data
 * @return a stable hash stub of the secret's content
 * @throws IllegalArgumentException if the secret is null or has no data
 */
public static String hashSecretContent(Secret secret) {
    // IllegalArgumentException (a RuntimeException subclass) is the idiomatic
    // type for invalid arguments; existing callers catching RuntimeException
    // keep working.
    if (secret == null) {
        throw new IllegalArgumentException("Secret not found");
    }
    if (secret.getData() == null || secret.getData().isEmpty()) {
        throw new IllegalArgumentException("Empty secret");
    }
    StringBuilder sb = new StringBuilder();
    // sort by key so the digest is deterministic regardless of map ordering
    secret.getData().entrySet().stream()
            .sorted(Map.Entry.comparingByKey())
            .forEach(entry -> sb.append(entry.getKey()).append(entry.getValue()));
    return Util.hashStub(sb.toString());
}
@Test
public void testHashSecretContent() {
    // The hash must be stable for a given data content regardless of the
    // order entries were added in.
    Secret secret = new SecretBuilder()
            .addToData(Map.of("username", "foo"))
            .addToData(Map.of("password", "changeit"))
            .build();
    assertThat(ReconcilerUtils.hashSecretContent(secret), is("756937ae"));
}
/**
 * Creates a tombstone record deleting a consumer group member's subscription
 * metadata.
 *
 * @param groupId  the consumer group id
 * @param memberId the member whose subscription is removed
 * @return a coordinator record with a null value (tombstone)
 */
public static CoordinatorRecord newConsumerGroupMemberSubscriptionTombstoneRecord(
    String groupId,
    String memberId
) {
    return new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupMemberMetadataKey()
                .setGroupId(groupId)
                .setMemberId(memberId),
            (short) 5 // key version for ConsumerGroupMemberMetadataKey
        ),
        null // Tombstone.
    );
}
@Test
public void testNewConsumerGroupMemberSubscriptionTombstoneRecord() {
    // The tombstone must carry the versioned key and a null value.
    CoordinatorRecord expectedRecord = new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupMemberMetadataKey()
                .setGroupId("group-id")
                .setMemberId("member-id"),
            (short) 5
        ),
        null);

    assertEquals(expectedRecord, newConsumerGroupMemberSubscriptionTombstoneRecord(
        "group-id",
        "member-id"
    ));
}
public static String replaceAll(String s, final String what, final String withWhat) { StringBuilder sb = null; int fromIndex = 0; //index of last found '$' char + 1 boolean found = false; int length = s.length(); do { final int index = s.indexOf(what, fromIndex); found = index >= 0; if (found) { if (sb == null) sb = new StringBuilder(s.length() * 2); sb.append(s, fromIndex, index); sb.append(withWhat); fromIndex = index + what.length(); } } while (found && fromIndex < length); if (sb != null) { if (fromIndex < length) sb.append(s, fromIndex, length); return sb.toString(); } else { return s; } }
@Test
public void testEscaping() {
    // JUnit's assertEquals takes (expected, actual); the previous argument
    // order was reversed, which produces misleading failure messages even
    // though pass/fail is unaffected for symmetric equality.
    assertEquals("", Escaper.replaceAll("", "$", "$$"));
    assertEquals("$$", Escaper.replaceAll("$", "$", "$$"));
    assertEquals(" $$", Escaper.replaceAll(" $", "$", "$$"));
    assertEquals(" $$ ", Escaper.replaceAll(" $ ", "$", "$$"));
    assertEquals(" $$$$ ", Escaper.replaceAll(" $$ ", "$", "$$"));
    assertEquals("$$ $$ ", Escaper.replaceAll("$ $ ", "$", "$$"));
    assertEquals("$$ $$ $$$$", Escaper.replaceAll("$ $ $$", "$", "$$"));
    assertEquals("$$start", Escaper.replaceAll("$start", "$", "$$"));
    assertEquals("$$*$$", Escaper.replaceAll("$*$", "$", "$$"));
}
/**
 * Creates a matcher that applies {@code jsonPath} to the examined JSON and
 * matches the extracted value against {@code resultMatcher}.
 *
 * @param jsonPath      the compiled JSON path to evaluate
 * @param resultMatcher matcher applied to the evaluation result
 */
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) {
    this.jsonPath = jsonPath;
    this.resultMatcher = resultMatcher;
}
@Test
public void shouldNotMatchOnInvalidJson() {
    // Evaluating a path against unparseable JSON must fail the match rather
    // than throw out of the matcher.
    ReadContext invalidJson = JsonPath.parse("invalid-json");
    assertThat(invalidJson, not(withJsonPath("$.expensive", equalTo(10))));
}
/**
 * Builds the subscription-receive processor for a foreign-key join. It stores
 * each incoming subscription in the timestamped key-value store (keyed by the
 * combined FK+PK) and forwards the change downstream; records with a null
 * foreign key are dropped unless explicitly marked to propagate.
 */
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            // sensor tracking records dropped due to null foreign keys
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
                Thread.currentThread().getName(),
                internalProcessorContext.taskId().toString(),
                internalProcessorContext.metrics()
            );
            store = internalProcessorContext.getStateStore(storeName);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            // null FK records are dropped unless the instruction explicitly
            // requests propagating the null result
            if (record.key() == null && !SubscriptionWrapper.Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE.equals(record.value().getInstruction())) {
                dropRecord();
                return;
            }
            if (record.value().getVersion() > SubscriptionWrapper.CURRENT_VERSION) {
                //Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility
                //with previous versions to enable rolling upgrades. Must develop a strategy for upgrading
                //from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            context().forward(
                record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
                    .withValue(inferChange(record))
                    .withTimestamp(record.timestamp())
            );
        }

        // With a null key there is no prior state to look up; otherwise derive
        // the old value from the store.
        private Change<ValueAndTimestamp<SubscriptionWrapper<K>>> inferChange(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                return new Change<>(ValueAndTimestamp.make(record.value(), record.timestamp()), null);
            } else {
                return inferBasedOnState(record);
            }
        }

        // Reads the previous subscription, then either deletes or upserts the
        // store entry depending on the instruction, returning old and new values.
        private Change<ValueAndTimestamp<SubscriptionWrapper<K>>> inferBasedOnState(final Record<KO, SubscriptionWrapper<K>> record) {
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());

            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);

            //This store is used by the prefix scanner in ForeignTableJoinProcessorSupplier
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            return new Change<>(newValue, oldValue);
        }

        // Logs the drop (with source position when available) and bumps the sensor.
        private void dropRecord() {
            if (context().recordMetadata().isPresent()) {
                final RecordMetadata recordMetadata = context().recordMetadata().get();
                LOG.warn(
                    "Skipping record due to null foreign key. "
                        + "topic=[{}] partition=[{}] offset=[{}]",
                    recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()
                );
            } else {
                LOG.warn(
                    "Skipping record due to null foreign key. Topic, partition, and offset not known."
                );
            }
            droppedRecordsSensor.record();
        }
    };
}
@Test
public void shouldDeleteKeyNoPropagateV0() {
    // A DELETE_KEY_NO_PROPAGATE subscription (wrapper version 0) must remove
    // the store entry and forward a change carrying both old and new values.
    final StoreBuilder<TimestampedKeyValueStore<Bytes, SubscriptionWrapper<String>>> storeBuilder = storeBuilder();
    final SubscriptionReceiveProcessorSupplier<String, String> supplier = supplier(storeBuilder);
    final Processor<String, SubscriptionWrapper<String>, CombinedKey<String, String>,
            Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> processor = supplier.get();
    stateStore = storeBuilder.build();
    context.addStateStore(stateStore);
    stateStore.init((StateStoreContext) context, stateStore);

    // seed the store with a prior subscription for the same combined key
    final SubscriptionWrapper<String> oldWrapper = new SubscriptionWrapper<>(
        new long[]{1L, 2L},
        Instruction.DELETE_KEY_NO_PROPAGATE,
        PK2,
        SubscriptionWrapper.VERSION_0,
        null
    );
    final ValueAndTimestamp<SubscriptionWrapper<String>> oldValue = ValueAndTimestamp.make(oldWrapper, 0);
    final Bytes key = COMBINED_KEY_SCHEMA.toBytes(FK, PK1);
    stateStore.put(key, oldValue);
    processor.init(context);

    final SubscriptionWrapper<String> newWrapper = new SubscriptionWrapper<>(
        new long[]{1L, 2L},
        Instruction.DELETE_KEY_NO_PROPAGATE,
        PK1,
        SubscriptionWrapper.VERSION_0,
        null
    );
    final ValueAndTimestamp<SubscriptionWrapper<String>> newValue = ValueAndTimestamp.make(
        newWrapper, 1L);
    final Record<String, SubscriptionWrapper<String>> record = new Record<>(
        FK,
        newWrapper,
        1L
    );
    processor.process(record);
    final List<CapturedForward<? extends CombinedKey<String, String>,
            ? extends Change<ValueAndTimestamp<SubscriptionWrapper<String>>>>> forwarded = context.forwarded();
    // the store entry must be deleted and exactly one change forwarded
    assertNull(stateStore.get(key));
    assertEquals(1, forwarded.size());
    assertEquals(
        record.withKey(new CombinedKey<>(FK, PK1))
            .withValue(new Change<>(newValue, oldValue)),
        forwarded.get(0).record()
    );
}
/**
 * Returns the schedule parameter's type name.
 *
 * @return the type
 */
public String getType() {
    return type;
}
@Test
public void getType() {
    // Use a mock with the real getType() and an injected field value so only
    // the getter itself is under test.
    JobScheduleParam jobScheduleParam = mock( JobScheduleParam.class );
    when( jobScheduleParam.getType() ).thenCallRealMethod();
    String type = "hitachi";
    ReflectionTestUtils.setField( jobScheduleParam, "type", type );
    Assert.assertEquals( type, jobScheduleParam.getType() );
}
/**
 * Determines whether the candidate's log is at least as up to date as every
 * other member's, ignoring a leader that closed gracefully, and whether the
 * remaining members form a quorum.
 *
 * @param clusterMembers         all members of the cluster
 * @param candidate              the member proposed for leadership
 * @param gracefulClosedLeaderId id of a leader that closed gracefully, excluded
 *                               from the comparison
 * @return true if the candidate would win a unanimous vote of a quorum
 */
public static boolean isUnanimousCandidate(
    final ClusterMember[] clusterMembers,
    final ClusterMember candidate,
    final int gracefulClosedLeaderId)
{
    int votes = 0;
    for (final ClusterMember member : clusterMembers)
    {
        if (member.id != gracefulClosedLeaderId)
        {
            // a member with an unknown position, or one ahead of the
            // candidate, rules out a unanimous vote
            if (NULL_POSITION == member.logPosition || compareLog(candidate, member) < 0)
            {
                return false;
            }
            votes++;
        }
    }

    return votes >= ClusterMember.quorumThreshold(clusterMembers.length);
}
@Test
void isUnanimousCandidateReturnFalseIfLeaderClosesGracefully() {
    // With the gracefully-closed leader excluded, only one member can vote,
    // which is below the quorum threshold for a two-member list.
    final int gracefulClosedLeaderId = 1;
    final ClusterMember candidate = newMember(2, 2, 100);
    final ClusterMember[] members = new ClusterMember[]
    {
        newMember(1, 2, 100), newMember(2, 2, 100),
    };

    assertFalse(isUnanimousCandidate(members, candidate, gracefulClosedLeaderId));
}
/**
 * Returns the fully qualified runtime class name of {@code value}, or
 * {@code null} when {@code value} is {@code null}.
 *
 * @param value the object to inspect, may be null
 * @return the class name, or null for a null input
 */
public static <T> @Nullable String getClassNameOrNull(@Nullable T value) {
  return value == null ? null : value.getClass().getName();
}
@Test
public void testGetClassNameOrNullNull() {
    // null input must map to null, not throw
    assertNull(SingleStoreUtil.getClassNameOrNull(null));
}
/**
 * Specifies the key selector for the first input of the join, inferring the
 * key type from the selector.
 *
 * @param keySelector extracts the join key from elements of the first input
 * @param <KEY>       the key type
 * @return the next builder stage, keyed by the extracted key
 * @throws NullPointerException if {@code keySelector} is null
 */
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector) {
    requireNonNull(keySelector);
    // derive the key TypeInformation from the selector and the input's type
    final TypeInformation<KEY> keyType =
            TypeExtractor.getKeySelectorTypes(keySelector, input1.getType());
    return where(keySelector, keyType);
}
@Test
void testSetAllowedLateness() {
    // allowedLateness() must be retained on the windowed join builder
    Duration lateness = Duration.ofMillis(42L);

    JoinedStreams.WithWindow<String, String, String, TimeWindow> withLateness =
            dataStream1
                    .join(dataStream2)
                    .where(keySelector)
                    .equalTo(keySelector)
                    .window(tsAssigner)
                    .allowedLateness(lateness);

    assertThat(withLateness.getAllowedLatenessDuration()).hasValue(lateness);
}
/**
 * Applies the GSUB features to the glyph sequence in the script's required
 * order: reph adjustment and glyph repositioning first, then each supported
 * feature. When the rkrf feature is absent but vatu is present, rkrf is
 * emulated from vatu.
 *
 * @param originalGlyphIds the glyph ids before substitution
 * @return an unmodifiable list of glyph ids after all substitutions
 */
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds) {
    // pre-shaping reordering before any feature lookup is applied
    List<Integer> intermediateGlyphsFromGsub = adjustRephPosition(originalGlyphIds);
    intermediateGlyphsFromGsub = repositionGlyphs(intermediateGlyphsFromGsub);

    for (String feature : FEATURES_IN_ORDER) {
        if (!gsubData.isFeatureSupported(feature)) {
            if (feature.equals(RKRF_FEATURE) && gsubData.isFeatureSupported(VATU_FEATURE)) {
                // Create your own rkrf feature from vatu feature
                intermediateGlyphsFromGsub = applyRKRFFeature(
                        gsubData.getFeature(VATU_FEATURE),
                        intermediateGlyphsFromGsub);
            }
            LOG.debug("the feature {} was not found", feature);
            continue;
        }
        LOG.debug("applying the feature {}", feature);
        ScriptFeature scriptFeature = gsubData.getFeature(feature);
        intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature, intermediateGlyphsFromGsub);
    }
    return Collections.unmodifiableList(intermediateGlyphsFromGsub);
}
@Test
void testApplyTransforms_blwf() {
    // given: expected glyphs after below-base-form (blwf) substitution
    List<Integer> glyphsAfterGsub = Arrays.asList(602, 336, 516);

    // when: applying the Devanagari GSUB pipeline to the input text
    List<Integer> result = gsubWorkerForDevanagari.applyTransforms(getGlyphIds("ह्रट्र"));

    // then
    assertEquals(glyphsAfterGsub, result);
}
/**
 * Evaluates a jq expression against a JSON string and coerces the result to a
 * Boolean.
 *
 * @param value      the JSON document
 * @param expression the jq expression to evaluate
 * @return the boolean result of the expression
 */
public static Boolean jqBoolean(String value, String expression) {
    return H2Functions.jq(value, expression, JsonNode::asBoolean);
}
@Test
public void jqBoolean() {
    // a boolean JSON field must round-trip through the jq expression
    Boolean jqString = H2Functions.jqBoolean("{\"a\": true}", ".a");
    assertThat(jqString, is(true));
}
/**
 * Parses a receipt handle from its string form. The handle is a
 * separator-joined list of fields in this fixed order:
 * startOffset, retrieveTime, invisibleTime, reviveQueueId, topicType,
 * brokerName, queueId, offset, and (optionally) commitLogOffset.
 *
 * @param receiptHandle the encoded receipt handle
 * @return the decoded receipt handle
 * @throws IllegalArgumentException if fewer than 8 fields are present
 * @throws NumberFormatException    if a numeric field cannot be parsed
 */
public static ReceiptHandle decode(String receiptHandle) {
    List<String> dataList = Arrays.asList(receiptHandle.split(SEPARATOR));
    if (dataList.size() < 8) {
        throw new IllegalArgumentException("Parse failed, dataList size " + dataList.size());
    }
    long startOffset = Long.parseLong(dataList.get(0));
    long retrieveTime = Long.parseLong(dataList.get(1));
    long invisibleTime = Long.parseLong(dataList.get(2));
    int reviveQueueId = Integer.parseInt(dataList.get(3));
    String topicType = dataList.get(4);
    String brokerName = dataList.get(5);
    int queueId = Integer.parseInt(dataList.get(6));
    long offset = Long.parseLong(dataList.get(7));
    // commitLogOffset was added later; older handles omit it, default -1
    long commitLogOffset = -1L;
    if (dataList.size() >= 9) {
        commitLogOffset = Long.parseLong(dataList.get(8));
    }
    return new ReceiptHandleBuilder()
        .startOffset(startOffset)
        .retrieveTime(retrieveTime)
        .invisibleTime(invisibleTime)
        .reviveQueueId(reviveQueueId)
        .topicType(topicType)
        .brokerName(brokerName)
        .queueId(queueId)
        .offset(offset)
        .commitLogOffset(commitLogOffset)
        .receiptHandle(receiptHandle).build();
}
// Decoding a string with too few fields must be rejected.
@Test(expected = IllegalArgumentException.class)
public void testDecodeWithInvalidString() {
    String invalidReceiptHandle = "invalid_data";
    ReceiptHandle.decode(invalidReceiptHandle);
}
/**
 * Renders a parsed configuration value back to its string representation for
 * the given config type.
 *
 * @param parsedValue the parsed value, may be null
 * @param type        the declared config type; null falls back to toString()
 * @return the string form, or null if {@code parsedValue} is null
 * @throws IllegalStateException on an unrecognized type
 */
public static String convertToString(Object parsedValue, Type type) {
    if (parsedValue == null) {
        return null;
    }

    if (type == null) {
        // unknown type: fall back to the value's own string representation
        return parsedValue.toString();
    }

    switch (type) {
        case BOOLEAN:
        case SHORT:
        case INT:
        case LONG:
        case DOUBLE:
        case STRING:
        case PASSWORD:
            return parsedValue.toString();
        case LIST:
            // lists are serialized as a comma-separated string
            List<?> valueList = (List<?>) parsedValue;
            return valueList.stream().map(Object::toString).collect(Collectors.joining(","));
        case CLASS:
            Class<?> clazz = (Class<?>) parsedValue;
            return clazz.getName();
        default:
            throw new IllegalStateException("Unknown type.");
    }
}
@Test
public void testConvertValueToStringShort() {
    // shorts round-trip via toString; null stays null
    assertEquals("32767", ConfigDef.convertToString(Short.MAX_VALUE, Type.SHORT));
    assertNull(ConfigDef.convertToString(null, Type.SHORT));
}
/**
 * Debug logging is intentionally a no-op in this implementation; all
 * arguments are discarded.
 */
@Override
public void d(String tag, String message, Object... args) {
}
@Test
public void debugNotLogged() {
    // the no-op logger must swallow debug calls entirely
    String expectedTag = "TestTag";

    logger.d(expectedTag, "Hello %s", "World");

    assertNotLogged();
}
/**
 * Returns the task's characteristics.
 *
 * @return the characteristics map
 */
public Map<String, String> getCharacteristics() {
    return characteristics;
}
@Test
public void setCharacteristics_null_is_considered_as_empty() {
    // passing null must yield an empty map, not null or an NPE
    CeTask task = underTest.setType("TYPE_1").setUuid("UUID_1")
        .setCharacteristics(null)
        .build();

    assertThat(task.getCharacteristics()).isEmpty();
}
/**
 * Extracts the Python dependency settings from the given config into a fresh
 * Configuration containing only the dependency-related options.
 *
 * @param config the source configuration
 * @return a new Configuration holding the resolved Python dependency options
 */
public static Configuration configurePythonDependencies(ReadableConfig config) {
    final PythonDependencyManager pythonDependencyManager = new PythonDependencyManager(config);
    final Configuration pythonDependencyConfig = new Configuration();
    pythonDependencyManager.applyToConfiguration(pythonDependencyConfig);
    return pythonDependencyConfig;
}
@Test
void testPythonExecutables() {
    // both the worker and client Python executables must be carried over
    Configuration config = new Configuration();
    config.set(PYTHON_EXECUTABLE, "venv/bin/python3");
    config.set(PYTHON_CLIENT_EXECUTABLE, "python37");

    Configuration actual = configurePythonDependencies(config);

    Configuration expectedConfiguration = new Configuration();
    expectedConfiguration.set(PYTHON_EXECUTABLE, "venv/bin/python3");
    expectedConfiguration.set(PYTHON_CLIENT_EXECUTABLE, "python37");
    verifyConfiguration(expectedConfiguration, actual);
}
/**
 * Scales every cost component by the given factor. An infinite cost remains
 * infinite regardless of the factor.
 *
 * @param factor the multiplier applied to rows, cpu, and network
 * @return the scaled cost
 */
@Override
public Cost multiplyBy(double factor) {
    return isInfinite()
            ? INFINITY
            : new Cost(rows * factor, cpu * factor, network * factor);
}
@Test
public void testMultiply() {
    CostFactory factory = CostFactory.INSTANCE;
    // each component scales independently by the factor
    Cost originalCost = factory.makeCost(1.0d, 2.0d, 3.0d);
    Cost multipliedCost = factory.makeCost(3.0d, 6.0d, 9.0d);
    assertEquals(multipliedCost, originalCost.multiplyBy(3.0d));
    // infinite cost is a fixed point of multiplication
    Cost infiniteCost = factory.makeInfiniteCost();
    assertEquals(infiniteCost, infiniteCost.multiplyBy(3.0d));
}
/**
 * Builds a factory that, given a KsqlConfig, instantiates the UDF class,
 * configures it if it implements Configurable (inside the extension security
 * sandbox), and wraps it as a Kudf — with metrics when enabled.
 *
 * @param method                   the annotated UDF method
 * @param udfDescriptionAnnotation the UDF's description annotation
 * @param functionName             the function name used to scope config props
 * @param invoker                  the invoker bound to the UDF method
 * @param sensorName               the metrics sensor name for this UDF
 * @return a factory producing configured Kudf instances
 */
private Function<KsqlConfig, Kudf> getUdfFactory(
    final Method method,
    final UdfDescription udfDescriptionAnnotation,
    final String functionName,
    final FunctionInvoker invoker,
    final String sensorName
) {
  return ksqlConfig -> {
    final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
        method.getDeclaringClass(), udfDescriptionAnnotation.name());
    if (actualUdf instanceof Configurable) {
      // configure() runs untrusted code: enter the UDF sandbox and always
      // leave it again, even on failure
      ExtensionSecurityManager.INSTANCE.pushInUdf();
      try {
        ((Configurable) actualUdf)
            .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
      } finally {
        ExtensionSecurityManager.INSTANCE.popOutUdf();
      }
    }
    final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
    // wrap with a metrics-recording decorator only when metrics are enabled
    return metrics.<Kudf>map(m -> new UdfMetricProducer(
        m.getSensor(sensorName),
        theUdf,
        Time.SYSTEM
    )).orElse(theUdf);
  };
}
@Test
public void shouldCreateUdfFactoryWithJarPathWhenExternal() {
    // a UDF loaded from an external jar must report that jar as its path
    final UdfFactory tostring = FUNC_REG.getUdfFactory(FunctionName.of("tostring"));
    // build the path with File.separator so the test passes on all platforms
    String expectedPath = String.join(File.separator, "src", "test", "resources", "udf-example.jar");
    assertThat(tostring.getMetadata().getPath(), equalTo(expectedPath));
}
/**
 * Forces a Sentinel failover of the given master, as if it were unreachable
 * (issues SENTINEL FAILOVER).
 *
 * @param master the master to fail over
 */
@Override
public void failover(NamedNode master) {
    connection.sync(RedisCommands.SENTINEL_FAILOVER, master.getName());
}
@Test
public void testFailover() throws InterruptedException {
    // Trigger a sentinel failover and verify a different node became master.
    Collection<RedisServer> masters = connection.masters();
    connection.failover(masters.iterator().next());

    // NOTE(review): fixed 10s sleep assumes the failover completes within
    // that window — potentially flaky/slow; consider polling instead.
    Thread.sleep(10000);

    RedisServer newMaster = connection.masters().iterator().next();
    assertThat(masters.iterator().next().getPort()).isNotEqualTo(newMaster.getPort());
}
/**
 * Returns the n-th (timestamp, offset) entry of this time index.
 *
 * @param n zero-based entry index
 * @return the parsed entry
 * @throws IllegalArgumentException if {@code n} is out of range
 */
public TimestampOffset entry(int n) {
    // read under the index lock so the mmap is not resized concurrently
    return maybeLock(lock, () -> {
        if (n >= entries())
            throw new IllegalArgumentException("Attempt to fetch the " + n + "th entry from time index "
                + file().getAbsolutePath() + " which has size " + entries());
        return parseEntry(mmap(), n);
    });
}
@Test
public void testEntryOverflow() {
    // reading entry 0 from an empty index must be rejected
    assertThrows(IllegalArgumentException.class, () -> idx.entry(0));
}
/**
 * Invokes the afterCompletion hooks for the transaction. Only the transaction
 * launcher runs hooks; participants do not. Hook failures are logged and
 * swallowed so one failing hook cannot block the others or the caller.
 *
 * @param tx the global transaction being completed
 */
@Override
public void triggerAfterCompletion(GlobalTransaction tx) {
    if (tx.getGlobalTransactionRole() == GlobalTransactionRole.Launcher) {
        for (TransactionHook hook : getCurrentHooks()) {
            try {
                hook.afterCompletion();
            } catch (Exception e) {
                // deliberate best-effort: keep invoking the remaining hooks
                LOGGER.error("Failed execute afterCompletion in hook {}", e.getMessage(), e);
            }
        }
    }
}
@Test
public void testTriggerAfterCompletion() {
    // Static-mock the hook registry so a known hook is returned, then verify
    // the trigger completes without throwing.
    MockedStatic<TransactionHookManager> enhancedTransactionHookManager =
            Mockito.mockStatic(TransactionHookManager.class);
    enhancedTransactionHookManager.when(TransactionHookManager::getHooks)
            .thenReturn(Collections.singletonList(new MockTransactionHook()));
    MockGlobalTransaction mockGlobalTransaction = new MockGlobalTransaction();
    Assertions.assertDoesNotThrow(
            () -> sagaTransactionalTemplate.triggerAfterCompletion(mockGlobalTransaction));
    // release the static mock so other tests see the real class
    enhancedTransactionHookManager.close();
}
/**
 * Renders the error badge SVG by substituting the label and a width computed
 * from the label text (plus margins) into the error template.
 *
 * @param error the error message to display in the badge
 * @return the rendered SVG markup
 */
public String generateError(String error) {
    Map<String, String> values = ImmutableMap.of(
        // total width = left margin + text width + right margin
        PARAMETER_TOTAL_WIDTH, valueOf(MARGIN + computeWidth(error) + MARGIN),
        PARAMETER_LABEL, error);
    StringSubstitutor strSubstitutor = new StringSubstitutor(values);
    return strSubstitutor.replace(errorTemplate);
}
@Test
public void fail_when_unknown_character() {
    initSvgGenerator();
    // 'é' has no known character width in the generator, so rendering must fail
    // with an explicit error rather than producing a badly-sized badge.
    assertThatThrownBy(() -> underTest.generateError("Méssage with accent"))
        .hasMessage("Invalid character 'é'");
}
public ImmutableSet<String> loadAllMessageStreams(final StreamPermissions streamPermissions) { return allStreamsProvider.get() // Unless explicitly queried, exclude event and failure indices by default // Having these indices in every search, makes sorting almost impossible // because it triggers https://github.com/Graylog2/graylog2-server/issues/6378 // TODO: this filter could be removed, once we implement https://github.com/Graylog2/graylog2-server/issues/6490 .filter(id -> !NON_MESSAGE_STREAM_IDS.contains(id)) .filter(streamPermissions::canReadStream) .collect(ImmutableSet.toImmutableSet()); }
@Test
public void findsStreams() {
    final PermittedStreams sut = new PermittedStreams(() -> java.util.stream.Stream.of("oans", "zwoa", "gsuffa"));
    // A permission check that allows everything must return every provided stream id.
    ImmutableSet<String> result = sut.loadAllMessageStreams(id -> true);
    assertThat(result).containsExactlyInAnyOrder("oans", "zwoa", "gsuffa");
}
/**
 * Partitions {@code invokers} into per-application, per-subset groups according to the
 * mesh destination rules, collecting every invoker that matches no rule into an
 * "unmatched" list.
 *
 * <p>An invoker ends up unmatched when its remote application is empty/invalid, when no
 * rule group exists for its application, or when it matches none of the subsets of its
 * application's destination rules. When the rule map is empty, all invokers are treated
 * as unmatched and the cache carries empty rule/subset maps.
 *
 * @param protocolServiceKey service key used for label matching (see isLabelMatch)
 * @param invokers all candidate invokers
 * @param vsDestinationGroupMap destination rules keyed by remote application name
 */
public static <T> MeshRuleCache<T> build(
        String protocolServiceKey,
        BitList<Invoker<T>> invokers,
        Map<String, VsDestinationGroup> vsDestinationGroupMap) {
    if (CollectionUtils.isNotEmptyMap(vsDestinationGroupMap)) {
        // Empty BitLists share the invokers' origin list so membership is tracked by bit index.
        BitList<Invoker<T>> unmatchedInvokers = new BitList<>(invokers.getOriginList(), true);
        // application -> (subset name -> invokers assigned to that subset)
        Map<String, Map<String, BitList<Invoker<T>>>> totalSubsetMap = new HashMap<>();
        for (Invoker<T> invoker : invokers) {
            String remoteApplication = invoker.getUrl().getRemoteApplication();
            // No usable application name: cannot be routed by rule, keep as unmatched.
            if (StringUtils.isEmpty(remoteApplication) || INVALID_APP_NAME.equals(remoteApplication)) {
                unmatchedInvokers.add(invoker);
                continue;
            }
            VsDestinationGroup vsDestinationGroup = vsDestinationGroupMap.get(remoteApplication);
            // No rules declared for this application: unmatched as well.
            if (vsDestinationGroup == null) {
                unmatchedInvokers.add(invoker);
                continue;
            }
            Map<String, BitList<Invoker<T>>> subsetMap =
                    totalSubsetMap.computeIfAbsent(remoteApplication, (k) -> new HashMap<>());
            boolean matched = false;
            // An invoker may match several subsets across several destination rules;
            // it is added to every subset whose labels match.
            for (DestinationRule destinationRule : vsDestinationGroup.getDestinationRuleList()) {
                DestinationRuleSpec destinationRuleSpec = destinationRule.getSpec();
                List<Subset> subsetList = destinationRuleSpec.getSubsets();
                for (Subset subset : subsetList) {
                    String subsetName = subset.getName();
                    // Subset buckets are created eagerly so empty subsets are still queryable.
                    List<Invoker<T>> subsetInvokers = subsetMap.computeIfAbsent(
                            subsetName, (k) -> new BitList<>(invokers.getOriginList(), true));
                    Map<String, String> labels = subset.getLabels();
                    if (isLabelMatch(invoker.getUrl(), protocolServiceKey, labels)) {
                        subsetInvokers.add(invoker);
                        matched = true;
                    }
                }
            }
            if (!matched) {
                unmatchedInvokers.add(invoker);
            }
        }
        return new MeshRuleCache<>(
                new LinkedList<>(vsDestinationGroupMap.keySet()),
                Collections.unmodifiableMap(vsDestinationGroupMap),
                Collections.unmodifiableMap(totalSubsetMap),
                unmatchedInvokers);
    } else {
        // No rules at all: everything stays routable via the plain invoker list.
        return new MeshRuleCache<>(
                Collections.emptyList(), Collections.emptyMap(), Collections.emptyMap(), invokers);
    }
}
@Test
void testBuild() {
    // Three invokers: empty app name, unknown app, and "app1" which has a rule group.
    BitList<Invoker<Object>> invokers = new BitList<>(Arrays.asList(createInvoker(""), createInvoker("unknown"), createInvoker("app1")));
    Subset subset = new Subset();
    subset.setName("TestSubset");
    DestinationRule destinationRule = new DestinationRule();
    DestinationRuleSpec destinationRuleSpec = new DestinationRuleSpec();
    destinationRuleSpec.setSubsets(Collections.singletonList(subset));
    destinationRule.setSpec(destinationRuleSpec);
    VsDestinationGroup vsDestinationGroup = new VsDestinationGroup();
    vsDestinationGroup.getDestinationRuleList().add(destinationRule);
    Map<String, VsDestinationGroup> vsDestinationGroupMap = new HashMap<>();
    vsDestinationGroupMap.put("app1", vsDestinationGroup);
    // Subset has no labels: the "app1" invoker matches; the other two stay unmatched.
    MeshRuleCache<Object> cache = MeshRuleCache.build("test", invokers, vsDestinationGroupMap);
    assertEquals(2, cache.getUnmatchedInvokers().size());
    assertEquals(1, cache.getSubsetInvokers("app1", "TestSubset").size());
    // With a label the invoker does not carry, even "app1" becomes unmatched.
    subset.setLabels(Collections.singletonMap("test", "test"));
    cache = MeshRuleCache.build("test", invokers, vsDestinationGroupMap);
    assertEquals(3, cache.getUnmatchedInvokers().size());
    assertEquals(0, cache.getSubsetInvokers("app1", "TestSubset").size());
    // Add an "app2" invoker with no rule group; clear the labels again.
    invokers = new BitList<>(Arrays.asList(
            createInvoker(""), createInvoker("unknown"), createInvoker("app1"), createInvoker("app2")));
    subset.setLabels(null);
    cache = MeshRuleCache.build("test", invokers, vsDestinationGroupMap);
    // "app1" matches again; "", "unknown" and "app2" (no rules) are unmatched,
    // and the unknown application yields an empty subset lookup.
    assertEquals(3, cache.getUnmatchedInvokers().size());
    assertEquals(1, cache.getSubsetInvokers("app1", "TestSubset").size());
    assertEquals(0, cache.getSubsetInvokers("app2", "TestSubset").size());
}
/**
 * Handles a config publish request: validates parameters, persists the config
 * (normal / tag / beta variants, each with an optional CAS check on the client md5),
 * publishes a ConfigDataChangeEvent, and records a persistence trace entry.
 *
 * <p>All failures — validation errors, aggr-whitelist rejection, persistence errors —
 * are converted into a fail response rather than propagated; the NacosException error
 * code is preserved when available.
 */
@Override
@TpsControl(pointName = "ConfigPublish")
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
@ExtractorManager.Extractor(rpcExtractor = ConfigRequestParamExtractor.class)
public ConfigPublishResponse handle(ConfigPublishRequest request, RequestMeta meta) throws NacosException {
    try {
        String dataId = request.getDataId();
        String group = request.getGroup();
        String content = request.getContent();
        final String tenant = request.getTenant();
        final String srcIp = meta.getClientIp();
        // Optional metadata travels in the request's addition params.
        final String requestIpApp = request.getAdditionParam("requestIpApp");
        final String tag = request.getAdditionParam("tag");
        final String appName = request.getAdditionParam("appName");
        final String type = request.getAdditionParam("type");
        final String srcUser = request.getAdditionParam("src_user");
        final String encryptedDataKey = request.getAdditionParam("encryptedDataKey");
        // check tenant
        ParamUtils.checkParam(dataId, group, "datumId", content);
        ParamUtils.checkParam(tag);
        // Advance info: only non-null addition params are kept.
        Map<String, Object> configAdvanceInfo = new HashMap<>(10);
        MapUtil.putIfValNoNull(configAdvanceInfo, "config_tags", request.getAdditionParam("config_tags"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "desc", request.getAdditionParam("desc"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "use", request.getAdditionParam("use"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "effect", request.getAdditionParam("effect"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "type", type);
        MapUtil.putIfValNoNull(configAdvanceInfo, "schema", request.getAdditionParam("schema"));
        ParamUtils.checkParam(configAdvanceInfo);
        // Aggregation data ids cannot be published as single configs.
        if (AggrWhitelist.isAggrDataId(dataId)) {
            Loggers.REMOTE_DIGEST.warn("[aggr-conflict] {} attempt to publish single data, {}, {}", srcIp, dataId, group);
            throw new NacosException(NacosException.NO_RIGHT, "dataId:" + dataId + " is aggr");
        }
        ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
        // A non-blank casMd5 requests compare-and-swap semantics against the stored md5.
        configInfo.setMd5(request.getCasMd5());
        configInfo.setType(type);
        configInfo.setEncryptedDataKey(encryptedDataKey);
        String betaIps = request.getAdditionParam("betaIps");
        ConfigOperateResult configOperateResult = null;
        String persistEvent = ConfigTraceService.PERSISTENCE_EVENT;
        if (StringUtils.isBlank(betaIps)) {
            if (StringUtils.isBlank(tag)) {
                // Normal publish (no beta ips, no tag).
                if (StringUtils.isNotBlank(request.getCasMd5())) {
                    configOperateResult =
                            configInfoPersistService.insertOrUpdateCas(srcIp, srcUser, configInfo, configAdvanceInfo);
                    if (!configOperateResult.isSuccess()) {
                        return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
                                "Cas publish fail,server md5 may have changed.");
                    }
                } else {
                    configOperateResult =
                            configInfoPersistService.insertOrUpdate(srcIp, srcUser, configInfo, configAdvanceInfo);
                }
                ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent(false, dataId, group, tenant,
                        configOperateResult.getLastModified()));
            } else {
                // Tag publish: persisted via the tag service, trace event carries the tag.
                if (StringUtils.isNotBlank(request.getCasMd5())) {
                    configOperateResult =
                            configInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser);
                    if (!configOperateResult.isSuccess()) {
                        return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
                                "Cas publish tag config fail,server md5 may have changed.");
                    }
                } else {
                    configOperateResult =
                            configInfoTagPersistService.insertOrUpdateTag(configInfo, tag, srcIp, srcUser);
                }
                persistEvent = ConfigTraceService.PERSISTENCE_EVENT_TAG + "-" + tag;
                ConfigChangePublisher.notifyConfigChange(
                        new ConfigDataChangeEvent(false, dataId, group, tenant, tag,
                                configOperateResult.getLastModified()));
            }
        } else {
            // beta publish
            if (StringUtils.isNotBlank(request.getCasMd5())) {
                configOperateResult =
                        configInfoBetaPersistService.insertOrUpdateBetaCas(configInfo, betaIps, srcIp, srcUser);
                if (!configOperateResult.isSuccess()) {
                    return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
                            "Cas publish beta config fail,server md5 may have changed.");
                }
            } else {
                configOperateResult =
                        configInfoBetaPersistService.insertOrUpdateBeta(configInfo, betaIps, srcIp, srcUser);
            }
            persistEvent = ConfigTraceService.PERSISTENCE_EVENT_BETA;
            // Note: the beta change event carries isBeta = true.
            ConfigChangePublisher.notifyConfigChange(
                    new ConfigDataChangeEvent(true, dataId, group, tenant, configOperateResult.getLastModified()));
        }
        ConfigTraceService.logPersistenceEvent(dataId, group, tenant, requestIpApp,
                configOperateResult.getLastModified(), srcIp, persistEvent, ConfigTraceService.PERSISTENCE_TYPE_PUB,
                content);
        return ConfigPublishResponse.buildSuccessResponse();
    } catch (Exception e) {
        Loggers.REMOTE_DIGEST.error("[ConfigPublishRequestHandler] publish config error ,request ={}", request, e);
        // Preserve the NacosException error code for callers; everything else maps to FAIL.
        return ConfigPublishResponse.buildFailResponse(
                (e instanceof NacosException) ? ((NacosException) e).getErrCode() : ResponseCode.FAIL.getCode(),
                e.getMessage());
    }
}
@Test
void testNormalPublishConfigNotCas() throws Exception {
    String dataId = "testNormalPublishConfigNotCas";
    String group = "group";
    String tenant = "tenant";
    String content = "content";
    // Build a plain publish request (no casMd5, no tag, no betaIps -> normal path).
    ConfigPublishRequest configPublishRequest = new ConfigPublishRequest();
    configPublishRequest.setDataId(dataId);
    configPublishRequest.setGroup(group);
    configPublishRequest.setTenant(tenant);
    configPublishRequest.setContent(content);
    Map<String, String> keyMap = new HashMap<>();
    String srcUser = "src_user111";
    keyMap.put("src_user", srcUser);
    configPublishRequest.setAdditionMap(keyMap);
    RequestMeta requestMeta = new RequestMeta();
    String clientIp = "127.0.0.1";
    requestMeta.setClientIp(clientIp);
    // Capture the ConfigDataChangeEvent published for this dataId.
    AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>();
    NotifyCenter.registerSubscriber(new Subscriber() {
        @Override
        public void onEvent(Event event) {
            ConfigDataChangeEvent event1 = (ConfigDataChangeEvent) event;
            // Filter on dataId so events from other tests are ignored.
            if (event1.dataId.equals(dataId)) {
                reference.set((ConfigDataChangeEvent) event);
            }
        }

        @Override
        public Class<? extends Event> subscribeType() {
            return ConfigDataChangeEvent.class;
        }
    });
    // Stub persistence to report success with a known modification timestamp.
    ConfigOperateResult configOperateResult = new ConfigOperateResult(true);
    long timestamp = System.currentTimeMillis();
    long id = timestamp / 1000;
    configOperateResult.setId(id);
    configOperateResult.setLastModified(timestamp);
    when(configInfoPersistService.insertOrUpdate(eq(requestMeta.getClientIp()), eq(srcUser), any(ConfigInfo.class),
            any(Map.class))).thenReturn(configOperateResult);
    ConfigPublishResponse response = configPublishRequestHandler.handle(configPublishRequest, requestMeta);
    assertEquals(ResponseCode.SUCCESS.getCode(), response.getResultCode());
    // Event publication is asynchronous; give the notify center time to deliver.
    Thread.sleep(500L);
    assertTrue(reference.get() != null);
    // The event must carry the request coordinates and the stubbed timestamp,
    // and a normal publish is neither a batch nor a beta change.
    assertEquals(dataId, reference.get().dataId);
    assertEquals(group, reference.get().group);
    assertEquals(tenant, reference.get().tenant);
    assertEquals(timestamp, reference.get().lastModifiedTs);
    assertFalse(reference.get().isBatch);
    assertFalse(reference.get().isBeta);
}
/**
 * Left-pads {@code str} with {@code padString} until it reaches {@code targetLength}.
 * If the string is already {@code targetLength} or longer (including when the target
 * is zero or negative), it is returned unchanged.
 *
 * @param str          the string to pad (must not be null)
 * @param targetLength desired minimum length of the result
 * @param padString    the character used for padding
 * @return the padded string, or {@code str} itself when no padding is needed
 */
public static String padStart(String str, int targetLength, char padString) {
    // Nothing to do: matches the original loop, which never ran in this case.
    if (str.length() >= targetLength) {
        return str;
    }
    // Build the pad in a StringBuilder instead of repeated String concatenation,
    // turning the original O(n^2) loop into a single O(n) pass.
    StringBuilder padded = new StringBuilder(targetLength);
    for (int i = targetLength - str.length(); i > 0; i--) {
        padded.append(padString);
    }
    return padded.append(str).toString();
}
@Test
public void padStart_Test() {
    // Pads with zeros up to the requested length.
    String binaryString = "010011";
    String expected = "00010011";
    Assertions.assertEquals(expected, TbUtils.padStart(binaryString, 8, '0'));
    // Input already longer than the target: returned unchanged.
    binaryString = "1001010011";
    expected = "1001010011";
    Assertions.assertEquals(expected, TbUtils.padStart(binaryString, 8, '0'));
    // Arbitrary pad character works as well.
    binaryString = "1001010011";
    expected = "******1001010011";
    Assertions.assertEquals(expected, TbUtils.padStart(binaryString, 16, '*'));
    // Typical masking use case: only the tail of the number stays visible.
    String fullNumber = "203439900FFCD5581";
    String last4Digits = fullNumber.substring(11);
    expected = "***********CD5581";
    Assertions.assertEquals(expected, TbUtils.padStart(last4Digits, fullNumber.length(), '*'));
}
@Override
public void execute(final ChannelHandlerContext context, final Object message,
                    final DatabaseProtocolFrontendEngine databaseProtocolFrontendEngine,
                    final ConnectionSession connectionSession) {
    final ExecutorService commandExecutor = determineSuitableExecutorService(connectionSession);
    // Stop the channel from auto-reading while the command runs asynchronously,
    // so the client is back-pressured until reads are re-enabled.
    context.channel().config().setAutoRead(false);
    commandExecutor.execute(
            new CommandExecutorTask(databaseProtocolFrontendEngine, connectionSession, context, message));
}
@Test
void assertExecuteWithDistributedTransaction() {
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    ConnectionSession connectionSession = mock(ConnectionSession.class, RETURNS_DEEP_STUBS);
    when(connectionSession.getConnectionId()).thenReturn(1);
    // Register a mock executor under connection id 1 so execute() picks it up.
    ExecutorService executorService = registerMockExecutorService(1);
    new OKProxyState().execute(context, null, mock(DatabaseProtocolFrontendEngine.class), connectionSession);
    // The state must hand the command off to the connection's executor as a task.
    verify(executorService).execute(any(CommandExecutorTask.class));
    // Clean up the registered executor so other tests start from a clean group.
    ConnectionThreadExecutorGroup.getInstance().unregisterAndAwaitTermination(1);
}