focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Reads from {@code in} into {@code buf} until the buffer is full or the stream
 * is exhausted, whichever happens first.
 *
 * @param in  source stream; it is not closed by this method
 * @param buf destination buffer
 * @return the number of bytes actually read; less than {@code buf.length} only on EOF
 * @throws IOException if an underlying read fails
 */
public static int tryReadFully(final InputStream in, final byte[] buf) throws IOException {
    int filled = 0;
    for (;;) {
        // Buffer full: done.
        if (filled == buf.length) {
            return filled;
        }
        final int count = in.read(buf, filled, buf.length - filled);
        // read() returns -1 only at end of stream; report the partial count.
        if (count < 0) {
            return filled;
        }
        filled += count;
    }
}
// Verifies that tryReadFully fills (at most) the 4-byte buffer from a longer stream
// and that exactly the leading bytes ("test") land in the buffer.
@Test void testTryReadFullyFromLongerStream() throws IOException { ByteArrayInputStream inputStream = new ByteArrayInputStream("test-data".getBytes(StandardCharsets.UTF_8)); byte[] out = new byte[4]; int read = IOUtils.tryReadFully(inputStream, out); assertThat(Arrays.copyOfRange(out, 0, read)) .containsExactly("test".getBytes(StandardCharsets.UTF_8)); }
/**
 * Creates an L3 TTL modification instruction with the {@code TTL_OUT} subtype.
 *
 * @return a new {@code ModTtlInstruction} carrying {@code L3SubType.TTL_OUT}
 */
public static L3ModificationInstruction copyTtlOut() {
    final ModTtlInstruction copyOut = new ModTtlInstruction(L3SubType.TTL_OUT);
    return copyOut;
}
// Checks that Instructions.copyTtlOut() yields an L3MODIFICATION instruction whose
// concrete type is ModTtlInstruction with subtype TTL_OUT.
@Test public void testCopyTtlOutMethod() { final Instruction instruction = Instructions.copyTtlOut(); final L3ModificationInstruction.ModTtlInstruction modTtlInstruction = checkAndConvert(instruction, Instruction.Type.L3MODIFICATION, L3ModificationInstruction.ModTtlInstruction.class); assertThat(modTtlInstruction.subtype(), is(L3ModificationInstruction.L3SubType.TTL_OUT)); }
/**
 * Resolves the key schema for the given topic/schema id by delegating to the
 * shared lookup with the key flag set.
 *
 * @param topicName     topic whose key subject should be queried, if any
 * @param schemaId      explicit schema id to fetch, if any
 * @param expectedFormat format the schema is expected to be in
 * @param serdeFeatures  serde features to apply
 * @return the resolved schema result
 */
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
    // The trailing 'true' marks this lookup as being for the *key* schema.
    final SchemaResult keySchema = getSchema(topicName, schemaId, expectedFormat, serdeFeatures, true);
    return keySchema;
}
// Verifies that a key-schema lookup with an explicit id queries the "<topic>-key"
// subject with that id against the schema-registry client.
@Test public void shouldRequestCorrectSchemaOnGetKeySchemaWithId() throws Exception { // When: supplier.getKeySchema(Optional.of(TOPIC_NAME), Optional.of(42), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)); // Then: verify(srClient).getSchemaBySubjectAndId(TOPIC_NAME + "-key", 42); }
// Two-argument convenience overload: substitutes variables in 'val' using 'pc1'
// with no secondary property container.
public static String substVars(String val, PropertyContainer pc1) throws ScanException { return substVars(val, pc1, null); }
// Asserts that fully-empty nested variable references are reported as a circular
// reference error.
// NOTE(review): the expected message has an unmatched trailing ']' — verify this
// matches OptionHelper's actual error text rather than a copy/paste artifact.
@Test public void nestedEmptyVariables() throws ScanException { Exception e = assertThrows(Exception.class, () -> { OptionHelper.substVars("${${${}}}", context); }); String expectedMessage = CIRCULAR_VARIABLE_REFERENCE_DETECTED+"${ ? ? } --> ${ ? } --> ${}]"; assertEquals(expectedMessage, e.getMessage()); }
// Reads records for a fetch request from remote (tiered) storage.
// Flow: resolve the leader epoch for the requested offset from the local log's
// epoch cache, look up the matching remote segment metadata (throwing
// OffsetOutOfRangeException if none exists), then scan segments forward until a
// batch at/after the offset is found — iterating across segments because log
// compaction may have removed the offset from the first candidate segment.
// The result buffer is sized to maxBytes (or the first batch size when
// minOneMessage allows oversized batches); aborted-transaction info is attached
// only for TXN_COMMITTED isolation. The remote segment stream is always closed.
public FetchDataInfo read(RemoteStorageFetchInfo remoteStorageFetchInfo) throws RemoteStorageException, IOException { int fetchMaxBytes = remoteStorageFetchInfo.fetchMaxBytes; TopicPartition tp = remoteStorageFetchInfo.topicPartition; FetchRequest.PartitionData fetchInfo = remoteStorageFetchInfo.fetchInfo; boolean includeAbortedTxns = remoteStorageFetchInfo.fetchIsolation == FetchIsolation.TXN_COMMITTED; long offset = fetchInfo.fetchOffset; int maxBytes = Math.min(fetchMaxBytes, fetchInfo.maxBytes); Optional<UnifiedLog> logOptional = fetchLog.apply(tp); OptionalInt epoch = OptionalInt.empty(); if (logOptional.isPresent()) { Option<LeaderEpochFileCache> leaderEpochCache = logOptional.get().leaderEpochCache(); if (leaderEpochCache != null && leaderEpochCache.isDefined()) { epoch = leaderEpochCache.get().epochForOffset(offset); } } Optional<RemoteLogSegmentMetadata> rlsMetadataOptional = epoch.isPresent() ? fetchRemoteLogSegmentMetadata(tp, epoch.getAsInt(), offset) : Optional.empty(); if (!rlsMetadataOptional.isPresent()) { String epochStr = (epoch.isPresent()) ? Integer.toString(epoch.getAsInt()) : "NOT AVAILABLE"; throw new OffsetOutOfRangeException("Received request for offset " + offset + " for leader epoch " + epochStr + " and partition " + tp + " which does not exist in remote tier."); } RemoteLogSegmentMetadata remoteLogSegmentMetadata = rlsMetadataOptional.get(); InputStream remoteSegInputStream = null; try { int startPos = 0; RecordBatch firstBatch = null; // Iteration over multiple RemoteSegmentMetadata is required in case of log compaction. // It may be possible the offset is log compacted in the current RemoteLogSegmentMetadata // And we need to iterate over the next segment metadata to fetch messages higher than the given offset. 
// Scan segment-by-segment until a batch covering the offset is found or segments run out.
while (firstBatch == null && rlsMetadataOptional.isPresent()) { remoteLogSegmentMetadata = rlsMetadataOptional.get(); // Search forward for the position of the last offset that is greater than or equal to the target offset startPos = lookupPositionForOffset(remoteLogSegmentMetadata, offset); remoteSegInputStream = remoteLogStorageManager.fetchLogSegment(remoteLogSegmentMetadata, startPos); RemoteLogInputStream remoteLogInputStream = getRemoteLogInputStream(remoteSegInputStream); firstBatch = findFirstBatch(remoteLogInputStream, offset); if (firstBatch == null) { rlsMetadataOptional = findNextSegmentMetadata(rlsMetadataOptional.get(), logOptional.get().leaderEpochCache()); } } if (firstBatch == null) return new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY, false, includeAbortedTxns ? Optional.of(Collections.emptyList()) : Optional.empty()); int firstBatchSize = firstBatch.sizeInBytes(); // An empty record is sent instead of an incomplete batch when // - there is no minimum-one-message constraint and // - the first batch size is more than maximum bytes that can be sent and // - for FetchRequest version 3 or above. if (!remoteStorageFetchInfo.minOneMessage && !remoteStorageFetchInfo.hardMaxBytesLimit && firstBatchSize > maxBytes) { return new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY); } int updatedFetchSize = remoteStorageFetchInfo.minOneMessage && firstBatchSize > maxBytes ? firstBatchSize : maxBytes; ByteBuffer buffer = ByteBuffer.allocate(updatedFetchSize); int remainingBytes = updatedFetchSize; firstBatch.writeTo(buffer); remainingBytes -= firstBatchSize; if (remainingBytes > 0) { // read the input stream until min of (EOF stream or buffer's remaining capacity). 
Utils.readFully(remoteSegInputStream, buffer); } buffer.flip(); FetchDataInfo fetchDataInfo = new FetchDataInfo( new LogOffsetMetadata(offset, remoteLogSegmentMetadata.startOffset(), startPos), MemoryRecords.readableRecords(buffer)); if (includeAbortedTxns) { fetchDataInfo = addAbortedTransactions(firstBatch.baseOffset(), remoteLogSegmentMetadata, fetchDataInfo, logOptional.get()); } return fetchDataInfo; } finally { Utils.closeQuietly(remoteSegInputStream, "RemoteLogSegmentInputStream"); } }
// Exercises the log-compaction path of RemoteLogManager.read(): the first
// nextBatch() call returns null (offset compacted away), the second returns a
// batch larger than fetchMaxBytes. With minOneMessage=true the buffer must be
// sized to the full batch, and HIGH_WATERMARK isolation must yield no
// aborted-transaction info.
@Test public void testReadForFirstBatchInLogCompaction() throws RemoteStorageException, IOException { FileInputStream fileInputStream = mock(FileInputStream.class); RemoteLogInputStream remoteLogInputStream = mock(RemoteLogInputStream.class); ClassLoaderAwareRemoteStorageManager rsmManager = mock(ClassLoaderAwareRemoteStorageManager.class); RemoteLogSegmentMetadata segmentMetadata = mock(RemoteLogSegmentMetadata.class); LeaderEpochFileCache cache = mock(LeaderEpochFileCache.class); when(cache.epochForOffset(anyLong())).thenReturn(OptionalInt.of(1)); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); int fetchOffset = 0; int fetchMaxBytes = 10; int recordBatchSizeInBytes = fetchMaxBytes + 1; RecordBatch firstBatch = mock(RecordBatch.class); ArgumentCaptor<ByteBuffer> capture = ArgumentCaptor.forClass(ByteBuffer.class); FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData( Uuid.randomUuid(), fetchOffset, 0, fetchMaxBytes, Optional.empty() ); when(rsmManager.fetchLogSegment(any(), anyInt())).thenReturn(fileInputStream); when(segmentMetadata.topicIdPartition()).thenReturn(new TopicIdPartition(Uuid.randomUuid(), tp)); // Fetching first time FirstBatch return null because of log compaction. // Fetching second time FirstBatch return data. when(remoteLogInputStream.nextBatch()).thenReturn(null, firstBatch); // Return last offset greater than the requested offset. 
when(firstBatch.lastOffset()).thenReturn(2L); when(firstBatch.sizeInBytes()).thenReturn(recordBatchSizeInBytes); doNothing().when(firstBatch).writeTo(capture.capture()); RemoteStorageFetchInfo fetchInfo = new RemoteStorageFetchInfo( 0, true, tp, partitionData, FetchIsolation.HIGH_WATERMARK, false ); try (RemoteLogManager remoteLogManager = new RemoteLogManager( config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, tp -> Optional.of(mockLog), (topicPartition, offset) -> { }, brokerTopicStats, metrics) { public RemoteStorageManager createRemoteStorageManager() { return rsmManager; } public RemoteLogMetadataManager createRemoteLogMetadataManager() { return remoteLogMetadataManager; } public Optional<RemoteLogSegmentMetadata> fetchRemoteLogSegmentMetadata(TopicPartition topicPartition, int epochForOffset, long offset) { return Optional.of(segmentMetadata); } public RemoteLogInputStream getRemoteLogInputStream(InputStream in) { return remoteLogInputStream; } int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { return 1; } }) { FetchDataInfo fetchDataInfo = remoteLogManager.read(fetchInfo); // Common assertions assertEquals(fetchOffset, fetchDataInfo.fetchOffsetMetadata.messageOffset); assertFalse(fetchDataInfo.firstEntryIncomplete); // FetchIsolation is HIGH_WATERMARK assertEquals(Optional.empty(), fetchDataInfo.abortedTransactions); // Verify that the byte buffer has capacity equal to the size of the first batch assertEquals(recordBatchSizeInBytes, capture.getValue().capacity()); } }
// Parses an OOXML document: ensures an OfficeParserConfig exists in the context,
// then delegates extraction to OOXMLExtractorFactory.
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { //set OfficeParserConfig if the user hasn't specified one configure(context); // Have the OOXML file processed OOXMLExtractorFactory.parse(stream, handler, metadata, context); }
// Verifies that standard and custom document properties (booleans, numbers,
// strings, dates) are extracted from a .pptx with custom properties.
@Test public void testPowerPointCustomProperties() throws Exception { Metadata metadata = new Metadata(); try (InputStream input = getResourceAsStream("/test-documents/testPPT_custom_props.pptx")) { ContentHandler handler = new BodyContentHandler(-1); ParseContext context = new ParseContext(); context.set(Locale.class, Locale.US); new OOXMLParser().parse(input, handler, metadata, context); } assertEquals("application/vnd.openxmlformats-officedocument.presentationml.presentation", metadata.get(Metadata.CONTENT_TYPE)); assertEquals("JOUVIN ETIENNE", metadata.get(TikaCoreProperties.CREATOR)); assertEquals("EJ04325S", metadata.get(TikaCoreProperties.MODIFIER)); assertEquals("2011-08-22T13:30:53Z", metadata.get(TikaCoreProperties.CREATED)); assertEquals("2011-08-22T13:32:49Z", metadata.get(TikaCoreProperties.MODIFIED)); assertEquals("1", metadata.get(Office.SLIDE_COUNT)); assertEquals("3", metadata.get(Office.WORD_COUNT)); assertEquals("Test extraction properties pptx", metadata.get(TikaCoreProperties.TITLE)); assertEquals("true", metadata.get("custom:myCustomBoolean")); assertEquals("3", metadata.get("custom:myCustomNumber")); assertEquals("MyStringValue", metadata.get("custom:MyCustomString")); assertEquals("2010-12-30T22:00:00Z", metadata.get("custom:MyCustomDate")); assertEquals("2010-12-29T22:00:00Z", metadata.get("custom:myCustomSecondDate")); }
// Converts a WAL event into a pipeline Record. Filtered events and non-row
// events become placeholder records; write/update/delete row events are routed
// to their dedicated handlers using the event's table metadata.
// NOTE(review): the fallthrough throws UnsupportedSQLOperationException with an
// empty message — consider including the event class name for diagnosability.
public Record convert(final AbstractWALEvent event) { if (filter(event)) { return createPlaceholderRecord(event); } if (!(event instanceof AbstractRowEvent)) { return createPlaceholderRecord(event); } PipelineTableMetaData tableMetaData = getPipelineTableMetaData(((AbstractRowEvent) event).getTableName()); if (event instanceof WriteRowEvent) { return handleWriteRowEvent((WriteRowEvent) event, tableMetaData); } if (event instanceof UpdateRowEvent) { return handleUpdateRowEvent((UpdateRowEvent) event, tableMetaData); } if (event instanceof DeleteRowEvent) { return handleDeleteRowEvent((DeleteRowEvent) event, tableMetaData); } throw new UnsupportedSQLOperationException(""); }
// An event for an unknown table must degrade to a PlaceholderRecord rather than fail.
@Test void assertUnknownTable() { assertInstanceOf(PlaceholderRecord.class, walEventConverter.convert(mockUnknownTableEvent())); }
/**
 * Coerces each request parameter that matches a non-target mining field into the
 * field's declared data type (both the parameter's type and its value).
 * Parameters without a matching mining field are left untouched.
 *
 * @param notTargetMiningFields mining fields that are not prediction targets
 * @param requestData           incoming PMML request whose parameters are mutated in place
 */
static void convertInputData(final List<KiePMMLMiningField> notTargetMiningFields, final PMMLRequestData requestData) {
    logger.debug("convertInputData {} {}", notTargetMiningFields, requestData);
    final Collection<ParameterInfo> requestParams = requestData.getRequestParams();
    for (KiePMMLMiningField miningField : notTargetMiningFields) {
        for (ParameterInfo candidate : requestParams) {
            if (!miningField.getName().equals(candidate.getName())) {
                continue;
            }
            // Convert the raw value before overwriting the parameter's type/value.
            final Object converted = miningField.getDataType().getActualValue(candidate.getValue());
            candidate.setType(miningField.getDataType().getMappedClass());
            candidate.setValue(converted);
            // Only the first matching parameter is converted, mirroring findFirst().
            break;
        }
    }
}
// Checks that convertInputData coerces parameter values in place to each mining
// field's data type (int->String, String->int, String->float here).
@Test void convertInputDataConvertibles() { List<KiePMMLMiningField> miningFields = IntStream.range(0, 3).mapToObj(i -> { DATA_TYPE dataType = DATA_TYPE.values()[i]; return KiePMMLMiningField.builder("FIELD-" + i, null) .withDataType(dataType) .build(); }) .collect(Collectors.toList()); PMMLRequestData pmmlRequestData = new PMMLRequestData("123", "modelName"); pmmlRequestData.addRequestParam("FIELD-0", 123); pmmlRequestData.addRequestParam("FIELD-1", "123"); pmmlRequestData.addRequestParam("FIELD-2", "1.23"); Map<String, ParameterInfo> mappedRequestParams = pmmlRequestData.getMappedRequestParams(); assertThat(mappedRequestParams.get("FIELD-0").getValue()).isEqualTo(123); assertThat(mappedRequestParams.get("FIELD-1").getValue()).isEqualTo("123"); assertThat(mappedRequestParams.get("FIELD-2").getValue()).isEqualTo("1.23"); PreProcess.convertInputData(miningFields, pmmlRequestData); assertThat(mappedRequestParams.get("FIELD-0").getValue()).isEqualTo("123"); assertThat(mappedRequestParams.get("FIELD-1").getValue()).isEqualTo(123); assertThat(mappedRequestParams.get("FIELD-2").getValue()).isEqualTo(1.23f); }
// Convenience overload: creates/retrieves a circuit breaker by name using the
// registry's default configuration.
@Override public CircuitBreaker circuitBreaker(String name) { return circuitBreaker(name, getDefaultConfig()); }
// Verifies that a registry built via the map constructor can register a named
// configuration and hand out a breaker using that configuration.
@Test public void testCreateCircuitBreakerWitMapConstructor() { Map<String, CircuitBreakerConfig> map = new HashMap<>(); map.put("testBreaker", CircuitBreakerConfig.ofDefaults()); CircuitBreakerRegistry circuitBreakerRegistry = new InMemoryCircuitBreakerRegistry(map); circuitBreakerRegistry.addConfiguration("testConfig", CircuitBreakerConfig.ofDefaults()); final CircuitBreaker circuitBreaker = circuitBreakerRegistry .circuitBreaker("circuitBreaker", circuitBreakerRegistry.getConfiguration("testConfig").get()); assertThat(circuitBreaker).isNotNull(); }
/**
 * Checks whether data written with the given old serializer snapshot can be read
 * by this snapshot. Snapshots of any other kind are incompatible outright;
 * otherwise compatibility is decided by comparing the two Avro schemas.
 *
 * @param oldSerializerSnapshot snapshot taken from the previous serializer
 * @return the schema-compatibility verdict
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (oldSerializerSnapshot instanceof AvroSerializerSnapshot) {
        final AvroSerializerSnapshot<?> previousSnapshot =
                (AvroSerializerSnapshot<?>) oldSerializerSnapshot;
        return resolveSchemaCompatibility(previousSnapshot.schema, schema);
    }
    // A non-Avro snapshot can never be compatible with an Avro one.
    return TypeSerializerSchemaCompatibility.incompatible();
}
// A snapshot serialized and deserialized (round-tripped) must still be
// compatible-as-is with a fresh snapshot of the same serializer.
@Test void anAvroSnapshotIsCompatibleAfterARoundTrip() throws IOException { AvroSerializer<GenericRecord> serializer = new AvroSerializer<>(GenericRecord.class, FIRST_REQUIRED_LAST_OPTIONAL); AvroSerializerSnapshot<GenericRecord> restored = roundTrip(serializer.snapshotConfiguration()); assertThat(serializer.snapshotConfiguration().resolveSchemaCompatibility(restored)) .is(isCompatibleAsIs()); }
// JSON-mapped accessor for the configured metric reporter factories.
@JsonProperty public List<ReporterFactory> getReporters() { return reporters; }
// Verifies the default included/excluded metric attributes of the CSV reporter
// parsed from the test configuration (element 1 of 3 reporters).
@Test void canReadDefaultExcludedAndIncludedAttributes() { assertThat(config.getReporters()) .hasSize(3) .element(1) .isInstanceOfSatisfying(CsvReporterFactory.class, csvReporterFactory -> assertThat(csvReporterFactory) .satisfies(factory -> assertThat(factory.getIncludesAttributes()).isEqualTo(EnumSet.allOf(MetricAttribute.class))) .satisfies(factory -> assertThat(factory.getExcludesAttributes()).isEmpty())); }
// Records the next read position after validating the stream is open and the
// position non-negative. Seeking past EOF is allowed here; the subsequent read fails.
@Override public void seek(long newPos) { Preconditions.checkState(!closed, "Cannot seek: already closed"); Preconditions.checkArgument(newPos >= 0, "Position is negative: %s", newPos); // this allows a seek beyond the end of the stream but the next read will fail next = newPos; }
// Seeks to the middle of a 1 MiB object and checks the remaining bytes match.
@Test public void testSeek() throws Exception { OSSURI uri = new OSSURI(location("seek.dat")); byte[] expected = randomData(1024 * 1024); writeOSSData(uri, expected); try (SeekableInputStream in = new OSSInputStream(ossClient().get(), uri)) { in.seek(expected.length / 2); byte[] actual = new byte[expected.length / 2]; ByteStreams.readFully(in, actual); assertThat(actual) .as("Should have expected seeking stream") .isEqualTo(Arrays.copyOfRange(expected, expected.length / 2, expected.length)); } }
@SuppressWarnings( "fallthrough" ) public static Object truncDate( ScriptEngine actualContext, Bindings actualObject, Object[] ArgList, Object FunctionContext ) { // 2 arguments: truncation of dates to a certain precision // if ( ArgList.length == 2 ) { if ( isNull( ArgList[0] ) ) { return null; } else if ( isUndefined( ArgList[0] ) ) { return undefinedValue; } // This is the truncation of a date... // The second argument specifies the level: ms, s, min, hour, day, month, year // java.util.Date dArg1 = null; Integer level = null; try { dArg1 = (java.util.Date) ArgList[0]; level = (Integer) ArgList[1]; } catch ( Exception e ) { throw new RuntimeException( e.toString() ); } return ScriptAddedFunctions.truncDate( dArg1, level ); } else { throw new RuntimeException( "The function call truncDate requires 2 arguments: a date and a level (int)" ); } }
// Exercises ScriptAddedFunctions.truncDate at every level 5..0 (month, day of
// month, hour, minute, second, millisecond reset respectively) and asserts that
// out-of-range levels (>5 and <0) throw.
@Test public void testTruncDate() { Date dateBase = new Date( 118, Calendar.FEBRUARY, 15, 11, 11, 11 ); // 2018-02-15 11:11:11 Calendar c = Calendar.getInstance(); c.set( 2011, Calendar.NOVEMBER, 11, 11, 11, 11 ); // 2011-11-11 11:11:11 c.set( Calendar.MILLISECOND, 11 ); Date rtn = null; Calendar c2 = Calendar.getInstance(); rtn = ScriptAddedFunctions.truncDate( dateBase, 5 ); c2.setTime( rtn ); Assert.assertEquals( Calendar.JANUARY, c2.get( Calendar.MONTH ) ); rtn = ScriptAddedFunctions.truncDate( dateBase, 4 ); c2.setTime( rtn ); Assert.assertEquals( 1, c2.get( Calendar.DAY_OF_MONTH ) ); rtn = ScriptAddedFunctions.truncDate( dateBase, 3 ); c2.setTime( rtn ); Assert.assertEquals( 0, c2.get( Calendar.HOUR_OF_DAY ) ); rtn = ScriptAddedFunctions.truncDate( dateBase, 2 ); c2.setTime( rtn ); Assert.assertEquals( 0, c2.get( Calendar.MINUTE ) ); rtn = ScriptAddedFunctions.truncDate( dateBase, 1 ); c2.setTime( rtn ); Assert.assertEquals( 0, c2.get( Calendar.SECOND ) ); rtn = ScriptAddedFunctions.truncDate( dateBase, 0 ); c2.setTime( rtn ); Assert.assertEquals( 0, c2.get( Calendar.MILLISECOND ) ); try { ScriptAddedFunctions.truncDate( rtn, 6 ); // Should throw exception Assert.fail( "Expected exception - passed in level > 5 to truncDate" ); } catch ( Exception expected ) { // Should get here } try { ScriptAddedFunctions.truncDate( rtn, -7 ); // Should throw exception Assert.fail( "Expected exception - passed in level < 0 to truncDate" ); } catch ( Exception expected ) { // Should get here } }
// Cache-backed lookup of an OAuth2 client by client id; @Cacheable stores the
// result under the clientId key unless the lookup returns null.
@Override @Cacheable(cacheNames = RedisKeyConstants.OAUTH_CLIENT, key = "#clientId", unless = "#result == null") public OAuth2ClientDO getOAuth2ClientFromCache(String clientId) { return oauth2ClientMapper.selectByClientId(clientId); }
// Inserts a client row, then asserts the service returns an equal record when
// queried by its client id.
@Test public void testGetOAuth2ClientFromCache() { // mock 数据 OAuth2ClientDO clientDO = randomPojo(OAuth2ClientDO.class); oauth2ClientMapper.insert(clientDO); // 准备参数 String clientId = clientDO.getClientId(); // 调用,并断言 OAuth2ClientDO dbClientDO = oauth2ClientService.getOAuth2ClientFromCache(clientId); assertPojoEquals(clientDO, dbClientDO); }
// Returns a live RSet view over the values stored for 'key' in this multimap.
// The anonymous RedissonSet overrides mutators (add/addAll/remove/removeAll/delete)
// to route through the multimap so the key index stays consistent; removeAll runs a
// Lua script that srem's in chunks of 5000 and drops the key's hash entry when the
// backing set becomes empty. TTL/rename operations are unsupported on this view.
@Override public RSet<V> get(final K key) { String keyHash = keyHash(key); final String setName = getValuesName(keyHash); return new RedissonSet<V>(codec, commandExecutor, setName, null) { @Override public RFuture<Boolean> addAsync(V value) { return RedissonSetMultimap.this.putAsync(key, value); } @Override public RFuture<Boolean> addAllAsync(Collection<? extends V> c) { return RedissonSetMultimap.this.putAllAsync(key, c); } @Override public RFuture<Boolean> removeAsync(Object value) { return RedissonSetMultimap.this.removeAsync(key, value); } @Override public RFuture<Boolean> removeAllAsync(Collection<?> c) { if (c.isEmpty()) { return new CompletableFutureWrapper<>(false); } List<Object> args = new ArrayList<Object>(c.size() + 1); args.add(encodeMapKey(key)); encode(args, c); return commandExecutor.evalWriteAsync(RedissonSetMultimap.this.getRawName(), codec, RedisCommands.EVAL_BOOLEAN_AMOUNT, "local count = 0;" + "for i=2, #ARGV, 5000 do " + "count = count + redis.call('srem', KEYS[2], unpack(ARGV, i, math.min(i+4999, table.getn(ARGV)))) " + "end; " + "if count > 0 then " + "if redis.call('scard', KEYS[2]) == 0 then " + "redis.call('hdel', KEYS[1], ARGV[1]); " + "end; " + "return 1;" + "end;" + "return 0; ", Arrays.<Object>asList(RedissonSetMultimap.this.getRawName(), setName), args.toArray()); } @Override public RFuture<Boolean> deleteAsync() { ByteBuf keyState = encodeMapKey(key); return RedissonSetMultimap.this.fastRemoveAsync(Arrays.asList(keyState), Arrays.asList(RedissonSetMultimap.this.getRawName(), setName), RedisCommands.EVAL_BOOLEAN_AMOUNT); } @Override public RFuture<Boolean> clearExpireAsync() { throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set"); } @Override public RFuture<Boolean> expireAsync(long timeToLive, TimeUnit timeUnit, String param, String... 
keys) { throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set"); } @Override protected RFuture<Boolean> expireAtAsync(long timestamp, String param, String... keys) { throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set"); } @Override public RFuture<Long> remainTimeToLiveAsync() { throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set"); } @Override public RFuture<Void> renameAsync(String newName) { throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set"); } @Override public RFuture<Boolean> renamenxAsync(String newName) { throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set"); } }; }
// Two values under one key count as size 2; removing the key empties both the
// returned value-set view and the multimap's size.
@Test public void testSize() { RSetMultimap<SimpleKey, SimpleValue> map = redisson.getSetMultimap("test1"); map.put(new SimpleKey("0"), new SimpleValue("1")); map.put(new SimpleKey("0"), new SimpleValue("2")); assertThat(map.size()).isEqualTo(2); map.fastRemove(new SimpleKey("0")); Set<SimpleValue> s = map.get(new SimpleKey("0")); assertThat(s).isEmpty(); assertThat(map.size()).isEqualTo(0); }
static @Nullable Value lookupDocumentValue(Document document, String fieldPath) { OrderByFieldPath resolvedPath = OrderByFieldPath.fromString(fieldPath); // __name__ is a special field and doesn't exist in (top-level) valueMap (see // https://firebase.google.com/docs/firestore/reference/rest/v1/projects.databases.documents#Document). if (resolvedPath.isDocumentName()) { return Value.newBuilder().setReferenceValue(document.getName()).build(); } return findMapValue(new ArrayList<>(resolvedPath.getSegments()), document.getFieldsMap()); }
// Resolves a nested field path with backtick-quoted, escaped segments and
// expects the nested string value.
@Test public void lookupDocumentValue_nestedField() { assertEquals( QueryUtils.lookupDocumentValue(testDocument, "`fo\\`o.m\\`ap`.`bar.key`"), Value.newBuilder().setStringValue("bar.val").build()); }
// Delegates hold-reference bookkeeping to the resolved file object.
@Override public void holdObject( Object strongRef ) { resolvedFileObject.holdObject( strongRef ); }
// Verifies holdObject is forwarded exactly once to the wrapped file object.
@Test public void testDelegatesHoldObject() { Object strongRef = new Object(); fileObject.holdObject( strongRef ); verify( resolvedFileObject, times( 1 ) ).holdObject( strongRef ); }
/**
 * Collects disk-usage statistics for every node reported by the nodes API.
 *
 * @return one {@code NodeDiskUsageStats} per reported node
 */
@Override
public Set<NodeDiskUsageStats> diskUsageStats() {
    final List<NodeResponse> nodeResponses = nodes();
    return nodeResponses.stream()
            .map(this::toDiskUsageStats)
            .collect(Collectors.toSet());
}

/** Translates a single node response into its disk-usage statistics entry. */
private NodeDiskUsageStats toDiskUsageStats(final NodeResponse node) {
    return NodeDiskUsageStats.create(node.name(), node.role(), node.ip(), node.host(),
            node.diskUsed(), node.diskTotal(), node.diskUsedPercent());
}
// Nodes missing disk statistics must be skipped; the remaining node's fields are
// checked in full.
@Test void testDiskUsageStats() { doReturn(List.of(NODE_WITH_CORRECT_INFO, NODE_WITH_MISSING_DISK_STATISTICS)).when(catApi).nodes(); final Set<NodeDiskUsageStats> diskUsageStats = clusterAdapter.diskUsageStats(); assertThat(diskUsageStats) .hasSize(1) .noneSatisfy( diskStats -> assertThat(diskStats.name()).isEqualTo("nodeWithMissingDiskStatistics") ) .first() .satisfies( nodeDescr -> { assertThat(nodeDescr.name()).isEqualTo("nodeWithCorrectInfo"); assertThat(nodeDescr.ip()).isEqualTo("182.88.0.2"); assertThat(nodeDescr.roles()).isEqualTo(NodeRole.parseSymbolString("dimr")); assertThat(nodeDescr.diskUsed().getBytes()).isEqualTo(SIUnitParser.parseBytesSizeValue("45gb").getBytes()); assertThat(nodeDescr.diskTotal().getBytes()).isEqualTo(SIUnitParser.parseBytesSizeValue("411.5gb").getBytes()); assertThat(nodeDescr.diskUsedPercent()).isEqualTo(10.95d); } ); }
// Deletes S3 objects and buckets: non-container paths are removed first
// (in-progress multipart uploads via the multipart service, otherwise versioned
// object deletes — S3 returns 204 even for missing keys); containers (buckets)
// are collected and deleted afterwards, also evicting their region-cache entry.
// ServiceException is mapped to a BackgroundException per file.
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { final List<Path> containers = new ArrayList<>(); for(Path file : files.keySet()) { if(containerService.isContainer(file)) { containers.add(file); } else { callback.delete(file); final Path bucket = containerService.getContainer(file); if(file.getType().contains(Path.Type.upload)) { // In-progress multipart upload try { multipartService.delete(new MultipartUpload(file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))); } catch(NotfoundException ignored) { log.warn(String.format("Ignore failure deleting multipart upload %s", file)); } } else { try { // Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys session.getClient().deleteVersionedObject( file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file); } } } } for(Path file : containers) { callback.delete(file); try { final String bucket = containerService.getContainer(file).getName(); session.getClient().deleteBucket(bucket); session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file); } } }
// Creates a file on a virtual-host-style endpoint, deletes it (listed twice to
// exercise duplicate handling), and verifies it is gone.
@Test public void testDeleteFileVirtualHost() throws Exception { final Path test = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(virtualhost); new S3TouchFeature(virtualhost, acl).touch(test, new TransferStatus()); assertTrue(new S3FindFeature(virtualhost, acl).find(test)); new S3DefaultDeleteFeature(virtualhost).delete(Arrays.asList(test, test), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new S3FindFeature(virtualhost, acl).find(test)); }
// Single-context convenience overload delegating to the batch abort.
@Override public void abortJob(JobContext originalContext, int status) throws IOException { abortJobs(Collections.singletonList(originalContext)); }
// Writes records, aborts the job, and verifies no files or data remain.
@Test public void testAbortJob() throws IOException { HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter(); Table table = table(temp.getRoot().getPath(), false); JobConf conf = jobConf(table, 1); writeRecords(table.name(), 1, 0, true, false, conf); committer.abortJob(new JobContextImpl(conf, JOB_ID), JobStatus.State.FAILED); HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 0); HiveIcebergTestUtils.validateData(table, Collections.emptyList(), 0); }
/**
 * Fetches the window values for {@code key} in descending time order, probing the
 * underlying stores in provider order and returning the first non-empty iterator.
 * Empty iterators are closed eagerly to release store resources.
 *
 * @param key      key to fetch; must not be null
 * @param timeFrom lower bound of the window range (inclusive)
 * @param timeTo   upper bound of the window range (inclusive)
 * @return a backward iterator over the first store holding data, or an empty iterator
 * @throws InvalidStateStoreException if a store has been migrated during a rebalance
 */
@Override
public WindowStoreIterator<V> backwardFetch(final K key,
                                            final Instant timeFrom,
                                            final Instant timeTo) throws IllegalArgumentException {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
    for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
        try {
            final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
            if (result.hasNext()) {
                return result;
            }
            result.close();
        } catch (final InvalidStateStoreException e) {
            // Rethrow with a rediscovery hint, preserving the original exception as
            // the cause (previously it was dropped, losing the failing store's context).
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; " +
                    "please re-discover its location from the state metadata.", e);
        }
    }
    return KeyValueIterators.emptyWindowStoreIterator();
}
// A backward fetch over a range with no data must yield an empty iterator.
@Test public void shouldReturnBackwardEmptyIteratorIfNoData() { try (final WindowStoreIterator<String> iterator = windowStore.backwardFetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L))) { assertFalse(iterator.hasNext()); } }
// Deletes an SMS template after verifying it exists; @CacheEvict clears the whole
// cache because the cache is keyed by code, not by id.
@Override @CacheEvict(cacheNames = RedisKeyConstants.SMS_TEMPLATE, allEntries = true) // allEntries 清空所有缓存,因为 id 不是直接的缓存 code,不好清理 public void deleteSmsTemplate(Long id) { // 校验存在 validateSmsTemplateExists(id); // 更新 smsTemplateMapper.deleteById(id); }
// Deleting a non-existent template id must raise SMS_TEMPLATE_NOT_EXISTS.
@Test public void testDeleteSmsTemplate_notExists() { // 准备参数 Long id = randomLongId(); // 调用, 并断言异常 assertServiceException(() -> smsTemplateService.deleteSmsTemplate(id), SMS_TEMPLATE_NOT_EXISTS); }
// Discovers Hazelcast members on Azure: expands each Azure address across the
// configured port range into DiscoveryNodes. Credential and authorization
// failures fall back to standalone mode (logged once); any other failure is
// logged and an empty list returned so startup is never blocked.
@Override public Iterable<DiscoveryNode> discoverNodes() { try { Collection<AzureAddress> azureAddresses = azureClient.getAddresses(); logAzureAddresses(azureAddresses); List<DiscoveryNode> result = new ArrayList<>(); for (AzureAddress azureAddress : azureAddresses) { for (int port = portRange.getFromPort(); port <= portRange.getToPort(); port++) { result.add(createDiscoveryNode(azureAddress, port)); } } return result; } catch (NoCredentialsException e) { if (!isKnownExceptionAlreadyLogged) { LOGGER.warning("No Azure credentials found! Starting standalone. To use Hazelcast Azure discovery, configure" + " properties (client-id, tenant-id, client-secret) or assign a managed identity to the Azure Compute" + " instance"); LOGGER.finest(e); isKnownExceptionAlreadyLogged = true; } } catch (RestClientException e) { if (e.getHttpErrorCode() == HTTP_FORBIDDEN) { if (!isKnownExceptionAlreadyLogged) { LOGGER.warning("Required role is not assigned to service principal! To use Hazelcast Azure discovery assign" + " a role to service principal with correct 'Read' permissions. Starting standalone."); isKnownExceptionAlreadyLogged = true; } LOGGER.finest(e); } else { LOGGER.warning("Cannot discover nodes. Starting standalone.", e); } } catch (Exception e) { LOGGER.warning("Cannot discover nodes. Starting standalone.", e); } return Collections.emptyList(); }
// No Azure addresses must yield an empty discovery result.
@Test public void discoverNodesEmpty() { // given given(azureClient.getAddresses()).willReturn(new ArrayList<>()); // when Iterable<DiscoveryNode> nodes = azureDiscoveryStrategy.discoverNodes(); // then assertFalse(nodes.iterator().hasNext()); }
// Connects a processor to the given state stores in the internal builder and
// returns this topology for chaining.
public synchronized Topology connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) { internalTopologyBuilder.connectProcessorAndStateStores(processorName, stateStoreNames); return this; }
// Connecting a processor with zero store names must be rejected.
@Test public void shouldNotAllowZeroStoreNameWhenConnectingProcessorAndStateStores() { assertThrows(TopologyException.class, () -> topology.connectProcessorAndStateStores("processor")); }
/**
 * Opens a cursor over the delegate record set, wrapped in the field-set
 * filtering view configured for this record set.
 *
 * @return a filtering cursor backed by the delegate's cursor
 */
@Override
public RecordCursor cursor() {
    final RecordCursor filteringCursor =
            new FieldSetFilteringRecordCursor(delegate.cursor(), fieldSets);
    return filteringCursor;
}
@Test public void test() { FunctionAndTypeManager functionAndTypeManager = createTestFunctionAndTypeManager(); ArrayType arrayOfBigintType = new ArrayType(BIGINT); FieldSetFilteringRecordSet fieldSetFilteringRecordSet = new FieldSetFilteringRecordSet( functionAndTypeManager, new InMemoryRecordSet( ImmutableList.of(BIGINT, BIGINT, TIMESTAMP_WITH_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE, arrayOfBigintType, arrayOfBigintType), ImmutableList.of( ImmutableList.of( 100L, 100L, // test same time in different time zone to make sure equal check was done properly packDateTimeWithZone(100, getTimeZoneKeyForOffset(123)), packDateTimeWithZone(100, getTimeZoneKeyForOffset(234)), // test structural type arrayBlockOf(BIGINT, 12, 34, 56), arrayBlockOf(BIGINT, 12, 34, 56)))), ImmutableList.of(ImmutableSet.of(0, 1), ImmutableSet.of(2, 3), ImmutableSet.of(4, 5))); RecordCursor recordCursor = fieldSetFilteringRecordSet.cursor(); assertTrue(recordCursor.advanceNextPosition()); }
@Override public void execute(Exchange exchange) throws SmppException { byte[] message = getShortMessage(exchange.getIn()); ReplaceSm replaceSm = createReplaceSmTempate(exchange); replaceSm.setShortMessage(message); if (log.isDebugEnabled()) { log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), replaceSm.getMessageId()); } try { session.replaceShortMessage( replaceSm.getMessageId(), TypeOfNumber.valueOf(replaceSm.getSourceAddrTon()), NumberingPlanIndicator.valueOf(replaceSm.getSourceAddrNpi()), replaceSm.getSourceAddr(), replaceSm.getScheduleDeliveryTime(), replaceSm.getValidityPeriod(), new RegisteredDelivery(replaceSm.getRegisteredDelivery()), replaceSm.getSmDefaultMsgId(), replaceSm.getShortMessage()); } catch (Exception e) { throw new SmppException(e); } if (log.isDebugEnabled()) { log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'", exchange.getExchangeId(), replaceSm.getMessageId()); } Message rspMsg = ExchangeHelper.getResultMessage(exchange); rspMsg.setHeader(SmppConstants.ID, replaceSm.getMessageId()); }
/** With GSM 8-bit data coding the raw body bytes must be passed through unmodified. */
@Test
public void bodyWithGSM8bitDataCodingNotModified() throws Exception {
    final int dataCoding = 0xF7; /* GSM 8-bit class 3 */
    // Deliberately includes bytes outside any text alphabet to prove no re-encoding happens.
    byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF };
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm");
    exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding);
    exchange.getIn().setBody(body);
    command.execute(exchange);
    // The session receives the exact same byte array as was set on the body.
    verify(session).replaceShortMessage((String) isNull(),
            eq(TypeOfNumber.UNKNOWN),
            eq(NumberingPlanIndicator.UNKNOWN),
            eq("1616"),
            (String) isNull(),
            (String) isNull(),
            eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
            eq((byte) 0),
            eq(body));
}
/**
 * Post-interception hook for the consumer's close path: runs the optional
 * handler and evicts the consumer's wrapper from the controller cache.
 *
 * @param context the intercepted method's execution context
 * @return the same context, unchanged
 */
@Override
public ExecuteContext after(ExecuteContext context) {
    // Look up the wrapper cached under the consumer instance's hash; nothing to do if absent.
    final KafkaConsumerWrapper wrapper = KafkaConsumerController.getKafkaConsumerCache()
            .get(context.getObject().hashCode());
    if (wrapper == null) {
        return context;
    }
    if (handler != null) {
        handler.doAfter(context);
    }
    // The consumer is closing, so it must no longer be tracked.
    KafkaConsumerController.removeKafkaConsumeCache(wrapper.getKafkaConsumer());
    LOGGER.info("Remove consumer cache after closing.");
    return context;
}
/** After interception the consumer's cache entry must be gone. */
@Test
public void testAfter() {
    ExecuteContext context = ExecuteContext.forMemberMethod(mockConsumer, null, null, null, null);
    interceptor.after(context);
    // The interceptor removed the cached wrapper for mockConsumer.
    Assert.assertEquals(0, KafkaConsumerController.getKafkaConsumerCache().values().size());
}
/**
 * Renders the named compiled template against the given row and appends the
 * resulting DRL fragment to the accumulated rules.
 *
 * @param templateName key of a previously compiled template
 * @param row          data row whose cells populate the template variables
 * @throws RuntimeException wrapping any template-resolution or execution failure
 */
public void generate( String templateName, Row row ) {
    try {
        CompiledTemplate template = getTemplate( templateName );
        VariableResolverFactory resolverFactory = new MapVariableResolverFactory();
        Map<String, Object> templateVars = new HashMap<>();
        // Seed bookkeeping variables the templates rely on.
        initializePriorCommaConstraints( templateVars );
        initializeHasPriorJunctionConstraint( templateVars );
        templateVars.put( "row", row );
        // Each cell contributes its own value(s) to the variable map.
        row.getCells().forEach( cell -> cell.addValue( templateVars ) );
        String drl = String.valueOf( TemplateRuntime.execute( template, templateVars, resolverFactory, registry ) );
        rules.add( drl );
    } catch ( Exception e ) {
        throw new RuntimeException( e );
    }
}
/** Generated fragments are concatenated in call order (rt2 first, then rt1). */
@Test
public void testGenerate() {
    g.generate("rt2", new Row());
    g.generate("rt1", new Row());
    String drl = g.getDrl();
    assertThat(drl).isEqualTo("Test template 2\n\nTest template 1\n\n");
}
/**
 * Serializes {@code value} into a big-endian byte array of exactly {@code length} bytes.
 * <p>
 * If {@code length < 8}, the high-order bytes being dropped must all be zero, otherwise
 * the value would be silently truncated and an {@link IllegalArgumentException} is thrown.
 * If {@code length > 8}, the result is left-padded with zero bytes (via {@code adjustLength}).
 *
 * @param value  the value to serialize (big-endian)
 * @param length the exact length of the returned array, in bytes
 * @return a byte array of {@code length} bytes holding {@code value}
 * @throws IllegalArgumentException if {@code value} has significant bytes beyond {@code length}
 */
public static byte[] toByteArray(long value, int length) {
    final byte[] buffer = ByteBuffer.allocate(8).putLong(value).array();
    // Reject values whose significant high-order bytes would be cut off when shrinking to 'length'.
    // (When length >= 8 this loop does not run and the value always fits.)
    for (int i = 0; i < 8 - length; i++) {
        if (buffer[i] != 0) {
            // Fixed message grammar: was "Value is does not fit into byte array".
            throw new IllegalArgumentException(
                    "Value does not fit into byte array " + (8 - i) + " > " + length);
        }
    }
    return adjustLength(buffer, length);
}
/** Requesting more bytes than a long holds (9 > 8) left-pads the result with zero bytes. */
@Test
public void toByteArrayLongShouldAddZeros() {
    assertArrayEquals(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 1},
            ByteArrayUtils.toByteArray(1L, 9));
}
/**
 * Returns the name of the write (primary) data source of this read/write-splitting group.
 *
 * @return write data source name
 */
public String getWriteDataSource() {
    return this.readwriteSplittingGroup.getWriteDataSource();
}
/** The rule exposes the write data source name configured for the group. */
@Test
void assertGetWriteDataSource() {
    ReadwriteSplittingDataSourceGroupRule dataSourceGroupRule = new ReadwriteSplittingDataSourceGroupRule(
            new ReadwriteSplittingDataSourceGroupRuleConfiguration("test_pr", "write_ds",
                    Arrays.asList("read_ds_0", "read_ds_1"), TransactionalReadQueryStrategy.DYNAMIC, null),
            TransactionalReadQueryStrategy.DYNAMIC, new RandomLoadBalanceAlgorithm());
    String writeDataSourceName = dataSourceGroupRule.getWriteDataSource();
    assertThat(writeDataSourceName, is("write_ds"));
}
/**
 * Creates a new empty {@link JSONObject} with default configuration.
 *
 * @return a fresh, empty JSON object
 */
public static JSONObject createObj() {
    return new JSONObject();
}
/** stripTrailingZeros controls whether 12.00 is rendered as "12" or "12.0", and can be toggled after creation. */
@Test
public void setStripTrailingZerosTest() {
    // 默认去除多余的0 (by default trailing zeros are stripped)
    final JSONObject jsonObjectDefault = JSONUtil.createObj()
            .set("test2", 12.00D);
    assertEquals("{\"test2\":12}", jsonObjectDefault.toString());

    // 不去除多余的0 (keep trailing zeros when disabled)
    final JSONObject jsonObject = JSONUtil.createObj(JSONConfig.create().setStripTrailingZeros(false))
            .set("test2", 12.00D);
    assertEquals("{\"test2\":12.0}", jsonObject.toString());

    // 去除多余的0 (re-enabling strips again on the same object)
    jsonObject.getConfig().setStripTrailingZeros(true);
    assertEquals("{\"test2\":12}", jsonObject.toString());
}
/**
 * Performs a prefix scan across all underlying stores for this store name, returning a
 * single iterator that concatenates each store's matching entries, with duplicate-peek
 * handling delegated to {@link DelegatingPeekingKeyValueIterator}.
 *
 * @param prefix              the key prefix to scan for (must not be null)
 * @param prefixKeySerializer serializer used to turn the prefix into bytes (must not be null)
 * @return iterator over all matching key-value pairs from every backing store
 * @throws InvalidStateStoreException if any backing store has been migrated away
 */
@Override
public <PS extends Serializer<P>, P> KeyValueIterator<K, V> prefixScan(final P prefix, final PS prefixKeySerializer) {
    Objects.requireNonNull(prefix);
    Objects.requireNonNull(prefixKeySerializer);

    // Lambda instead of an anonymous class: NextIteratorFunction has a single abstract method.
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = store -> {
        try {
            return store.prefixScan(prefix, prefixKeySerializer);
        } catch (final InvalidStateStoreException e) {
            // Rethrow with a message telling the caller to re-discover the store's location.
            throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
        }
    };

    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(
        storeName,
        new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
/** Prefix scan with prefix "a" matches "a" and "aa" but not "b". */
@Test
public void shouldSupportPrefixScan() {
    stubOneUnderlying.put("a", "a");
    stubOneUnderlying.put("aa", "b");
    stubOneUnderlying.put("b", "c");
    final List<KeyValue<String, String>> results = toList(theStore.prefixScan("a", new StringSerializer()));
    assertTrue(results.contains(new KeyValue<>("a", "a")));
    assertTrue(results.contains(new KeyValue<>("aa", "b")));
    assertEquals(2, results.size());
}
/**
 * Smooths the elevations of a geometry's pillar nodes with a distance-based moving
 * average: each pillar node's elevation becomes the area-under-elevation-curve within
 * a window around it, divided by the window width. Tower nodes (first and last points)
 * are never modified. The window shrinks symmetrically near either tower node.
 *
 * @param geometry      the point list to smooth in place (lat/lon/ele)
 * @param maxWindowSize maximum averaging window, in meters, centered on each pillar node
 */
public static void smooth(PointList geometry, double maxWindowSize) {
    if (geometry.size() <= 2) {
        // geometry consists only of tower nodes, there are no pillar nodes to be smoothed in between
        return;
    }

    // calculate the distance between all points once here to avoid repeated calculation.
    // for n nodes there are always n-1 edges
    double[] distances = new double[geometry.size() - 1];
    for (int i = 0; i <= geometry.size() - 2; i++) {
        distances[i] = DistancePlaneProjection.DIST_PLANE.calcDist(
                geometry.getLat(i), geometry.getLon(i),
                geometry.getLat(i + 1), geometry.getLon(i + 1)
        );
    }

    // map that will collect all smoothed elevation values, size is less by 2
    // because elevation of start and end point (tower nodes) won't be touched
    IntDoubleHashMap averagedElevations = new IntDoubleHashMap((geometry.size() - 1) * 4 / 3);

    // iterate over every pillar node to smooth its elevation
    // first and last points are left out as they are tower nodes
    for (int i = 1; i <= geometry.size() - 2; i++) {
        // first, determine the average window which could be smaller when close to pillar nodes
        double searchDistance = maxWindowSize / 2.0;

        double searchDistanceBack = 0.0;
        for (int j = i - 1; j >= 0; j--) {
            searchDistanceBack += distances[j];
            if (searchDistanceBack > searchDistance) {
                break;
            }
        }

        // update search distance if pillar node is close to START tower node
        searchDistance = Math.min(searchDistance, searchDistanceBack);

        double searchDistanceForward = 0.0;
        for (int j = i; j < geometry.size() - 1; j++) {
            searchDistanceForward += distances[j];
            if (searchDistanceForward > searchDistance) {
                break;
            }
        }

        // update search distance if pillar node is close to END tower node
        searchDistance = Math.min(searchDistance, searchDistanceForward);

        if (searchDistance <= 0.0) {
            // there is nothing to smooth. this is an edge case where pillar nodes share exactly the same location
            // as a tower node.
            // by doing so we avoid (at least theoretically) a division by zero later in the function call
            continue;
        }

        // area under elevation curve
        double elevationArea = 0.0;

        // first going again backwards; trapezoid integration over each segment,
        // interpolating linearly at the window boundary
        double distanceBack = 0.0;
        for (int j = i - 1; j >= 0; j--) {
            double dist = distances[j];
            double searchDistLeft = searchDistance - distanceBack;
            distanceBack += dist;
            if (searchDistLeft < dist) {
                // node lies outside averaging window
                double elevationDelta = geometry.getEle(j) - geometry.getEle(j + 1);
                double elevationAtSearchDistance = geometry.getEle(j + 1) + searchDistLeft / dist * elevationDelta;
                elevationArea += searchDistLeft * (geometry.getEle(j + 1) + elevationAtSearchDistance) / 2.0;
                break;
            } else {
                elevationArea += dist * (geometry.getEle(j + 1) + geometry.getEle(j)) / 2.0;
            }
        }

        // now going forward, same trapezoid integration
        double distanceForward = 0.0;
        for (int j = i; j < geometry.size() - 1; j++) {
            double dist = distances[j];
            double searchDistLeft = searchDistance - distanceForward;
            distanceForward += dist;
            if (searchDistLeft < dist) {
                double elevationDelta = geometry.getEle(j + 1) - geometry.getEle(j);
                double elevationAtSearchDistance = geometry.getEle(j) + searchDistLeft / dist * elevationDelta;
                elevationArea += searchDistLeft * (geometry.getEle(j) + elevationAtSearchDistance) / 2.0;
                break;
            } else {
                elevationArea += dist * (geometry.getEle(j + 1) + geometry.getEle(j)) / 2.0;
            }
        }

        // average = area / full (two-sided) window width
        double elevationAverage = elevationArea / (searchDistance * 2);
        averagedElevations.put(i, elevationAverage);
    }

    // after all pillar nodes got an averaged elevation, elevations are overwritten
    averagedElevations.forEach((Consumer<IntDoubleCursor>) c -> geometry.setElevation(c.key, c.value));
}
/** On a short way only the middle pillar node is smoothed; tower node elevations are untouched. */
@Test
public void testShortWay() {
    PointList pl = new PointList(3, true);
    pl.add(47.330741060295594, 10.1571805769575, -100);
    pl.add(47.33088752836167, 10.157333651129761, -50);
    pl.add(47.33091499107897, 10.157482223121235, -200);
    EdgeElevationSmoothingMovingAverage.smooth(pl, 150.0);
    assertEquals(3, pl.size());
    assertEquals(-100, pl.getEle(0), 0.000001);
    // expected value derived from trapezoid areas over the shrunken window; loose tolerance
    assertEquals((-75 * 20 + -125 * 12) / 32.0, pl.getEle(1), 2);
    assertEquals(-200, pl.getEle(2), 0.000001);
}
/**
 * Builds the Kafka Streams topology step for SelectKey (v1): records whose new key
 * expression evaluates to null (or whose value is null) are filtered out, then the
 * stream is re-keyed with the evaluated expression.
 *
 * @param stream       the upstream holder whose stream is re-keyed
 * @param selectKey    the plan step carrying the key expression and query context
 * @param buildContext runtime context (config, function registry, processing logger)
 * @return holder for the re-keyed stream with the resolved result schema
 */
public static KStreamHolder<GenericKey> build(
    final KStreamHolder<?> stream,
    final StreamSelectKeyV1 selectKey,
    final RuntimeBuildContext buildContext
) {
    final LogicalSchema sourceSchema = stream.getSchema();

    final CompiledExpression expression = buildExpressionEvaluator(
        selectKey,
        buildContext,
        sourceSchema
    );

    final ProcessingLogger processingLogger = buildContext
        .getProcessingLogger(selectKey.getProperties().getQueryContext());

    final String errorMsg = "Error extracting new key using expression "
        + selectKey.getKeyExpression();

    // Evaluation errors are routed to the processing logger with the message above.
    final Function<GenericRow, Object> evaluator = val -> expression
        .evaluate(val, null, processingLogger, () -> errorMsg);

    final LogicalSchema resultSchema =
        new StepSchemaResolver(buildContext.getKsqlConfig(),
            buildContext.getFunctionRegistry()).resolve(selectKey, sourceSchema);

    final KStream<?, GenericRow> kstream = stream.getStream();

    // NOTE(review): the expression is evaluated twice per record (once in filter,
    // once in selectKey). Downstream tests pin the filter->selectKey topology, so
    // collapsing this would be a behavior change — confirm before optimizing.
    final KStream<GenericKey, GenericRow> rekeyed = kstream
        .filter((key, val) -> val != null && evaluator.apply(val) != null)
        .selectKey((key, val) -> GenericKey.genericKey(evaluator.apply(val)));

    return new KStreamHolder<>(
        rekeyed,
        resultSchema,
        ExecutionKeyFactory.unwindowed(buildContext)
    );
}
/** The topology is exactly filter (drop null keys/values) followed by selectKey. */
@Test
public void shouldRekeyCorrectly() {
    // When:
    final KStreamHolder<GenericKey> result = selectKey.build(planBuilder, planInfo);

    // Then:
    final InOrder inOrder = Mockito.inOrder(kstream, filteredKStream, rekeyedKstream);
    inOrder.verify(kstream).filter(any());
    inOrder.verify(filteredKStream).selectKey(any());
    inOrder.verifyNoMoreInteractions();
    assertThat(result.getStream(), is(rekeyedKstream));
}
/**
 * Builds the effective Flink configuration from the base configuration plus
 * CLI options, and pins the deployment target to the Kubernetes session executor.
 *
 * @param args command-line arguments
 * @return effective configuration for a Kubernetes session cluster
 * @throws CliArgsException if the command line cannot be parsed
 */
Configuration getEffectiveConfiguration(String[] args) throws CliArgsException {
    final CommandLine commandLine = cli.parseCommandLineOptions(args, true);

    // Base config first, then CLI-derived options override it.
    final Configuration configuration = new Configuration(baseConfiguration);
    configuration.addAll(cli.toConfiguration(commandLine));
    configuration.set(DeploymentOptions.TARGET, KubernetesSessionClusterExecutor.NAME);
    return configuration;
}
/** Even with no CLI args, the deployment target is forced to the Kubernetes session executor. */
@Test
void testKubernetesSessionCliSetsDeploymentTargetCorrectly() throws CliArgsException {
    final KubernetesSessionCli cli =
            new KubernetesSessionCli(
                    new Configuration(), confDirPath.toAbsolutePath().toString());

    final String[] args = {};
    final Configuration configuration = cli.getEffectiveConfiguration(args);

    assertThat(KubernetesSessionClusterExecutor.NAME)
            .isEqualTo(configuration.get(DeploymentOptions.TARGET));
}
/**
 * Creates a Portable upsert target bound to this descriptor's class definition.
 *
 * @param evalContext expression evaluation context (unused by this target)
 * @return a new {@link PortableUpsertTarget}
 */
@Override
public UpsertTarget create(ExpressionEvalContext evalContext) {
    return new PortableUpsertTarget(classDefinition);
}
/** The descriptor produces a PortableUpsertTarget instance. */
@Test
public void test_create() {
    PortableUpsertTargetDescriptor descriptor =
            new PortableUpsertTargetDescriptor(new ClassDefinitionBuilder(1, 2, 3).build());

    // when
    UpsertTarget target = descriptor.create(mock());

    // then
    assertThat(target).isInstanceOf(PortableUpsertTarget.class);
}
public String parseOnePartToHTML() throws IOException, SAXException, TikaException { // Only get things under html -> body -> div (class=header) XPathParser xhtmlParser = new XPathParser("xhtml", XHTMLContentHandler.XHTML); Matcher divContentMatcher = xhtmlParser.parse("/xhtml:html/xhtml:body/xhtml:div/descendant::node()"); ContentHandler handler = new MatchingContentHandler(new ToXMLContentHandler(), divContentMatcher); AutoDetectParser parser = new AutoDetectParser(); Metadata metadata = new Metadata(); try (InputStream stream = ContentHandlerExample.class.getResourceAsStream("test2.doc")) { parser.parse(stream, handler, metadata); return handler.toString(); } }
/** Only the header div content survives; document skeleton and other sections are excluded. */
@Test
public void testParseOnePartToHTML() throws IOException, SAXException, TikaException {
    String result = example
            .parseOnePartToHTML()
            .trim();
    // No document skeleton...
    assertNotContained("<html", result);
    assertNotContained("<head>", result);
    assertNotContained("<meta name=\"dc:creator\"", result);
    assertNotContained("<title>", result);
    assertNotContained("<body>", result);
    // ...only the header div's content...
    assertContains("<p class=\"header\"", result);
    assertContains("This is in the header", result);
    // ...and nothing from the document body proper.
    assertNotContained("<h1>Test Document", result);
    assertNotContained("<p>1 2 3", result);
}
/**
 * One pass of the redo task: if the gRPC connection is up, redo pending instance
 * registrations first and then pending subscriptions. Any unexpected failure is
 * logged and swallowed so the periodic task keeps running.
 */
@Override
public void run() {
    if (!redoService.isConnected()) {
        // No connection — retrying now would fail; wait for the next scheduled run.
        LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task");
        return;
    }
    try {
        // Instances before subscriptions — registration must be re-established first.
        redoForInstances();
        redoForSubscribes();
    } catch (Exception e) {
        LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e);
    }
}
/** An unregistered batch redo entry that is expected to be registered triggers a batch re-register. */
@Test
void testRunRedoRegisterBatchInstance() throws NacosException {
    BatchInstanceRedoData redoData = BatchInstanceRedoData.build(SERVICE, GROUP, Collections.singletonList(INSTANCE));
    redoData.setRegistered(false);
    redoData.setUnregistering(false);
    redoData.setExpectedRegistered(true);
    Set<InstanceRedoData> mockData = new HashSet<>();
    mockData.add(redoData);
    when(redoService.findInstanceRedoData()).thenReturn(mockData);
    redoTask.run();
    verify(clientProxy).doBatchRegisterService(SERVICE, GROUP, redoData.getInstances());
}
/**
 * Finds the query encryptor configured for the given table/column, if any.
 *
 * @param tableName  logical table name
 * @param columnName logical column name
 * @return the column's query encrypt algorithm, or empty when table or column is not encrypted
 */
@HighFrequencyInvocation
public Optional<EncryptAlgorithm> findQueryEncryptor(final String tableName, final String columnName) {
    return findEncryptTable(tableName).flatMap(optional -> optional.findQueryEncryptor(columnName));
}
/** A column without encrypt configuration yields no query encryptor. */
@Test
void assertNotFindQueryEncryptor() {
    assertFalse(new EncryptRule("foo_db", createEncryptRuleConfiguration())
            .findQueryEncryptor("t_encrypt", "invalid_col").isPresent());
}
/**
 * Puts the value for the key unless an entry for a still-live key already exists.
 * Stale (garbage-collected) entries are expunged first, so a previously collected
 * key does not block the insert.
 *
 * @param key   the lookup key; must not be null
 * @param value the value to store; must not be null
 * @return the previous value mapped to a live equal key, or null if absent
 */
@Nullable
public V putIfProbablyAbsent(K key, V value) {
    if (key == null) {
        throw new NullPointerException("key == null");
    }
    if (value == null) {
        throw new NullPointerException("value == null");
    }
    // Drop entries whose weak keys were collected before attempting the insert.
    expungeStaleEntries();
    return target.putIfAbsent(new WeakKey<K>(key, this), value);
}
/** A cleared weak key does not block re-insertion: a new entry coexists with the stale null one. */
@Test
void getOrCreate_whenSomeReferencesAreCleared() {
    map.putIfProbablyAbsent(key, "1");
    pretendGCHappened();
    map.putIfProbablyAbsent(key, "1");

    // we'd expect two distinct entries..
    assertThat(map.target.keySet())
            .extracting(WeakReference::get)
            .containsExactlyInAnyOrder(null, key);
}
/**
 * Converts a YARN {@code JobId} into the classic MapReduce {@code JobID},
 * deriving the identifier string from the application's cluster timestamp.
 *
 * @param id the YARN job id
 * @return equivalent {@code org.apache.hadoop.mapred.JobID}
 */
public static org.apache.hadoop.mapred.JobID fromYarn(JobId id) {
    String identifier = fromClusterTimeStamp(id.getAppId().getClusterTimestamp());
    return new org.apache.hadoop.mapred.JobID(identifier, id.getId());
}
/** Converting a YARN queue with one child yields a MapReduce QueueInfo with that one child. */
@Test
public void testFromYarnQueue() {
    //Define child queue
    org.apache.hadoop.yarn.api.records.QueueInfo child =
            Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
    Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

    //Define parent queue
    org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
            Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
    List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
            new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
    children.add(child); //Add one child
    Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
    Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

    //Call the function we're testing
    org.apache.hadoop.mapreduce.QueueInfo returned =
            TypeConverter.fromYarn(queueInfo, new Configuration());

    //Verify that the converted queue has the 1 child we had added
    assertThat(returned.getQueueChildren().size())
            .withFailMessage("QueueInfo children weren't properly converted")
            .isEqualTo(1);
}
/**
 * Maps an integer flag back to a boolean: {@code TRUE} -> true, {@code FALSE} -> false.
 *
 * @param value the stored flag value
 * @return the boolean equivalent
 * @throws RuntimeException if the value is neither {@code TRUE} nor {@code FALSE}
 */
public static boolean valueToBoolean(int value) {
    if (value == TRUE) {
        return true;
    }
    if (value == FALSE) {
        return false;
    }
    throw new RuntimeException("Boolean value error, must be 0 or 1");
}
// NOTE(review): despite its name this test exercises booleanToValue (the inverse
// conversion), not valueToBoolean — presumably intentional round-trip coverage; confirm.
@Test
public void testValueToBoolean() {
    assertEquals(1, BooleanUtils.booleanToValue(true));
    assertEquals(0, BooleanUtils.booleanToValue(false));
}
/**
 * Creates a KafkaIO {@code Read} transform with library defaults: no topics/partitions,
 * default consumer factory and config, unbounded record count, processing-time
 * timestamps, no offset commit on finalize, and dynamic read/redistribution disabled.
 *
 * @param <K> key type
 * @param <V> value type
 * @return a default-configured {@code Read} to be further customized via {@code with*} methods
 */
public static <K, V> Read<K, V> read() {
    return new AutoValue_KafkaIO_Read.Builder<K, V>()
        .setTopics(new ArrayList<>())
        .setTopicPartitions(new ArrayList<>())
        .setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
        .setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
        .setMaxNumRecords(Long.MAX_VALUE)
        .setCommitOffsetsInFinalizeEnabled(false)
        .setDynamicRead(false)
        .setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
        .setConsumerPollingTimeout(2L)  // seconds; default poll timeout
        .setRedistributed(false)
        .setAllowDuplicates(false)
        .setRedistributeNumKeys(0)
        .build();
}
/** Single-topic unbounded read produces all elements and reports the topic in source lineage. */
@Test
public void testUnboundedSourceWithSingleTopic() {
    // same as testUnboundedSource, but with single topic
    int numElements = 1000;
    String topic = "my_topic";
    String bootStrapServer = "none";
    KafkaIO.Read<Integer, Long> reader =
        KafkaIO.<Integer, Long>read()
            .withBootstrapServers(bootStrapServer)
            .withTopic("my_topic")
            .withConsumerFactoryFn(
                new ConsumerFactoryFn(
                    ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
            .withMaxNumRecords(numElements)
            .withKeyDeserializer(IntegerDeserializer.class)
            .withValueDeserializer(LongDeserializer.class);

    PCollection<Long> input = p.apply(reader.withoutMetadata()).apply(Values.create());

    addCountingAsserts(input, numElements);
    PipelineResult result = p.run();

    // Lineage must record the source as kafka:<bootstrap>.<topic>.
    assertThat(
        Lineage.query(result.metrics(), Lineage.Type.SOURCE),
        hasItem(String.format("kafka:%s.%s", bootStrapServer, topic)));
}
/**
 * Persists the job inside a single connection-scoped transaction, committing
 * before notifying job-stats change listeners.
 *
 * @param jobToSave the job to persist
 * @return the saved job (with any storage-assigned state)
 * @throws StorageException wrapping any SQL failure
 */
@Override
public Job save(Job jobToSave) {
    try (final Connection conn = dataSource.getConnection();
         final Transaction transaction = new Transaction(conn)) {
        final Job savedJob = jobTable(conn).save(jobToSave);
        // Commit first so listeners observe the persisted state.
        transaction.commit();
        notifyJobStatsOnChangeListeners();
        return savedJob;
    } catch (SQLException e) {
        throw new StorageException(e);
    }
}
/** Zero rows updated signals a concurrent modification, surfaced as ConcurrentJobModificationException. */
@Test
void saveJob_WhenJobIsNotSavedDueToOtherConcurrentModificationThenThrowConcurrentSqlModificationException() throws SQLException {
    when(preparedStatement.executeUpdate()).thenReturn(0);

    assertThatThrownBy(() -> jobStorageProvider.save(anEnqueuedJob().build()))
            .isInstanceOf(ConcurrentJobModificationException.class);
}
/**
 * Sets whether QoS accepts connections from foreign IPs (null leaves it unset).
 *
 * @param qosAcceptForeignIp tri-state flag: true/false/null
 * @return this builder, for chaining
 */
public ApplicationBuilder qosAcceptForeignIp(Boolean qosAcceptForeignIp) {
    this.qosAcceptForeignIp = qosAcceptForeignIp;
    return getThis();
}
/** The tri-state flag (true/false/null) round-trips through the builder. */
@Test
void qosAcceptForeignIp() {
    ApplicationBuilder builder = new ApplicationBuilder();
    builder.qosAcceptForeignIp(true);
    Assertions.assertTrue(builder.build().getQosAcceptForeignIp());
    builder.qosAcceptForeignIp(false);
    Assertions.assertFalse(builder.build().getQosAcceptForeignIp());
    builder.qosAcceptForeignIp(null);
    Assertions.assertNull(builder.build().getQosAcceptForeignIp());
}
/**
 * Clears the issue's line number, recording a field change ("line": old -> "")
 * and marking the issue as changed.
 *
 * @param issue   the issue to update
 * @param context change context used to record the field diff
 * @return true if the line was set and has been cleared, false if it was already null
 */
public boolean unsetLine(DefaultIssue issue, IssueChangeContext context) {
    Integer line = issue.line();
    if (line == null) {
        // Nothing to clear; no change recorded.
        return false;
    }
    issue.setFieldChange(context, LINE, line, "");
    issue.setLine(null);
    issue.setChanged(true);
    return true;
}
/** Clearing a set line records exactly one "line" diff (old value -> "") without notifications. */
@Test
void unset_line() {
    int line = 1 + new Random().nextInt(500);
    issue.setLine(line);

    boolean updated = underTest.unsetLine(issue, context);

    assertThat(updated).isTrue();
    assertThat(issue.isChanged()).isTrue();
    assertThat(issue.line()).isNull();
    assertThat(issue.mustSendNotifications()).isFalse();
    assertThat(issue.currentChange())
            .extracting(f -> f.diffs().size())
            .isEqualTo(1);
    FieldDiffs.Diff diff = issue.currentChange().diffs().get("line");
    assertThat(diff.oldValue()).isEqualTo(line);
    assertThat(diff.newValue()).isEqualTo("");
}
/**
 * Assembles a flat key/value configuration update from a scheduler mutation:
 * queue removals first, then additions, then per-queue updates, then global params.
 *
 * @param proposedConf the capacity scheduler configuration being mutated
 * @param mutationInfo the requested mutation (remove/add/update/global)
 * @return map of configuration keys to new values (null-valued entries mean removal)
 * @throws IOException if a queue operation is invalid (e.g. removing a non-existent queue)
 */
public static Map<String, String> constructKeyValueConfUpdate(
        CapacitySchedulerConfiguration proposedConf,
        SchedConfUpdateInfo mutationInfo) throws IOException {

    Map<String, String> updates = new HashMap<>();
    // Order matters: removals, then additions, then updates, then globals.
    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
        removeQueue(queueToRemove, proposedConf, updates);
    }
    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
        addQueue(addQueueInfo, proposedConf, updates);
    }
    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
        updateQueue(updateQueueInfo, proposedConf, updates);
    }
    for (Map.Entry<String, String> global : mutationInfo.getGlobalParams().entrySet()) {
        updates.put(global.getKey(), global.getValue());
    }
    return updates;
}
/** Removing a queue that does not exist must be rejected with an IOException. */
@Test
public void testRemoveNonExistingQueue() {
    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
    updateInfo.getRemoveQueueInfo().add("root.d");
    assertThrows(IOException.class, () -> {
        ConfigurationUpdateAssembler.constructKeyValueConfUpdate(csConfig, updateInfo);
    });
}
/**
 * Computes chunked checksums over {@code data} into {@code sums} using the native
 * CRC implementation. Delegates with verify=false (compute mode) and an empty
 * file name/position since no verification context applies.
 *
 * @param bytesPerSum  number of data bytes covered by each checksum
 * @param checksumType native checksum type id
 * @param sums         output buffer receiving the checksums (written from its position)
 * @param data         input buffer read from its position to its limit
 */
public static void calculateChunkedSums(int bytesPerSum, int checksumType,
    ByteBuffer sums, ByteBuffer data) {
    nativeComputeChunkedSums(bytesPerSum, checksumType,
        sums, sums.position(),
        data, data.position(), data.remaining(),
        "", 0, false);
}
/** Computing checksums over valid direct buffers completes without throwing. */
@Test
public void testCalculateChunkedSumsSuccess() throws ChecksumException {
    allocateDirectByteBuffers();
    fillDataAndValidChecksums();
    NativeCrc32.calculateChunkedSums(bytesPerChecksum, checksumType.id,
            checksums, data);
}
/**
 * Visits a map operator: rewrites its children via the shared shuttle helper,
 * returning a new operator only when a child actually changed.
 *
 * @param operator the map operator being visited
 * @param context  unused visitor context
 * @return the (possibly unchanged) operator
 */
@Override
public ScalarOperator visitMap(MapOperator operator, Void context) {
    return shuttleIfUpdate(operator);
}
/** When no child changes, both shuttles return the original map operator (identity rewrite). */
@Test
void testMapOperator() {
    ColumnRefOperator column1 = new ColumnRefOperator(1, INT, "id", true);
    MapOperator operator = new MapOperator(INT, Lists.newArrayList(column1, column1));
    {
        ScalarOperator newOperator = shuttle.visitMap(operator, null);
        assertEquals(operator, newOperator);
    }
    {
        ScalarOperator newOperator = shuttle2.visitMap(operator, null);
        assertEquals(operator, newOperator);
    }
}
/**
 * Sets the active farming contract (null clears it), persists it, and refreshes
 * the derived contract state.
 *
 * @param contract the produce under contract, or null for no contract
 */
public void setContract(@Nullable Produce contract) {
    this.contract = contract;
    // Persist first, then recompute state from the new contract.
    setStoredContract(contract);
    handleContractState();
}
/** A diseased cabbage patch keeps the cabbage contract in progress and exposes the DISEASED crop state. */
@Test
public void cabbageContractCabbageDiseasedAndEmptyPatch() {
    final FarmingPatch patch = farmingGuildPatches.get(Varbits.FARMING_4773);
    assertNotNull(patch);

    when(farmingTracker.predictPatch(patch))
            .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.DISEASED, 0, 2, 3));

    farmingContractManager.setContract(Produce.CABBAGE);

    assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
    assertEquals(CropState.DISEASED, farmingContractManager.getContractCropState());
}
/**
 * Sets whether the argument is a callback.
 *
 * @param callback the callback flag
 * @return this builder, for chaining
 */
public ArgumentBuilder callback(Boolean callback) {
    this.callback = callback;
    return this;
}
/** The callback flag set on the builder is reflected on the built argument. */
@Test
void callback() {
    ArgumentBuilder builder = ArgumentBuilder.newBuilder();
    builder.callback(true);
    Assertions.assertTrue(builder.build().isCallback());
    builder.callback(false);
    Assertions.assertFalse(builder.build().isCallback());
}
/**
 * Calls EC2 DescribeInstances with signed headers and parses the response into a
 * map of instance addresses (private IP -> public IP, which may be absent/null).
 *
 * @param credentials AWS credentials used to sign the request
 * @return parsed address map from the DescribeInstances response
 */
Map<String, String> describeInstances(AwsCredentials credentials) {
    final Map<String, String> queryAttributes = createAttributesDescribeInstances();
    final Map<String, String> signedHeaders = createHeaders(queryAttributes, credentials);
    final String responseXml = callAwsService(queryAttributes, signedHeaders);
    return parseDescribeInstances(responseXml);
}
/** Instances without public IPs map their private IPs to null in the result. */
@Test
public void describeInstancesNoPublicIpNoInstanceName() {
    // given
    String requestUrl = "/?Action=DescribeInstances"
            + "&Filter.1.Name=tag-value"
            + "&Filter.1.Value.1=some-tag-value"
            + "&Filter.2.Name=instance.group-name"
            + "&Filter.2.Value.1=hazelcast"
            + "&Filter.3.Name=instance-state-name&Filter.3.Value.1=running"
            + "&Version=2016-11-15";
    //language=XML
    String response = """
            <?xml version="1.0" encoding="UTF-8"?>
            <DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
              <reservationSet>
                <item>
                  <instancesSet>
                    <item>
                      <privateIpAddress>10.0.1.25</privateIpAddress>
                    </item>
                  </instancesSet>
                </item>
                <item>
                  <instancesSet>
                    <item>
                      <privateIpAddress>172.31.14.42</privateIpAddress>
                    </item>
                  </instancesSet>
                </item>
              </reservationSet>
            </DescribeInstancesResponse>""";
    stubFor(get(urlEqualTo(requestUrl))
            .withHeader("X-Amz-Date", equalTo("20200403T102518Z"))
            .withHeader("Authorization", equalTo(AUTHORIZATION_HEADER))
            .withHeader("X-Amz-Security-Token", equalTo(TOKEN))
            .willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(response)));

    // when
    Map<String, String> result = createAwsEc2Api(null, "some-tag-value").describeInstances(CREDENTIALS);

    // then: both private IPs present, each mapped to null (no public IP in the response)
    assertEquals(2, result.size());
    assertNull(result.get("10.0.1.25"));
    assertNull(result.get("172.31.14.42"));
}
/**
 * Prepares and sends fetch requests to all fetchable nodes. Success and failure
 * callbacks re-synchronize on the Fetcher before touching shared state, since they
 * fire on the network thread after this method has returned.
 *
 * @return the number of fetch requests sent
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                // Callback runs asynchronously; lock the fetcher before handling.
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
/** In READ_UNCOMMITTED mode, records from aborted transactions are still returned to the consumer. */
@Test
public void testReturnAbortedTransactionsInUncommittedMode() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));

    abortTransaction(buffer, 1L, currentOffset);

    buffer.flip();

    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));

    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
    // Aborted records are visible because isolation level is READ_UNCOMMITTED.
    assertTrue(fetchedRecords.containsKey(tp0));
}
@Override protected void refresh() { Iterable<ServerConfig> dbConfigs = serverConfigRepository.findAll(); Map<String, Object> newConfigs = Maps.newHashMap(); //default cluster's configs for (ServerConfig config : dbConfigs) { if (Objects.equals(ConfigConsts.CLUSTER_NAME_DEFAULT, config.getCluster())) { newConfigs.put(config.getKey(), config.getValue()); } } //data center's configs String dataCenter = getCurrentDataCenter(); for (ServerConfig config : dbConfigs) { if (Objects.equals(dataCenter, config.getCluster())) { newConfigs.put(config.getKey(), config.getValue()); } } //cluster's config if (!Strings.isNullOrEmpty(System.getProperty(ConfigConsts.APOLLO_CLUSTER_KEY))) { String cluster = System.getProperty(ConfigConsts.APOLLO_CLUSTER_KEY); for (ServerConfig config : dbConfigs) { if (Objects.equals(cluster, config.getCluster())) { newConfigs.put(config.getKey(), config.getValue()); } } } //put to environment for (Map.Entry<String, Object> config: newConfigs.entrySet()){ String key = config.getKey(); Object value = config.getValue(); if (this.source.get(key) == null) { logger.info("Load config from DB : {} = {}", key, value); } else if (!Objects.equals(this.source.get(key), value)) { logger.info("Load config from DB : {} = {}. Old value = {}", key, value, this.source.get(key)); } this.source.put(key, value); } }
/** After refresh, a default-cluster config value is readable from the property source. */
@Test
public void testGetDefaultConfig() {
    propertySource.refresh();

    assertEquals(propertySource.getProperty(defaultKey), defaultValue);
}
/**
 * Parses a filter expression into its list of criteria, splitting on the
 * criteria separator and parsing each piece independently.
 *
 * @param filter raw filter expression, e.g. {@code "ncloc > 10 and coverage <= 80"}
 * @return parsed criteria, in source order
 */
public static List<Criterion> parse(String filter) {
    final Iterable<String> rawCriteria = CRITERIA_SPLITTER.split(filter);
    return StreamSupport.stream(rawCriteria.spliterator(), false)
        .map(FilterParser::parseCriterion)
        .toList();
}
/** Each "key op value" clause becomes one criterion with the matching operator. */
@Test
public void parse_filter_having_operator_and_value() {
    List<Criterion> criterion = FilterParser.parse("ncloc > 10 and coverage <= 80");

    assertThat(criterion)
            .extracting(Criterion::getKey, Criterion::getOperator, Criterion::getValue)
            .containsOnly(
                    tuple("ncloc", GT, "10"),
                    tuple("coverage", LTE, "80"));
}
/**
 * Validates a component name: non-empty and at most {@code MAX_COMPONENT_NAME_LENGTH}
 * characters.
 *
 * @param name candidate component name
 * @return the same name, when valid
 * @throws IllegalArgumentException if the name is empty or too long
 */
public static String checkComponentName(String name) {
    checkArgument(!isNullOrEmpty(name), "Component name can't be empty");
    checkArgument(name.length() <= MAX_COMPONENT_NAME_LENGTH,
        "Component name length (%s) is longer than the maximum authorized (%s). '%s' was provided.",
        name.length(), MAX_COMPONENT_NAME_LENGTH, name);
    return name;
}
/** A 501-character name exceeds the limit and is rejected. */
@Test
void fail_when_name_longer_than_500_characters() {
    assertThatThrownBy(() -> ComponentValidator.checkComponentName(repeat("a", 500 + 1)))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("Component name length");
}
/**
 * Script-callback hook that recolors a chat message being built: picks username
 * and channel colors based on the message type, friendship/clan status and
 * chatbox transparency, then applies the configured message color. Reads its
 * arguments from the client's int/string stacks and writes results back in place.
 */
@VisibleForTesting
void colorChatMessage() {
    final int[] intStack = client.getIntStack();
    final String[] stringStack = client.getStringStack();
    final int size = client.getStringStackSize();
    final int isize = client.getIntStackSize();
    // Stack layout: int stack carries the message uid and split-pm flag;
    // string stack carries [..., channel, username, message, ...].
    final int uid = intStack[isize - 1];
    final boolean splitpmbox = intStack[isize - 2] == 1;

    final MessageNode messageNode = client.getMessages().get(uid);
    assert messageNode != null : "chat message build for unknown message";

    String message = stringStack[size - 2];
    final String username = stringStack[size - 3];
    final String channel = stringStack[size - 4];
    final ChatMessageType chatMessageType = messageNode.getType();

    final boolean isChatboxTransparent = client.isResized() && client.getVarbitValue(Varbits.TRANSPARENT_CHATBOX) == 1;

    Color usernameColor = null;
    Color channelColor = null;

    switch (chatMessageType) {
        // username recoloring for MODPRIVATECHAT, PRIVATECHAT and PRIVATECHATOUT
        // ChatMessageTypes is handled in the script callback event
        case TRADEREQ:
        case AUTOTYPER:
        case PUBLICCHAT:
        case MODCHAT: {
            // Strip icon tags and normalize nbsp before comparing names.
            String sanitizedUsername = Text.removeTags(username).replace('\u00A0', ' ');

            if (client.getLocalPlayer().getName().equals(sanitizedUsername)) {
                usernameColor = isChatboxTransparent ? chatColorConfig.transparentPlayerUsername() : chatColorConfig.opaquePlayerUsername();
            } else if (client.isFriended(sanitizedUsername, true)) {
                usernameColor = isChatboxTransparent ? chatColorConfig.transparentPublicFriendUsernames() : chatColorConfig.opaquePublicFriendUsernames();
            } else {
                usernameColor = isChatboxTransparent ? chatColorConfig.transparentUsername() : chatColorConfig.opaqueUsername();
            }
            break;
        }
        case FRIENDSCHAT:
        case FRIENDSCHATNOTIFICATION:
            usernameColor = isChatboxTransparent ? chatColorConfig.transparentFriendsChatUsernames() : chatColorConfig.opaqueFriendsChatUsernames();
            channelColor = isChatboxTransparent ? chatColorConfig.transparentFriendsChatChannelName() : chatColorConfig.opaqueFriendsChatChannelName();
            break;
        case CLAN_CHAT:
        case CLAN_MESSAGE:
        case CLAN_GIM_CHAT:
        case CLAN_GIM_MESSAGE:
            usernameColor = isChatboxTransparent ? chatColorConfig.transparentClanChatUsernames() : chatColorConfig.opaqueClanChatUsernames();
            channelColor = isChatboxTransparent ? chatColorConfig.transparentClanChannelName() : chatColorConfig.opaqueClanChannelName();
            break;
        case CLAN_GUEST_CHAT:
        case CLAN_GUEST_MESSAGE:
            usernameColor = isChatboxTransparent ? chatColorConfig.transparentClanChatGuestUsernames() : chatColorConfig.opaqueClanChatGuestUsernames();
            channelColor = isChatboxTransparent ? chatColorConfig.transparentClanChannelGuestName() : chatColorConfig.opaqueClanGuestChatChannelName();
            break;
    }

    if (usernameColor != null) {
        stringStack[size - 3] = ColorUtil.wrapWithColorTag(username, usernameColor);
    }

    if (channelColor != null && !Strings.isNullOrEmpty(channel)) {
        stringStack[size - 4] = ColorUtil.wrapWithColorTag(channel, channelColor);
    }

    String prefix = "";
    if (chatMessageType == ChatMessageType.CLAN_GIM_CHAT || chatMessageType == ChatMessageType.CLAN_GIM_MESSAGE) {
        message = message.substring(1); // remove |
        prefix = "|";
    }

    if (messageNode.getRuneLiteFormatMessage() != null) {
        message = formatRuneLiteMessage(messageNode.getRuneLiteFormatMessage(), chatMessageType, splitpmbox);
    }

    final Collection<ChatColor> chatColors = colorCache.get(chatMessageType);
    for (ChatColor chatColor : chatColors) {
        if (chatColor.isTransparent() != isChatboxTransparent || chatColor.getType() != ChatColorType.NORMAL || chatColor.isDefault()) {
            continue;
        }

        // Replace </col> tags in the message with the new color so embedded </col> won't reset the color
        final Color color = chatColor.getColor();
        message = ColorUtil.wrapWithColorTag(
            message.replace(ColorUtil.CLOSING_COLOR_TAG, ColorUtil.colorTag(color)),
            color);
        break;
    }

    stringStack[size - 2] = prefix + message;
}
@Test public void testPublicIronmanFriendUsernameRecolouring() { final String localPlayerName = "RuneLite"; final String friendName = "<img=3>BuddhaPuck"; final String sanitizedFriendName = "BuddhaPuck"; when(chatColorConfig.opaquePublicFriendUsernames()).thenReturn(Color.decode("#b20000")); setupVm(ChatMessageType.PUBLICCHAT, friendName, ""); // Setup friend checking Player localPlayer = mock(Player.class); when(client.isFriended(sanitizedFriendName, true)).thenReturn(true); when(client.getLocalPlayer()).thenReturn(localPlayer); when(localPlayer.getName()).thenReturn(localPlayerName); chatMessageManager.colorChatMessage(); assertEquals("<col=b20000>" + friendName + "</col>", sstack[1]); }
@Override public ExplodedPlugin explode(PluginInfo info) { try { File dir = unzipFile(info.getNonNullJarFile()); return explodeFromUnzippedDir(info, info.getNonNullJarFile(), dir); } catch (Exception e) { throw new IllegalStateException(String.format("Fail to open plugin [%s]: %s", info.getKey(), info.getNonNullJarFile().getAbsolutePath()), e); } }
@Test public void retry_on_locked_file() throws IOException { File jar = loadFile("sonar-checkstyle-plugin-2.8.jar"); File lockFile = new File(jar.getParentFile(), jar.getName() + "_unzip.lock"); try (FileOutputStream out = new FileOutputStream(lockFile)) { FileLock lock = out.getChannel().lock(); try { PluginInfo pluginInfo = PluginInfo.create(jar); assertThatExceptionOfType(IllegalStateException.class) .isThrownBy(() -> underTest.explode(pluginInfo)) .withMessage("Fail to open plugin [checkstyle]: " + jar) .withCauseExactlyInstanceOf(IOException.class); } finally { lock.release(); } } }
public String name() { return name; }
@Test void nameCannotBeEmpty() { Assertions.assertThrows(IllegalArgumentException.class, () -> DefaultBot.getDefaultBuilder().name("").build()); }
public static ControllerNodeToNodeId toNodeId() { return INSTANCE; }
@Test public final void testToNodeId() { final Iterable<ControllerNode> nodes = Arrays.asList(CN1, CN2, CN3, null); final List<NodeId> nodeIds = Arrays.asList(NID1, NID2, NID3); assertEquals(nodeIds, FluentIterable.from(nodes) .transform(toNodeId()) .filter(notNull()) .toList()); }
@Override public Serde.Deserializer deserializer(String topic, Serde.Target type) { return new Serde.Deserializer() { @SneakyThrows @Override public DeserializeResult deserialize(RecordHeaders headers, byte[] data) { try { UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data); return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of()); } catch (Exception e) { throw new ValidationException(e.getMessage()); } } }; }
@Test void deserializeNestedMessage() { var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE) .deserialize(null, getComplexProtobufMessage()); assertThat(deserialized.getResult()).isEqualTo("1: 5\n2: {\n 1: 10\n}\n"); }
public static ObjectNode convertFromGHResponseError(GHResponse ghResponse) { ObjectNode json = JsonNodeFactory.instance.objectNode(); // TODO we could make this more fine grained json.put("code", "InvalidInput"); json.put("message", ghResponse.getErrors().get(0).getMessage()); return json; }
@Test public void testError() { GHResponse rsp = hopper.route(new GHRequest(42.554851, 111.536198, 42.510071, 1.548128).setProfile(profile)); ObjectNode json = NavigateResponseConverter.convertFromGHResponseError(rsp); assertEquals("InvalidInput", json.get("code").asText()); assertTrue(json.get("message").asText().startsWith("Point 0 is out of bounds: 42.554851,111.536198")); }
@Override public PageData<Asset> findAssetsByTenantIdAndCustomerId(UUID tenantId, UUID customerId, PageLink pageLink) { return DaoUtil.toPageData(assetRepository .findByTenantIdAndCustomerId( tenantId, customerId, pageLink.getTextSearch(), DaoUtil.toPageable(pageLink))); }
@Test public void testFindAssetsByTenantIdAndCustomerId() { PageLink pageLink = new PageLink(20, 0, "ASSET_"); PageData<Asset> assets1 = assetDao.findAssetsByTenantIdAndCustomerId(tenantId1, customerId1, pageLink); assertEquals(20, assets1.getData().size()); pageLink = pageLink.nextPageLink(); PageData<Asset> assets2 = assetDao.findAssetsByTenantIdAndCustomerId(tenantId1, customerId1, pageLink); assertEquals(10, assets2.getData().size()); pageLink = pageLink.nextPageLink(); PageData<Asset> assets3 = assetDao.findAssetsByTenantIdAndCustomerId(tenantId1, customerId1, pageLink); assertEquals(0, assets3.getData().size()); }
public Object clone() { ElasticSearchBulkMeta retval = (ElasticSearchBulkMeta) super.clone(); return retval; }
@Test public void testClone() { ElasticSearchBulkMeta esbm = new ElasticSearchBulkMeta(); ElasticSearchBulkMeta esbmClone = (ElasticSearchBulkMeta) esbm.clone(); assertNotNull( esbmClone ); }
@CanIgnoreReturnValue public Replacements add(Replacement replacement) { return add(replacement, CoalescePolicy.REJECT); }
@Test public void duplicate() { Replacements replacements = new Replacements(); replacements.add(Replacement.create(42, 43, "hello")); try { replacements.add(Replacement.create(42, 43, "goodbye")); fail(); } catch (IllegalArgumentException expected) { assertThat(expected).hasMessageThat().contains("conflicts with existing replacement"); } }
public String getArgs() { return args; }
@Test @DirtiesContext public void testCreateEndpointWithArgs() throws Exception { String args = "arg1 arg2 arg3"; // can use space or %20 ExecEndpoint e = createExecEndpoint("exec:test?args=" + args.replaceAll(" ", "%20")); assertEquals(args, e.getArgs()); ExecEndpoint e2 = createExecEndpoint("exec:test?args=" + args); assertEquals(args, e2.getArgs()); }
/**
 * Opens an authenticated SSH connection, optionally through an HTTP proxy and
 * optionally using public-key authentication.
 *
 * @param useKey  when true, authenticate with the private key read from
 *                keyFilename (passPhrase is resolved against the variable
 *                space); otherwise authenticate with username/password
 * @param timeOut connect timeout in seconds; 0 means no timeout
 * @return the connected, authenticated connection
 * @throws KettleException if the key file is missing or absent, the
 *                         connection fails, or authentication is refused
 */
public static Connection OpenConnection( String serveur, int port, String username, String password, boolean useKey,
    String keyFilename, String passPhrase, int timeOut, VariableSpace space, String proxyhost, int proxyport,
    String proxyusername, String proxypassword ) throws KettleException {
    Connection conn = null;
    char[] content = null;
    boolean isAuthenticated = false;
    try {
        // perform some checks
        if ( useKey ) {
            if ( Utils.isEmpty( keyFilename ) ) {
                throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.PrivateKeyFileMissing" ) );
            }
            FileObject keyFileObject = KettleVFS.getFileObject( keyFilename );
            if ( !keyFileObject.exists() ) {
                throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.PrivateKeyNotExist", keyFilename ) );
            }
            FileContent keyFileContent = keyFileObject.getContent();
            // Read the whole key file into a char[] for the SSH library.
            CharArrayWriter charArrayWriter = new CharArrayWriter( (int) keyFileContent.getSize() );
            try ( InputStream in = keyFileContent.getInputStream() ) {
                IOUtils.copy( in, charArrayWriter );
            }
            content = charArrayWriter.toCharArray();
        }
        // Create a new connection
        conn = createConnection( serveur, port );

        /* We want to connect through a HTTP proxy */
        if ( !Utils.isEmpty( proxyhost ) ) {
            /* Now connect */
            // if the proxy requires basic authentication:
            if ( !Utils.isEmpty( proxyusername ) ) {
                conn.setProxyData( new HTTPProxyData( proxyhost, proxyport, proxyusername, proxypassword ) );
            } else {
                conn.setProxyData( new HTTPProxyData( proxyhost, proxyport ) );
            }
        }

        // and connect
        if ( timeOut == 0 ) {
            conn.connect();
        } else {
            // connect() takes milliseconds; the timeOut parameter is in seconds.
            conn.connect( null, 0, timeOut * 1000 );
        }
        // authenticate
        if ( useKey ) {
            isAuthenticated = conn.authenticateWithPublicKey( username, content, space.environmentSubstitute( passPhrase ) );
        } else {
            isAuthenticated = conn.authenticateWithPassword( username, password );
        }
        if ( isAuthenticated == false ) {
            throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.AuthenticationFailed", username ) );
        }
    } catch ( Exception e ) {
        // Something wrong happened
        // do not forget to disconnect if connected
        if ( conn != null ) {
            conn.close();
        }
        throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.ErrorConnecting", serveur, username ), e );
    }
    return conn;
}
@Test( expected = KettleException.class ) public void testOpenConnection_2() throws Exception { when( connection.authenticateWithPassword( username, password ) ).thenReturn( false ); SSHData.OpenConnection( server, port, username, password, false, null, null, 0, null, null, 0, null, null ); verify( connection ).connect(); verify( connection ).authenticateWithPassword( username, password ); }
public synchronized void connect(CertConfig certConfig) { if (channel != null) { logger.error(INTERNAL_ERROR, "", "", "Dubbo Cert Authority server is already connected."); return; } if (certConfig == null) { // No cert config, return return; } if (StringUtils.isEmpty(certConfig.getRemoteAddress())) { // No remote address configured, return return; } if (StringUtils.isNotEmpty(certConfig.getEnvType()) && !"Kubernetes".equalsIgnoreCase(certConfig.getEnvType())) { throw new IllegalArgumentException("Only support Kubernetes env now."); } // Create gRPC connection connect0(certConfig); this.certConfig = certConfig; // Try to generate cert from remote generateCert(); // Schedule refresh task scheduleRefresh(); }
// Exercises connect(): env type must be "Kubernetes" (any case) or empty; a
// null config, missing address, rejected env type, or an existing channel must
// leave the stored certConfig unchanged.
@Test
void test1() {
    FrameworkModel frameworkModel = new FrameworkModel();
    // Subclass stubs out networking so only the connect() guard logic runs.
    DubboCertManager certManager = new DubboCertManager(frameworkModel) {
        @Override
        protected void connect0(CertConfig certConfig) {
            Assertions.assertEquals("127.0.0.1:30060", certConfig.getRemoteAddress());
            Assertions.assertEquals("caCertPath", certConfig.getCaCertPath());
        }

        @Override
        protected CertPair generateCert() {
            return null;
        }

        @Override
        protected void scheduleRefresh() {}
    };

    certManager.connect(new CertConfig("127.0.0.1:30060", null, "caCertPath", "oidc"));
    Assertions.assertEquals(new CertConfig("127.0.0.1:30060", null, "caCertPath", "oidc"), certManager.certConfig);

    certManager.connect(new CertConfig("127.0.0.1:30060", "Kubernetes", "caCertPath", "oidc123"));
    Assertions.assertEquals(
        new CertConfig("127.0.0.1:30060", "Kubernetes", "caCertPath", "oidc123"), certManager.certConfig);

    certManager.connect(new CertConfig("127.0.0.1:30060", "kubernetes", "caCertPath", "oidc345"));
    Assertions.assertEquals(
        new CertConfig("127.0.0.1:30060", "kubernetes", "caCertPath", "oidc345"), certManager.certConfig);

    // Unsupported env type is rejected and does not overwrite the stored config.
    CertConfig certConfig = new CertConfig("127.0.0.1:30060", "vm", "caCertPath", "oidc");
    Assertions.assertThrows(IllegalArgumentException.class, () -> certManager.connect(certConfig));
    Assertions.assertEquals(
        new CertConfig("127.0.0.1:30060", "kubernetes", "caCertPath", "oidc345"), certManager.certConfig);

    certManager.connect(null);
    Assertions.assertEquals(
        new CertConfig("127.0.0.1:30060", "kubernetes", "caCertPath", "oidc345"), certManager.certConfig);

    certManager.connect(new CertConfig(null, null, null, null));
    Assertions.assertEquals(
        new CertConfig("127.0.0.1:30060", "kubernetes", "caCertPath", "oidc345"), certManager.certConfig);

    // Once a channel exists, connect() is a no-op.
    certManager.channel = Mockito.mock(Channel.class);
    certManager.connect(new CertConfig("error", null, "error", "error"));
    Assertions.assertEquals(
        new CertConfig("127.0.0.1:30060", "kubernetes", "caCertPath", "oidc345"), certManager.certConfig);

    frameworkModel.destroy();
}
public MessageExt viewMessage(final String addr, final String topic, final long phyoffset, final long timeoutMillis) throws RemotingException, MQBrokerException, InterruptedException { ViewMessageRequestHeader requestHeader = new ViewMessageRequestHeader(); requestHeader.setTopic(topic); requestHeader.setOffset(phyoffset); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.VIEW_MESSAGE_BY_ID, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { ByteBuffer byteBuffer = ByteBuffer.wrap(response.getBody()); MessageExt messageExt = MessageDecoder.clientDecode(byteBuffer, true); //If namespace not null , reset Topic without namespace. if (StringUtils.isNotEmpty(this.clientConfig.getNamespace())) { messageExt.setTopic(NamespaceUtil.withoutNamespace(messageExt.getTopic(), this.clientConfig.getNamespace())); } return messageExt; } default: break; } throw new MQBrokerException(response.getCode(), response.getRemark(), addr); }
@Test public void testViewMessage() throws Exception { doAnswer((Answer<RemotingCommand>) mock -> { RemotingCommand request = mock.getArgument(1); RemotingCommand response = RemotingCommand.createResponseCommand(null); MessageExt message = new MessageExt(); message.setQueueId(0); message.setFlag(12); message.setQueueOffset(0L); message.setCommitLogOffset(100L); message.setSysFlag(0); message.setBornTimestamp(System.currentTimeMillis()); message.setBornHost(new InetSocketAddress("127.0.0.1", 10)); message.setStoreTimestamp(System.currentTimeMillis()); message.setStoreHost(new InetSocketAddress("127.0.0.1", 11)); message.setBody("body".getBytes()); message.setTopic(topic); message.putUserProperty("key", "value"); response.setBody(MessageDecoder.encode(message, false)); response.makeCustomHeaderToNet(); response.setCode(ResponseCode.SUCCESS); response.setOpaque(request.getOpaque()); return response; }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong()); MessageExt messageExt = mqClientAPI.viewMessage(brokerAddr, "topic", 100L, 10000); assertThat(messageExt.getTopic()).isEqualTo(topic); }
public static DatabaseType getDatabaseType() { Optional<DatabaseType> configuredDatabaseType = findConfiguredDatabaseType(); if (configuredDatabaseType.isPresent()) { return configuredDatabaseType.get(); } MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts(); if (metaDataContexts.getMetaData().getDatabases().isEmpty()) { return TypedSPILoader.getService(DatabaseType.class, DEFAULT_FRONTEND_DATABASE_PROTOCOL_TYPE); } Optional<ShardingSphereDatabase> database = metaDataContexts.getMetaData().getDatabases().values().stream().filter(ShardingSphereDatabase::containsDataSource).findFirst(); return database.isPresent() ? database.get().getResourceMetaData().getStorageUnits().values().iterator().next().getStorageType() : TypedSPILoader.getService(DatabaseType.class, DEFAULT_FRONTEND_DATABASE_PROTOCOL_TYPE); }
@Test void assertGetDatabaseTypeWhenThrowShardingSphereConfigurationException() { ContextManager contextManager = mockContextManager(Collections.emptyMap(), new Properties()); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); assertThat(FrontDatabaseProtocolTypeFactory.getDatabaseType().getType(), is("MySQL")); }
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { // Get the mime4j configuration, or use a default one MimeConfig config = new MimeConfig.Builder().setMaxLineLen(100000).setMaxHeaderLen(100000).build(); config = context.get(MimeConfig.class, config); Detector localDetector = context.get(Detector.class); if (localDetector == null) { //lazily load this if necessary if (detector == null) { EmbeddedDocumentUtil embeddedDocumentUtil = new EmbeddedDocumentUtil(context); detector = embeddedDocumentUtil.getDetector(); } localDetector = detector; } MimeStreamParser parser = new MimeStreamParser(config, null, new DefaultBodyDescriptorBuilder()); XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata); MailContentHandler mch = new MailContentHandler(xhtml, localDetector, metadata, context, config.isStrictParsing(), extractAllAlternatives); parser.setContentHandler(mch); parser.setContentDecoding(true); parser.setNoRecurse(); xhtml.startDocument(); TikaInputStream tstream = TikaInputStream.get(stream); try { parser.parse(tstream); } catch (IOException e) { tstream.throwIfCauseOf(e); throw new TikaException("Failed to parse an email message", e); } catch (MimeException e) { // Unwrap the exception in case it was not thrown by mime4j Throwable cause = e.getCause(); if (cause instanceof TikaException) { throw (TikaException) cause; } else if (cause instanceof SAXException) { throw (SAXException) cause; } else { throw new TikaException("Failed to parse an email message", e); } } xhtml.endDocument(); }
@Test public void testI18NHeaders() { Metadata metadata = new Metadata(); InputStream stream = getStream("test-documents/testRFC822_i18nheaders"); ContentHandler handler = mock(DefaultHandler.class); try { EXTRACT_ALL_ALTERNATIVES_PARSER.parse(stream, handler, metadata, new ParseContext()); //tests correct decoding of internationalized headers, both //quoted-printable (Q) and Base64 (B). assertEquals("Keld J\u00F8rn Simonsen <keld@dkuug.dk>", metadata.get(TikaCoreProperties.CREATOR)); assertEquals("If you can read this you understand the example.", metadata.get(TikaCoreProperties.TITLE)); assertEquals("If you can read this you understand the example.", metadata.get(TikaCoreProperties.SUBJECT)); } catch (Exception e) { fail("Exception thrown: " + e.getMessage()); } }
protected final Optional<Method> getInvokerMethod(String methodName, Function<? super String, ? extends Method> mappingFunction) { if (methodName == null) { return Optional.empty(); } if (mappingFunction == null) { return Optional.ofNullable(cacheMethods.get(methodName)); } return Optional.ofNullable(cacheMethods.computeIfAbsent(methodName, mappingFunction)); }
@Test public void testCache() throws Exception { final ReflectMethodCacheSupport reflectMethodCacheSupport = new ReflectMethodCacheSupport(); String methodName = "test"; final Method toString = reflectMethodCacheSupport.getInvokerMethod(methodName, fn -> { try { return Object.class.getDeclaredMethod("toString"); } catch (NoSuchMethodException ignored) { // ignored } return null; }).get(); Assert.assertNotNull(toString.invoke(reflectMethodCacheSupport)); }
public Histogram histogram(String name) { return getOrAdd(name, MetricBuilder.HISTOGRAMS); }
@Test public void accessingAHistogramRegistersAndReusesIt() { final Histogram histogram1 = registry.histogram("thing"); final Histogram histogram2 = registry.histogram("thing"); assertThat(histogram1) .isSameAs(histogram2); verify(listener).onHistogramAdded("thing", histogram1); }
public int compareNodePositions() { if(beginPath.length == 0 && endPath.length == 0) return 0; if(beginPath.length == 0) return -1; if(endPath.length == 0) return 1; return Integer.compare(beginPath[0], endPath[0]); }
@Test public void compareNodesWithSameParent(){ final NodeModel parent = root(); final NodeModel node1 = new NodeModel("node1", map); parent.insert(node1); final NodeModel node2 = new NodeModel("node2", map); parent.insert(node2); final int compared = new NodeRelativePath(node1, node2).compareNodePositions(); assertTrue(compared < 0); }
public void tryMergeIssuesFromSourceBranchOfPullRequest(Component component, Collection<DefaultIssue> newIssues, Input<DefaultIssue> rawInput) { if (sourceBranchInputFactory.hasSourceBranchAnalysis()) { Input<DefaultIssue> sourceBranchInput = sourceBranchInputFactory.createForSourceBranch(component); DefaultTrackingInput rawPrTrackingInput = new DefaultTrackingInput(newIssues, rawInput.getLineHashSequence(), rawInput.getBlockHashSequence()); Tracking<DefaultIssue, DefaultIssue> prTracking = tracker.trackNonClosed(rawPrTrackingInput, sourceBranchInput); for (Map.Entry<DefaultIssue, DefaultIssue> pair : prTracking.getMatchedRaws().entrySet()) { issueLifecycle.copyExistingIssueFromSourceBranchToPullRequest(pair.getKey(), pair.getValue()); } } }
@Test public void tryMergeIssuesFromSourceBranchOfPullRequest_does_nothing_if_source_branch_was_not_analyzed() { when(sourceBranchInputFactory.hasSourceBranchAnalysis()).thenReturn(false); underTest.tryMergeIssuesFromSourceBranchOfPullRequest(FILE_1, rawIssuesInput.getIssues(), rawIssuesInput); verifyNoInteractions(issueLifecycle); }
public static RemoveRootNodeResult removeRootNode(Expression expr) { return findRemoveRootNodeViaScope(expr); }
@Test public void removeRootNodeTest() { assertThat(findRemoveRootNodeViaScope(expr("sum"))).isEqualTo(new RemoveRootNodeResult(of(expr("sum")), expr("sum"), expr("sum"))); assertThat(findRemoveRootNodeViaScope(expr("$a.getAge()"))).isEqualTo(new RemoveRootNodeResult(of(expr("$a")), expr("getAge()"), expr("getAge()"))); assertThat(findRemoveRootNodeViaScope(expr("$c.convert($length)"))).isEqualTo(new RemoveRootNodeResult(of(expr("$c")), expr("convert($length)"), expr("convert($length)"))); assertThat(findRemoveRootNodeViaScope(expr("$data.getValues().get(0)"))).isEqualTo(new RemoveRootNodeResult(of(expr("$data")), expr("getValues().get(0)"), expr("getValues()"))); assertThat(findRemoveRootNodeViaScope(expr("$data.getIndexes().getValues().get(0)"))).isEqualTo(new RemoveRootNodeResult(of(expr("$data")), expr("getIndexes().getValues().get(0)"), expr("getIndexes()"))); }
public static GtidSet mergeGtidSetInto(GtidSet base, GtidSet toMerge) { Map<String, GtidSet.UUIDSet> newSet = new HashMap<>(); base.getUUIDSets().forEach(uuidSet -> newSet.put(uuidSet.getUUID(), uuidSet)); for (GtidSet.UUIDSet uuidSet : toMerge.getUUIDSets()) { if (!newSet.containsKey(uuidSet.getUUID())) { newSet.put(uuidSet.getUUID(), uuidSet); } } return new GtidSet(newSet); }
@Test void testMergingGtidSets() { GtidSet base = new GtidSet("A:1-100"); GtidSet toMerge = new GtidSet("A:1-10"); assertThat(mergeGtidSetInto(base, toMerge).toString()).isEqualTo("A:1-100"); base = new GtidSet("A:1-100"); toMerge = new GtidSet("B:1-10"); assertThat(mergeGtidSetInto(base, toMerge).toString()).isEqualTo("A:1-100,B:1-10"); base = new GtidSet("A:1-100,C:1-100"); toMerge = new GtidSet("A:1-10,B:1-10"); assertThat(mergeGtidSetInto(base, toMerge).toString()).isEqualTo("A:1-100,B:1-10,C:1-100"); }
@Bean public PluginDataHandler motanPluginDataHandler() { return new MotanPluginDataHandler(); }
@Test public void testMotanPluginDataHandler() { applicationContextRunner.run(context -> { PluginDataHandler handler = context.getBean("motanPluginDataHandler", PluginDataHandler.class); assertNotNull(handler); } ); }
static void setHeader(Message message, String name, String value) { MessageProperties properties = message.getMessageProperties(); if (properties == null) return; properties.setHeader(name, value); }
@Test void setHeader_replace() { message.getMessageProperties().setHeader("b3", (byte) 0); MessageHeaders.setHeader(message, "b3", "1"); assertThat((String) message.getMessageProperties().getHeader("b3")) .isEqualTo("1"); }
@PostMapping @PreAuthorize("hasAnyAuthority('ADMIN')") public CustomResponse<String> createProduct(@RequestBody @Valid final ProductCreateRequest productCreateRequest) { final Product createdProduct = productCreateService .createProduct(productCreateRequest); return CustomResponse.successOf(createdProduct.getId()); }
@Test void givenProductCreateRequest_whenProductCreatedFromAdmin_thenReturnCustomResponseWithProductId() throws Exception { // Given String productName = "Test Product"; ProductCreateRequest productCreateRequest = ProductCreateRequest.builder() .name(productName) .unitPrice(BigDecimal.valueOf(12)) .amount(BigDecimal.valueOf(5)) .build(); Product expected = Product.builder() .id(UUID.randomUUID().toString()) .name(productCreateRequest.getName()) .unitPrice(productCreateRequest.getUnitPrice()) .amount(productCreateRequest.getAmount()) .build(); // When when(productCreateService.createProduct(any(ProductCreateRequest.class))).thenReturn(expected); // Then mockMvc.perform(MockMvcRequestBuilders.post("/api/v1/products") .contentType(MediaType.APPLICATION_JSON) .content(objectMapper.writeValueAsString(productCreateRequest)) .header(HttpHeaders.AUTHORIZATION, "Bearer " + mockAdminToken.getAccessToken())) .andDo(MockMvcResultHandlers.print()) .andExpect(MockMvcResultMatchers.status().isOk()) .andExpect(MockMvcResultMatchers.jsonPath("$.httpStatus").value("OK")) .andExpect(MockMvcResultMatchers.jsonPath("$.isSuccess").value(true)) .andExpect(MockMvcResultMatchers.jsonPath("$.response").value(expected.getId())); // Verify verify(productCreateService, times(1)).createProduct(any(ProductCreateRequest.class)); }
@Override public void run() { if (processor != null) { processor.execute(); } else { if (!beforeHook()) { logger.info("before-feature hook returned [false], aborting: {}", this); } else { scenarios.forEachRemaining(this::processScenario); } afterFeature(); } }
@Test void testOutlineGenerator() { run("outline-generator.feature"); }
@Override public void isNotEqualTo(@Nullable Object expected) { super.isNotEqualTo(expected); }
@Test public void isNotEqualTo_WithoutToleranceParameter_Success_PlusMinusZero() { assertThat(array(0.0f)).isNotEqualTo(array(-0.0f)); }
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image) throws IOException { if (isGrayImage(image)) { return createFromGrayImage(image, document); } // We try to encode the image with predictor if (USE_PREDICTOR_ENCODER) { PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode(); if (pdImageXObject != null) { if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE && pdImageXObject.getBitsPerComponent() < 16 && image.getWidth() * image.getHeight() <= 50 * 50) { // also create classic compressed image, compare sizes PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document); if (pdImageXObjectClassic.getCOSObject().getLength() < pdImageXObject.getCOSObject().getLength()) { pdImageXObject.getCOSObject().close(); return pdImageXObjectClassic; } else { pdImageXObjectClassic.getCOSObject().close(); } } return pdImageXObject; } } // Fallback: We export the image as 8-bit sRGB and might lose color information return createFromRGBImage(image, document); }
@Test void testCreateLosslessFromImageCMYK() throws IOException { PDDocument document = new PDDocument(); BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png")); final ColorSpace targetCS = new ICC_ColorSpace(ICC_Profile .getInstance(this.getClass().getResourceAsStream("/org/apache/pdfbox/resources/icc/ISOcoated_v2_300_bas.icc"))); ColorConvertOp op = new ColorConvertOp(image.getColorModel().getColorSpace(), targetCS, null); BufferedImage imageCMYK = op.filter(image, null); PDImageXObject ximage = LosslessFactory.createFromImage(document, imageCMYK); validate(ximage, 8, imageCMYK.getWidth(), imageCMYK.getHeight(), "png", "ICCBased"); doWritePDF(document, ximage, TESTRESULTSDIR, "cmyk.pdf"); // still slight difference of 1 color level //checkIdent(imageCMYK, ximage.getImage()); }
void append(Buffer buffer, int subpartitionId, boolean flush) { subpartitionCacheManagers[subpartitionId].append(buffer); increaseNumCachedBytesAndCheckFlush(buffer.readableBytes(), flush); }
@Test void testAppend() { int numAddBuffers = 100; int maxBufferSizeBytes = 100; int subpartitionId = 0; Random random = new Random(); TestingTieredStorageMemoryManager memoryManager = new TestingTieredStorageMemoryManager.Builder().build(); AtomicInteger numReceivedBuffers = new AtomicInteger(0); AtomicInteger numReceivedBytes = new AtomicInteger(0); TestingPartitionFileWriter partitionFileWriter = new TestingPartitionFileWriter.Builder() .setWriteFunction( (partitionId, subpartitionBufferContexts) -> { Tuple2<Integer, Integer> numBuffersAndBytes = getNumReceivedBuffersAndBytes( subpartitionBufferContexts); numReceivedBuffers.getAndAdd(numBuffersAndBytes.f0); numReceivedBytes.getAndAdd(numBuffersAndBytes.f1); return FutureUtils.completedVoidFuture(); }) .build(); DiskCacheManager diskCacheManager = new DiskCacheManager( TieredStorageIdMappingUtils.convertId(new ResultPartitionID()), 1, 1024, memoryManager, partitionFileWriter); // Append the buffers to the disk cache manager int numExpectBytes = 0; for (int i = 0; i < numAddBuffers; i++) { int bufferSizeBytes = random.nextInt(maxBufferSizeBytes) + 1; numExpectBytes += bufferSizeBytes; diskCacheManager.append( BufferBuilderTestUtils.buildSomeBuffer(bufferSizeBytes), subpartitionId, true); } assertThat(diskCacheManager.getBufferIndex(subpartitionId)).isEqualTo(numAddBuffers); diskCacheManager.close(); assertThat(numReceivedBuffers).hasValue(numAddBuffers); assertThat(numReceivedBytes).hasValue(numExpectBytes); }
@Override protected void sendHealthCheck() { send(HEALTH_CHECK_REQUEST); }
@Test public void testSendHealthCheck() { TestGetWorkMetadataRequestObserver requestObserver = Mockito.spy(new TestGetWorkMetadataRequestObserver()); GetWorkerMetadataTestStub testStub = new GetWorkerMetadataTestStub(requestObserver); stream = getWorkerMetadataTestStream(testStub, 0, new TestWindmillEndpointsConsumer()); stream.sendHealthCheck(); verify(requestObserver).onNext(WorkerMetadataRequest.getDefaultInstance()); }
@Override public void pluginUnLoaded(GoPluginDescriptor pluginDescriptor) { metadataStore.removeMetadataFor(pluginDescriptor.id()); }
@Test public void shouldRemoveMetadataOnPluginUnLoadedCallback() { GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id("plugin-id").isBundledPlugin(true).build(); PluginSettingsMetadataStore.getInstance().addMetadataFor(pluginDescriptor.id(), PluginConstants.NOTIFICATION_EXTENSION, new PluginSettingsConfiguration(), "template"); metadataLoader.pluginUnLoaded(pluginDescriptor); assertThat(PluginSettingsMetadataStore.getInstance().hasPlugin(pluginDescriptor.id())).isFalse(); }
@ThriftField(1) public List<PrestoThriftRange> getRanges() { return ranges; }
@Test public void testFromValueSetOfRangesBounded() { PrestoThriftValueSet thriftValueSet = fromValueSet(ValueSet.ofRanges( range(BIGINT, -10L, true, -1L, false), range(BIGINT, -1L, false, 100L, true))); assertNotNull(thriftValueSet.getRangeValueSet()); assertEquals(thriftValueSet.getRangeValueSet().getRanges(), ImmutableList.of( new PrestoThriftRange(new PrestoThriftMarker(longValue(-10), EXACTLY), new PrestoThriftMarker(longValue(-1), BELOW)), new PrestoThriftRange(new PrestoThriftMarker(longValue(-1), ABOVE), new PrestoThriftMarker(longValue(100), EXACTLY)))); }
@Override public String getName() { return TransformFunctionType.OR.getName(); }
@Test public void testOrNullLiteral() { ExpressionContext expression = RequestContextUtils.getExpression(String.format("or(%s,null)", INT_SV_COLUMN)); TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); Assert.assertTrue(transformFunction instanceof OrOperatorTransformFunction); Assert.assertEquals(transformFunction.getName(), TransformFunctionType.OR.getName()); int[] expectedValues = new int[NUM_ROWS]; RoaringBitmap roaringBitmap = new RoaringBitmap(); for (int i = 0; i < NUM_ROWS; i++) { if (_intSVValues[i] != 0) { expectedValues[i] = 1; } else { roaringBitmap.add(i); } } testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap); }
public byte[] getXAttr(Path path, String name) throws IOException { return retrieveHeaders(path, INVOCATION_XATTR_GET_NAMED).get(name); }
@Test public void test404() throws Throwable { intercept(FileNotFoundException.class, () -> headerProcessing.getXAttr(new Path(FINAL_FILE), XA_MAGIC_MARKER)); }
static boolean fieldMatchCaseInsensitive(Object repoObj, Object filterObj) { return fieldMatch(repoObj, filterObj) || compareIgnoreCaseOnlyIfStringType(repoObj, filterObj); }
@Test public void testFieldMatchWithNonEqualObjectsShouldReturnFalse() { assertFalse(Utilities.fieldMatchCaseInsensitive("repoObject", "differentObject")); }
@Override protected void route(List<SendingMailbox> sendingMailboxes, TransferableBlock block) throws Exception { sendBlock(sendingMailboxes.get(0), block); }
@Test(expectedExceptions = IllegalArgumentException.class) public void shouldThrowWhenSingletonWithMultipleMailboxes() throws Exception { // Given: ImmutableList<SendingMailbox> destinations = ImmutableList.of(_mailbox1, _mailbox3); // When: new SingletonExchange(destinations, TransferableBlockUtils::splitBlock).route(destinations, _block); }
static void setStaticGetter(final CompilationDTO<RegressionModel> compilationDTO, final ClassOrInterfaceDeclaration modelTemplate, final String nestedTable) { KiePMMLModelFactoryUtils.initStaticGetter(compilationDTO, modelTemplate); final BlockStmt body = getMethodDeclarationBlockStmt(modelTemplate, GET_MODEL); final VariableDeclarator variableDeclarator = getVariableDeclarator(body, TO_RETURN).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, TO_RETURN, body))); final MethodCallExpr initializer = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, TO_RETURN, body))) .asMethodCallExpr(); MethodCallExpr methodCallExpr = new MethodCallExpr(); methodCallExpr.setScope(new NameExpr(nestedTable)); methodCallExpr.setName(GETKIEPMML_TABLE); getChainedMethodCallExprFrom("withAbstractKiePMMLTable", initializer).setArgument(0, methodCallExpr); }
// Applies setStaticGetter to a cloned template and compares the resulting GET_MODEL
// method against a golden source file (both textually and structurally).
@Test void setStaticGetter() throws IOException { String nestedTable = "NestedTable"; MINING_FUNCTION miningFunction = MINING_FUNCTION.byName(regressionModel.getMiningFunction().value()); final ClassOrInterfaceDeclaration modelTemplate = MODEL_TEMPLATE.clone(); final CommonCompilationDTO<RegressionModel> source = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, regressionModel, new PMMLCompilationContextMock(), "fileName"); final RegressionCompilationDTO compilationDTO = RegressionCompilationDTO.fromCompilationDTORegressionTablesAndNormalizationMethod(source, new ArrayList<>(), regressionModel.getNormalizationMethod()); KiePMMLRegressionModelFactory.setStaticGetter(compilationDTO, modelTemplate, nestedTable); Map<Integer, Expression> superInvocationExpressionsMap = new HashMap<>(); superInvocationExpressionsMap.put(0, new NameExpr(String.format("\"%s\"", regressionModel.getModelName()))); Map<String, Expression> assignExpressionMap = new HashMap<>(); assignExpressionMap.put("targetField", new StringLiteralExpr(targetMiningField.getName())); assignExpressionMap.put("miningFunction", new NameExpr(miningFunction.getClass().getName() + "." + miningFunction.name())); assignExpressionMap.put("pmmlMODEL", new NameExpr(PMML_MODEL.class.getName() + "." + PMML_MODEL.REGRESSION_MODEL.name())); MethodCallExpr methodCallExpr = new MethodCallExpr(); methodCallExpr.setScope(new NameExpr(nestedTable)); methodCallExpr.setName(GETKIEPMML_TABLE); assignExpressionMap.put("regressionTable", methodCallExpr); MethodDeclaration retrieved = modelTemplate.getMethodsByName(GET_MODEL).get(0); String text = getFileContent(TEST_01_SOURCE); MethodDeclaration expected = JavaParserUtils.parseMethod(text); assertThat(expected.toString()).isEqualTo(retrieved.toString()); assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue(); }
/**
 * Picks a random physical data source name.
 * The underlying helper returns a pair; index 0 is the database name and
 * index 1 is the data source name — only the latter is returned here.
 */
public String getRandomPhysicalDataSourceName() {
    String[] databaseAndDataSource = getRandomPhysicalDatabaseAndDataSourceName();
    return databaseAndDataSource[1];
}
// The randomly chosen data source name must be one of the known candidates.
@Test void assertGetRandomPhysicalDataSourceNameFromContextManager() { String actual = databaseConnectionManager.getRandomPhysicalDataSourceName(); assertTrue(Arrays.asList(DefaultDatabase.LOGIC_NAME, "ds", "invalid_ds").contains(actual)); }
/**
 * Turns caching on by raising the CACHING_ENABLED flag.
 * NOTE(review): presumably a per-thread/atomic flag consumed elsewhere — the
 * holder type is declared outside this block; confirm scope against its declaration.
 */
@Override
public void enableCaching() {
    // Autoboxing of 'true' yields the same Boolean.TRUE instance as the explicit constant.
    CACHING_ENABLED.set(true);
}
// Exhaustively verifies that, with caching enabled, the wrapping DbSession delegates
// every SqlSession operation (rollback/commit/select*/insert/update/delete/flush/
// clearCache/getConfiguration/getMapper/getConnection) to the underlying MyBatis
// session — close() being the one method intentionally not delegated.
@Test void openSession_with_caching_returns_wrapper_of_MyBatis_DbSession_which_delegates_all_methods_but_close() { boolean batchOrRegular = random.nextBoolean(); underTest.enableCaching(); verifyFirstDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { dbSession.rollback(); verify(myBatisDbSession).rollback(); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { boolean flag = random.nextBoolean(); dbSession.rollback(flag); verify(myBatisDbSession).rollback(flag); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { dbSession.commit(); verify(myBatisDbSession).commit(); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { boolean flag = random.nextBoolean(); dbSession.commit(flag); verify(myBatisDbSession).commit(flag); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); dbSession.selectOne(str); verify(myBatisDbSession).selectOne(str); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object object = new Object(); dbSession.selectOne(str, object); verify(myBatisDbSession).selectOne(str, object); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); dbSession.selectList(str); verify(myBatisDbSession).selectList(str); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object object = new Object(); dbSession.selectList(str, object); verify(myBatisDbSession).selectList(str, object); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object parameter = new Object(); RowBounds rowBounds = new RowBounds(); dbSession.selectList(str, parameter, rowBounds); verify(myBatisDbSession).selectList(str, parameter, rowBounds); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); String 
mapKey = randomAlphabetic(10); dbSession.selectMap(str, mapKey); verify(myBatisDbSession).selectMap(str, mapKey); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object parameter = new Object(); String mapKey = randomAlphabetic(10); dbSession.selectMap(str, parameter, mapKey); verify(myBatisDbSession).selectMap(str, parameter, mapKey); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object parameter = new Object(); String mapKey = randomAlphabetic(10); RowBounds rowBounds = new RowBounds(); dbSession.selectMap(str, parameter, mapKey, rowBounds); verify(myBatisDbSession).selectMap(str, parameter, mapKey, rowBounds); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); ResultHandler handler = mock(ResultHandler.class); dbSession.select(str, handler); verify(myBatisDbSession).select(str, handler); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object parameter = new Object(); ResultHandler handler = mock(ResultHandler.class); dbSession.select(str, parameter, handler); verify(myBatisDbSession).select(str, parameter, handler); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object parameter = new Object(); ResultHandler handler = mock(ResultHandler.class); RowBounds rowBounds = new RowBounds(); dbSession.select(str, parameter, rowBounds, handler); verify(myBatisDbSession).select(str, parameter, rowBounds, handler); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); dbSession.insert(str); verify(myBatisDbSession).insert(str); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object object = new Object(); dbSession.insert(str, object); verify(myBatisDbSession).insert(str, 
object); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); dbSession.update(str); verify(myBatisDbSession).update(str); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object object = new Object(); dbSession.update(str, object); verify(myBatisDbSession).update(str, object); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); dbSession.delete(str); verify(myBatisDbSession).delete(str); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { String str = randomAlphabetic(10); Object object = new Object(); dbSession.delete(str, object); verify(myBatisDbSession).delete(str, object); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { dbSession.flushStatements(); verify(myBatisDbSession).flushStatements(); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { dbSession.clearCache(); verify(myBatisDbSession).clearCache(); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { Configuration expected = mock(Configuration.class); when(myBatisDbSession.getConfiguration()).thenReturn(expected); assertThat(dbSession.getConfiguration()).isSameAs(expected); verify(myBatisDbSession).getConfiguration(); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { Class<Object> clazz = Object.class; Object expected = new Object(); when(myBatisDbSession.getMapper(clazz)).thenReturn(expected); assertThat(dbSession.getMapper(clazz)).isSameAs(expected); verify(myBatisDbSession).getMapper(clazz); }); verifyDelegation(batchOrRegular, (myBatisDbSession, dbSession) -> { Connection connection = mock(Connection.class); when(myBatisDbSession.getConnection()).thenReturn(connection); assertThat(dbSession.getConnection()).isSameAs(connection); verify(myBatisDbSession).getConnection(); }); }
/**
 * Returns the CI provider name held by this configuration.
 */
@Override
public String getCiName() {
    return this.ciName;
}
// An empty CI configuration reports the sentinel name "undetected".
@Test public void getNam_for_undetected_ci() { assertThat(new CiConfigurationProvider.EmptyCiConfiguration().getCiName()).isEqualTo("undetected"); }
/**
 * Determines whether {@code text} is a WebSocket {@code <open/>} element by
 * parsing it and comparing the root element's qualified name against
 * {@link WebSocketOpenElement#QNAME}.
 *
 * @param text the raw XML text received on the WebSocket
 * @return true if the text parses and its root QName is the open element; false
 *         when parsing fails (the failure is logged at WARNING level)
 */
static boolean isOpenElement(String text) {
    try {
        // Call equals on the parsed QName (not the constant) to keep the original
        // call order — a null QName would propagate an NPE exactly as before.
        return PacketParserUtils.getParserFor(text)
                .getQName()
                .equals(WebSocketOpenElement.QNAME);
    } catch (XmlPullParserException | IOException e) {
        LOGGER.log(Level.WARNING, "Could not inspect \"" + text + "\" for open element", e);
        return false;
    }
}
// Both compact and expanded open elements are recognized; a plain stream open is not.
@Test public void isOpenElementTest() { assertTrue(AbstractWebSocket.isOpenElement(OPEN_ELEMENT)); assertTrue(AbstractWebSocket.isOpenElement(OPEN_ELEMENT_EXPANDED)); assertFalse(AbstractWebSocket.isOpenElement(OPEN_STREAM)); }
/**
 * Returns a page of SMS templates matching the query conditions.
 * Filtering and pagination are fully delegated to the mapper layer.
 *
 * @param pageReqVO paging and filter parameters
 */
@Override
public PageResult<SmsTemplateDO> getSmsTemplatePage(SmsTemplatePageReqVO pageReqVO) {
    return smsTemplateMapper.selectPage(pageReqVO);
}
// Inserts one matching template plus one near-miss per filter field (type, status,
// code, content, apiTemplateId, channelId, createTime) and asserts that paging
// returns exactly the single fully-matching row.
@Test public void testGetSmsTemplatePage() { // mock 数据 SmsTemplateDO dbSmsTemplate = randomPojo(SmsTemplateDO.class, o -> { // 等会查询到 o.setType(SmsTemplateTypeEnum.PROMOTION.getType()); o.setStatus(CommonStatusEnum.ENABLE.getStatus()); o.setCode("tudou"); o.setContent("芋道源码"); o.setApiTemplateId("yunai"); o.setChannelId(1L); o.setCreateTime(buildTime(2021, 11, 11)); }); smsTemplateMapper.insert(dbSmsTemplate); // 测试 type 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setType(SmsTemplateTypeEnum.VERIFICATION_CODE.getType()))); // 测试 status 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()))); // 测试 code 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setCode("yuanma"))); // 测试 content 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setContent("源码"))); // 测试 apiTemplateId 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setApiTemplateId("nai"))); // 测试 channelId 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setChannelId(2L))); // 测试 createTime 不匹配 smsTemplateMapper.insert(ObjectUtils.cloneIgnoreId(dbSmsTemplate, o -> o.setCreateTime(buildTime(2021, 12, 12)))); // 准备参数 SmsTemplatePageReqVO reqVO = new SmsTemplatePageReqVO(); reqVO.setType(SmsTemplateTypeEnum.PROMOTION.getType()); reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus()); reqVO.setCode("tu"); reqVO.setContent("芋道"); reqVO.setApiTemplateId("yu"); reqVO.setChannelId(1L); reqVO.setCreateTime(buildBetweenTime(2021, 11, 1, 2021, 12, 1)); // 调用 PageResult<SmsTemplateDO> pageResult = smsTemplateService.getSmsTemplatePage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbSmsTemplate, pageResult.getList().get(0)); }
/**
 * Converts the plugin's check-connection JSON response body into a {@link Result}
 * via the shared JSON result handler.
 *
 * @param responseBody raw JSON returned by the SCM plugin
 */
@Override
public Result responseMessageForCheckConnectionToSCM(String responseBody) {
    return jsonResultMessageHandler.toResult(responseBody);
}
// Status-only responses (no "messages" field) map to success/failure with empty message lists.
@Test public void shouldHandleNullMessagesForCheckSCMConnectionResponse() throws Exception { assertSuccessResult(messageHandler.responseMessageForCheckConnectionToSCM("{\"status\":\"success\"}"), new ArrayList<>()); assertFailureResult(messageHandler.responseMessageForCheckConnectionToSCM("{\"status\":\"failure\"}"), new ArrayList<>()); }
/**
 * Admin endpoint returning a namespace by name, decorated with member-management URLs.
 * Responds 404 with an error body when the namespace does not exist, and maps
 * {@link ErrorResultException} to its own response entity.
 *
 * @param namespaceName path variable identifying the namespace
 */
@GetMapping(
    path = "/admin/namespace/{namespaceName}",
    produces = MediaType.APPLICATION_JSON_VALUE
)
public ResponseEntity<NamespaceJson> getNamespace(@PathVariable String namespaceName) {
    try {
        // Only admins may inspect arbitrary namespaces.
        admins.checkAdminUser();
        var result = local.getNamespace(namespaceName);
        var baseUrl = UrlUtil.getBaseUrl();
        // Attach admin-side navigation URLs for member listing and role changes.
        result.membersUrl = UrlUtil.createApiUrl(baseUrl, "admin", "namespace", result.name, "members");
        result.roleUrl = UrlUtil.createApiUrl(baseUrl, "admin", "namespace", result.name, "change-member");
        return ResponseEntity.ok(result);
    } catch (NotFoundException exc) {
        return new ResponseEntity<>(
                NamespaceJson.error("Namespace not found: " + namespaceName),
                HttpStatus.NOT_FOUND);
    } catch (ErrorResultException exc) {
        return exc.toResponseEntity(NamespaceJson.class);
    }
}
// As an admin user, fetching an existing namespace returns 200 with its JSON representation.
@Test public void testGetNamespace() throws Exception { mockAdminUser(); mockNamespace(); mockMvc.perform(get("/admin/namespace/{namespace}", "foobar") .with(user("admin_user").authorities(new SimpleGrantedAuthority(("ROLE_ADMIN")))) .with(csrf().asHeader())) .andExpect(status().isOk()) .andExpect(content().json(namespaceJson(n -> { n.name = "foobar"; }))); }
/**
 * Builds a pagination context from row-number predicates.
 * When the projections contain no row-number alias, or no predicate references
 * that alias, an empty pagination context (null offset/row-count) is returned.
 *
 * @param expressions        where-clause expressions to scan for predicates
 * @param projectionsContext projections used to locate the row-number alias
 * @param params             statement parameters forwarded into the context
 */
public PaginationContext createPaginationContext(final Collection<ExpressionSegment> expressions, final ProjectionsContext projectionsContext, final List<Object> params) {
    Optional<String> rowNumberAlias = findRowNumberAlias(projectionsContext);
    if (rowNumberAlias.isPresent()) {
        // Flatten every expression into its AND-predicate components.
        Collection<AndPredicate> andPredicates = expressions.stream()
                .map(ExpressionExtractUtils::getAndPredicates)
                .flatMap(Collection::stream)
                .collect(Collectors.toList());
        Collection<BinaryOperationExpression> rowNumberPredicates = getRowNumberPredicates(andPredicates, rowNumberAlias.get());
        if (!rowNumberPredicates.isEmpty()) {
            return createPaginationWithRowNumber(rowNumberPredicates, params);
        }
    }
    // No usable row-number predicate: pagination context with no offset or row count.
    return new PaginationContext(null, null, params);
}
// A "rownum > ?" predicate with a parameter marker yields a ParameterMarkerRowNumberValueSegment
// offset and no row-count segment.
@Test void assertCreatePaginationContextWhenParameterMarkerRowNumberValueSegment() { Projection projectionWithRowNumberAlias = new ColumnProjection(null, ROW_NUMBER_COLUMN_NAME, ROW_NUMBER_COLUMN_ALIAS, mock(DatabaseType.class)); ProjectionsContext projectionsContext = new ProjectionsContext(0, 0, false, Collections.singleton(projectionWithRowNumberAlias)); ColumnSegment left = new ColumnSegment(0, 10, new IdentifierValue(ROW_NUMBER_COLUMN_NAME)); ParameterMarkerExpressionSegment right = new ParameterMarkerExpressionSegment(0, 10, 0); BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, left, right, ">", null); PaginationContext paginationContext = new RowNumberPaginationContextEngine(new OracleDatabaseType()) .createPaginationContext(Collections.singletonList(expression), projectionsContext, Collections.singletonList(1)); Optional<PaginationValueSegment> offSetSegmentPaginationValue = paginationContext.getOffsetSegment(); assertTrue(offSetSegmentPaginationValue.isPresent()); assertThat(offSetSegmentPaginationValue.get(), instanceOf(ParameterMarkerRowNumberValueSegment.class)); assertFalse(paginationContext.getRowCountSegment().isPresent()); }