focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public ConnectionFileName createChildName( String name, FileType type ) {
  // Build the child's absolute path by appending the (possibly percent-encoded)
  // child name to the parent path, which is normalized to end with a separator.
  String parentPrefix = getConnectionFileNameUtils().ensureTrailingSeparator( getPath() );
  return new ConnectionFileName( connection, parentPrefix + name, type );
}
@Test
public void testCreateChildNameRespectsPercentEncodingOfChildName() {
  // A percent-encoded child name must appear verbatim in the resulting URI.
  ConnectionFileName parent = new ConnectionFileName( "connection", "/", FileType.FOLDER );
  ConnectionFileName child = parent.createChildName( "child%25%20folder", FileType.FOLDER );
  assertEquals( "pvfs://connection/child%25%20folder", child.getURI() );
}
/**
 * Scans the ideal state for this realtime table and reports partition groups that have no
 * CONSUMING segment. Returns a {@link MissingSegmentInfo} with the total count, the count of
 * (apparently) new partition groups, and the max duration info for the affected groups.
 *
 * Two modes, depending on whether stream offsets could be fetched:
 * - offsets available: a group is only counted when unconsumed messages exist on the stream
 *   (completed segment's end offset is behind the stream's largest offset);
 * - offsets unavailable: every group with a completed but no consuming segment is counted
 *   (groups that reached end-of-life cannot be distinguished and are reported too).
 */
@VisibleForTesting
MissingSegmentInfo findMissingSegments(Map<String, Map<String, String>> idealStateMap, Instant now) {
  // create the maps
  Map<Integer, LLCSegmentName> partitionGroupIdToLatestConsumingSegmentMap = new HashMap<>();
  Map<Integer, LLCSegmentName> partitionGroupIdToLatestCompletedSegmentMap = new HashMap<>();
  idealStateMap.forEach((segmentName, instanceToStatusMap) -> {
    LLCSegmentName llcSegmentName = LLCSegmentName.of(segmentName);
    if (llcSegmentName != null) {
      // Skip the uploaded realtime segments that don't conform to llc naming
      if (instanceToStatusMap.containsValue(SegmentStateModel.CONSUMING)) {
        updateMap(partitionGroupIdToLatestConsumingSegmentMap, llcSegmentName);
      } else if (instanceToStatusMap.containsValue(SegmentStateModel.ONLINE)) {
        updateMap(partitionGroupIdToLatestCompletedSegmentMap, llcSegmentName);
      }
    }
  });
  MissingSegmentInfo missingSegmentInfo = new MissingSegmentInfo();
  if (!_partitionGroupIdToLargestStreamOffsetMap.isEmpty()) {
    _partitionGroupIdToLargestStreamOffsetMap.forEach((partitionGroupId, largestStreamOffset) -> {
      if (!partitionGroupIdToLatestConsumingSegmentMap.containsKey(partitionGroupId)) {
        LLCSegmentName latestCompletedSegment = partitionGroupIdToLatestCompletedSegmentMap.get(partitionGroupId);
        if (latestCompletedSegment == null) {
          // There's no consuming or completed segment for this partition group. Possibilities:
          // 1) it's a new partition group that has not yet been detected
          // 2) the first consuming segment has been deleted from ideal state manually
          missingSegmentInfo._newPartitionGroupCount++;
          missingSegmentInfo._totalCount++;
        } else {
          // Completed segment is available, but there's no consuming segment.
          // Note that there is no problem in case the partition group has reached its end of life.
          SegmentZKMetadata segmentZKMetadata = _segmentMetadataFetcher
              .fetchSegmentZkMetadata(_realtimeTableName, latestCompletedSegment.getSegmentName());
          StreamPartitionMsgOffset completedSegmentEndOffset =
              _streamPartitionMsgOffsetFactory.create(segmentZKMetadata.getEndOffset());
          if (completedSegmentEndOffset.compareTo(largestStreamOffset) < 0) {
            // there are unconsumed messages available on the stream
            missingSegmentInfo._totalCount++;
            updateMaxDurationInfo(missingSegmentInfo, partitionGroupId, segmentZKMetadata.getCreationTime(), now);
          }
        }
      }
    });
  } else {
    partitionGroupIdToLatestCompletedSegmentMap.forEach((partitionGroupId, latestCompletedSegment) -> {
      if (!partitionGroupIdToLatestConsumingSegmentMap.containsKey(partitionGroupId)) {
        missingSegmentInfo._totalCount++;
        long segmentCompletionTimeMillis = _segmentMetadataFetcher
            .fetchSegmentCompletionTime(_realtimeTableName, latestCompletedSegment.getSegmentName());
        updateMaxDurationInfo(missingSegmentInfo, partitionGroupId, segmentCompletionTimeMillis, now);
      }
    });
  }
  return missingSegmentInfo;
}
/**
 * Scenario 4: stream offsets are unavailable (empty offset map), so partitions 1 and 4 —
 * which have only completed segments — are reported as missing even though they have simply
 * reached end of life. Expects total=2, no new partition groups, and a max duration of
 * 6 hours (now 18:00 minus partition 4's completion at 12:00).
 */
@Test
public void noMissingConsumingSegmentsScenario4() {
  // scenario 4: no missing segments, but connecting to stream throws exception
  // two partitions have reached end of life
  // since there's no way to detect if the partitions have reached end of life, those partitions
  // are reported as missing consuming segments
  Map<String, Map<String, String>> idealStateMap = new HashMap<>();
  // partition 0
  idealStateMap.put("tableA__0__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__0__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__0__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING"));
  // partition 1 (has reached end of life)
  idealStateMap.put("tableA__1__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__1__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  // partition 2
  idealStateMap.put("tableA__2__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__2__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__2__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING"));
  // partition 3
  idealStateMap.put("tableA__3__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__3__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__3__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING"));
  // partition 4 (has reached end of life)
  idealStateMap.put("tableA__4__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  // partition 5
  idealStateMap.put("tableA__5__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__5__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__5__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING"));
  // setup segment metadata fetcher
  MissingConsumingSegmentFinder.SegmentMetadataFetcher metadataFetcher =
      mock(MissingConsumingSegmentFinder.SegmentMetadataFetcher.class);
  when(metadataFetcher.fetchSegmentCompletionTime("tableA", "tableA__1__1__20220601T1200Z"))
      .thenReturn(Instant.parse("2022-06-01T15:00:00.00Z").toEpochMilli());
  when(metadataFetcher.fetchSegmentCompletionTime("tableA", "tableA__4__0__20220601T0900Z"))
      .thenReturn(Instant.parse("2022-06-01T12:00:00.00Z").toEpochMilli());
  Instant now = Instant.parse("2022-06-01T18:00:00.00Z");
  MissingConsumingSegmentFinder finder =
      new MissingConsumingSegmentFinder("tableA", metadataFetcher, new HashMap<>(), null);
  MissingConsumingSegmentFinder.MissingSegmentInfo info = finder.findMissingSegments(idealStateMap, now);
  assertEquals(info._totalCount, 2);
  assertEquals(info._newPartitionGroupCount, 0);
  assertEquals(info._maxDurationInMinutes, 6 * 60); // (18:00:00 - 12:00:00) in minutes
}
@VisibleForTesting String lookUpPlatformSpecificImageManifest( ManifestListTemplate manifestListTemplate, Platform platform) throws UnlistedPlatformInManifestListException { EventHandlers eventHandlers = buildContext.getEventHandlers(); List<String> digests = manifestListTemplate.getDigestsForPlatform(platform.getArchitecture(), platform.getOs()); if (digests.isEmpty()) { String errorTemplate = buildContext.getBaseImageConfiguration().getImage() + " is a manifest list, but the list does not contain an image for architecture=%s, " + "os=%s. If your intention was to specify a platform for your image, see " + "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md#how-do-i-specify-a-platform-in-the-manifest-list-or-oci-index-of-a-base-image"; String error = String.format(errorTemplate, platform.getArchitecture(), platform.getOs()); eventHandlers.dispatch(LogEvent.error(error)); throw new UnlistedPlatformInManifestListException(error); } // TODO: perhaps we should return multiple digests matching the platform. return digests.get(0); }
/**
 * Verifies platform lookup works against an OCI image index (media type
 * application/vnd.oci.image.index.v1+json): the digest of the entry matching
 * (targetArchitecture, targetOS) is returned, not the arm64/linux one.
 */
@Test
public void testLookUpPlatformSpecificOciManifest()
    throws IOException, UnlistedPlatformInManifestListException {
  String manifestListJson =
      " {\n"
          + " \"schemaVersion\": 2,\n"
          + " \"mediaType\": \"application/vnd.oci.image.index.v1+json\",\n"
          + " \"manifests\": [\n"
          + " {\n"
          + " \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n"
          + " \"size\": 424,\n"
          + " \"digest\": \"sha256:1111111111111111111111111111111111111111111111111111111111111111\",\n"
          + " \"platform\": {\n"
          + " \"architecture\": \"arm64\",\n"
          + " \"os\": \"linux\"\n"
          + " }\n"
          + " },\n"
          + " {\n"
          + " \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",\n"
          + " \"size\": 425,\n"
          + " \"digest\": \"sha256:2222222222222222222222222222222222222222222222222222222222222222\",\n"
          + " \"platform\": {\n"
          + " \"architecture\": \"targetArchitecture\",\n"
          + " \"os\": \"targetOS\"\n"
          + " }\n"
          + " }\n"
          + " ]\n"
          + "}";
  OciIndexTemplate manifestList = JsonTemplateMapper.readJson(manifestListJson, OciIndexTemplate.class);
  String manifestDigest =
      pullBaseImageStep.lookUpPlatformSpecificImageManifest(
          manifestList, new Platform("targetArchitecture", "targetOS"));
  Assert.assertEquals(
      "sha256:2222222222222222222222222222222222222222222222222222222222222222", manifestDigest);
}
/**
 * Runs the version-change pipeline: read the controller version, list the pods,
 * detect the from/to versions, and finally prepare the version change decision.
 *
 * @return future completing with the computed {@link KafkaVersionChange}
 */
public Future<KafkaVersionChange> reconcile() {
    return getVersionFromController()
            .compose(ignored -> getPods())
            .compose(this::detectToAndFromVersions)
            .compose(ignored -> prepareVersionChange());
}
@Test
public void testNewClusterWithAllVersions(VertxTestContext context) {
    // A brand-new cluster (no existing pods) configured with all version fields set:
    // reconcile should land on the default version with no IBP/log-format overrides.
    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(
                    VERSIONS.defaultVersion().version(),
                    VERSIONS.defaultVersion().protocolVersion(),
                    VERSIONS.defaultVersion().messageVersion()),
            mockNewCluster(null, null, List.of()));
    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(change -> context.verify(() -> {
        assertThat(change.from(), is(VERSIONS.defaultVersion()));
        assertThat(change.to(), is(VERSIONS.defaultVersion()));
        assertThat(change.interBrokerProtocolVersion(), nullValue());
        assertThat(change.logMessageFormatVersion(), nullValue());
        assertThat(change.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
/**
 * Merges boolean column statistics from {@code newColStats} into {@code aggregateColStats}.
 * True/false counts are purely additive; null counts are combined via mergeNumNulls.
 */
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
  LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
  BooleanColumnStatsData target = aggregateColStats.getStatsData().getBooleanStats();
  BooleanColumnStatsData source = newColStats.getStatsData().getBooleanStats();
  target.setNumTrues(target.getNumTrues() + source.getNumTrues());
  target.setNumFalses(target.getNumFalses() + source.getNumFalses());
  target.setNumNulls(mergeNumNulls(target.getNumNulls(), source.getNumNulls()));
}
/**
 * Two successive merges into the aggregate must sum all counters across the three
 * objects: falses 1+1+1=3, trues 2+2+1=5, nulls 2+3+1=6.
 */
@Test
public void testMergeNonNullValues() {
  ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Boolean.class)
      .numFalses(1)
      .numTrues(2)
      .numNulls(2)
      .build());
  ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Boolean.class)
      .numFalses(1)
      .numTrues(2)
      .numNulls(3)
      .build());
  MERGER.merge(aggrObj, newObj);
  newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Boolean.class)
      .numFalses(1)
      .numTrues(1)
      .numNulls(1)
      .build());
  MERGER.merge(aggrObj, newObj);
  ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Boolean.class)
      .numFalses(3)
      .numTrues(5)
      .numNulls(6)
      .build();
  assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
/**
 * Joins the elements produced by {@code iterator} into a single string separated
 * by {@code conjunction}, delegating the actual joining to {@link StrJoiner}.
 */
public static <T> String join(Iterator<T> iterator, CharSequence conjunction) {
	StrJoiner joiner = StrJoiner.of(conjunction);
	return joiner.append(iterator).toString();
}
@Test
public void joinWithFuncTest() {
	// Joining with an explicit per-element mapper keeps order and separator.
	final ArrayList<String> values = CollUtil.newArrayList("1", "2", "3", "4");
	final String joined = IterUtil.join(values.iterator(), ":", String::valueOf);
	assertEquals("1:2:3:4", joined);
}
/**
 * Creates a {@code RedistributeArbitrarily} with the default arguments
 * ({@code null}, {@code false}).
 */
public static <T> RedistributeArbitrarily<T> arbitrarily() {
  return new RedistributeArbitrarily<T>(null, false);
}
/**
 * Verifies that Redistribute.arbitrarily() keeps each element's timestamp, pane info and
 * window intact: metadata is reified before and after the transform and compared pairwise.
 */
@Test
@Category(ValidatesRunner.class)
public void testRedistributePreservesMetadata() {
  PCollection<KV<String, ValueInSingleWindow<String>>> input =
      pipeline
          .apply(
              Create.windowedValues(
                      WindowedValue.of(
                          "foo", BoundedWindow.TIMESTAMP_MIN_VALUE, GlobalWindow.INSTANCE, PaneInfo.NO_FIRING),
                      WindowedValue.of(
                          "foo", new Instant(0), GlobalWindow.INSTANCE, PaneInfo.ON_TIME_AND_ONLY_FIRING),
                      WindowedValue.of(
                          "bar", new Instant(33), GlobalWindow.INSTANCE,
                          PaneInfo.createPane(false, false, PaneInfo.Timing.LATE, 1, 1)),
                      WindowedValue.of(
                          "bar", GlobalWindow.INSTANCE.maxTimestamp(), GlobalWindow.INSTANCE, PaneInfo.NO_FIRING))
                  .withCoder(StringUtf8Coder.of())
                  .withWindowCoder(GlobalWindow.Coder.INSTANCE))
          .apply(WithKeys.<String, String>of(v -> v).withKeyType(TypeDescriptors.strings()))
          .apply("ReifyOriginalMetadata", Reify.windowsInValue());
  // The outer WindowedValue is the reified metadata post-reshuffle. The inner
  // WindowedValue is the pre-reshuffle metadata.
  PCollection<ValueInSingleWindow<ValueInSingleWindow<String>>> output =
      input
          .apply(Redistribute.arbitrarily())
          .apply("ReifyRedistributedMetadata", Reify.windowsInValue())
          .apply(Values.create());
  PAssert.that(output)
      .satisfies(
          input1 -> {
            for (ValueInSingleWindow<ValueInSingleWindow<String>> elem : input1) {
              Instant originalTimestamp = elem.getValue().getTimestamp();
              Instant afterRedistributeTimestamp = elem.getTimestamp();
              assertThat(
                  "Redistribute did not preserve element timestamp for " + elem,
                  afterRedistributeTimestamp,
                  equalTo(originalTimestamp));
              PaneInfo originalPaneInfo = elem.getValue().getPane();
              PaneInfo afterRedistributePaneInfo = elem.getPane();
              assertThat(
                  "Redistribute did not preserve pane info for " + elem,
                  afterRedistributePaneInfo,
                  equalTo(originalPaneInfo));
              BoundedWindow originalWindow = elem.getValue().getWindow();
              BoundedWindow afterRedistributeWindow = elem.getWindow();
              assertThat(
                  "Redistribute did not preserve window for " + elem,
                  afterRedistributeWindow,
                  equalTo(originalWindow));
            }
            return null;
          });
  pipeline.run();
}
/**
 * Returns the set of known property key names.
 * NOTE(review): this returns the internal {@code keys} set directly, so callers could
 * mutate it — confirm whether the field is already unmodifiable before tightening.
 */
public Set<String> keySet() {
  return keys;
}
@Test
public void testNullProperties() {
    // Constructing from a null Properties source must yield an empty key set.
    HazelcastProperties props = new HazelcastProperties((Properties) null);
    assertTrue(props.keySet().isEmpty());
}
public String getURI() { COSBase base = action.getDictionaryObject(COSName.URI); if (base instanceof COSString) { byte[] bytes = ((COSString) base).getBytes(); if (bytes.length >= 2) { // UTF-16 (BE) if ((bytes[0] & 0xFF) == 0xFE && (bytes[1] & 0xFF) == 0xFF) { return action.getString(COSName.URI); } // UTF-16 (LE) if ((bytes[0] & 0xFF) == 0xFF && (bytes[1] & 0xFF) == 0xFE) { return action.getString(COSName.URI); } } return new String(bytes, StandardCharsets.UTF_8); } return null; }
@Test
void testUTF16LEURI() throws IOException {
    // FF FE is the UTF-16LE BOM; the remaining bytes encode "http:" little-endian.
    PDActionURI actionURI = new PDActionURI();
    COSString utf16Uri = COSString.parseHex("FFFE68007400740070003A00");
    actionURI.getCOSObject().setItem(COSName.URI, utf16Uri);
    assertEquals("http:", actionURI.getURI());
}
/**
 * Provider-side auth filter: when the URL enables SERVICE_AUTH, runs the configured
 * authenticator before proceeding. An authentication failure is returned as a failed
 * async result instead of being thrown.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    URL url = invoker.getUrl();
    // Auth disabled: pass straight through without touching the authenticator extension.
    if (!url.getParameter(Constants.SERVICE_AUTH, false)) {
        return invoker.invoke(invocation);
    }
    String authenticatorName = url.getParameter(Constants.AUTHENTICATOR, Constants.DEFAULT_AUTHENTICATOR);
    Authenticator authenticator = applicationModel
            .getExtensionLoader(Authenticator.class)
            .getExtension(authenticatorName);
    try {
        authenticator.authenticate(invocation, url);
    } catch (Exception e) {
        // Surface the failure to the consumer as an async result.
        return AsyncRpcResult.newDefaultAsyncResult(e, invocation);
    }
    return invoker.invoke(invocation);
}
@Test
void testAuthDisabled() {
    // With SERVICE_AUTH unset (mock returns default false) the filter must never
    // even read the authenticator parameter.
    URL url = mock(URL.class);
    Invoker invoker = mock(Invoker.class);
    Invocation invocation = mock(RpcInvocation.class);
    when(invoker.getUrl()).thenReturn(url);
    ProviderAuthFilter filter = new ProviderAuthFilter(ApplicationModel.defaultModel());
    filter.invoke(invoker, invocation);
    verify(url, never()).getParameter(eq(Constants.AUTHENTICATOR), eq(Constants.DEFAULT_AUTHENTICATOR));
}
static int[] findMinMaxLengthsInSymbols(String[] symbols) { int min = Integer.MAX_VALUE; int max = 0; for (String symbol : symbols) { int len = symbol.length(); // some SENTINEL values can be empty strings, the month at index 12 or the // weekday at index 0 if (len == 0) continue; min = Math.min(min, len); max = Math.max(max, len); } return new int[] { min, max }; }
@Test
public void emptyStringValuesShouldBeIgnoredByFindMinMaxLengthsInSymbols() {
  // The empty sentinel entry must not pull the minimum length down to zero.
  String[] symbols = { "aaa", "" };
  int[] minMax = CharSequenceToRegexMapper.findMinMaxLengthsInSymbols(symbols);
  assertEquals(3, minMax[0]);
  assertEquals(3, minMax[1]);
}
/**
 * Returns {@code true} when the given project is flagged as needing an issue sync,
 * by probing the bulk lookup with a singleton set.
 */
public boolean doProjectNeedIssueSync(DbSession dbSession, String projectUuid) {
  boolean syncNeeded = !findProjectUuidsWithIssuesSyncNeed(dbSession, Sets.newHashSet(projectUuid)).isEmpty();
  return syncNeeded;
}
@Test
public void doProjectNeedIssueSync() {
  // A project inserted without the sync-need flag reports false...
  ProjectData withoutSyncNeed = insertProjectWithBranches(false, 0);
  assertThat(underTest.doProjectNeedIssueSync(db.getSession(), withoutSyncNeed.getProjectDto().getUuid())).isFalse();
  // ...while one inserted with the flag reports true.
  ProjectData withSyncNeed = insertProjectWithBranches(true, 0);
  assertThat(underTest.doProjectNeedIssueSync(db.getSession(), withSyncNeed.getProjectDto().getUuid())).isTrue();
}
public FileSystem get(Key key) { synchronized (mLock) { Value value = mCacheMap.get(key); FileSystem fs; if (value == null) { // On cache miss, create and insert a new FileSystem instance, fs = FileSystem.Factory.create(FileSystemContext.create(key.mSubject, key.mConf)); mCacheMap.put(key, new Value(fs, 1)); } else { fs = value.mFileSystem; value.mRefCount.getAndIncrement(); } return new InstanceCachingFileSystem(fs, key); } }
@Test
public void getDifferentKeys() {
  // Distinct cache keys must resolve to distinct underlying FileSystem instances,
  // and neither wrapper should start out closed.
  Key firstKey = createTestFSKey("user1");
  Key secondKey = createTestFSKey("user2");
  FileSystem firstFs = mFileSystemCache.get(firstKey);
  FileSystem secondFs = mFileSystemCache.get(secondKey);
  assertNotSame(getDelegatedFileSystem(firstFs), getDelegatedFileSystem(secondFs));
  assertFalse(firstFs.isClosed());
  assertFalse(secondFs.isClosed());
}
/**
 * Routes by hint values: with no hint values every available target is returned;
 * otherwise each hint value is mapped through the single-value doSharding overload.
 */
@Override
public Collection<String> doSharding(final Collection<String> availableTargetNames, final HintShardingValue<Comparable<?>> shardingValue) {
    if (shardingValue.getValues().isEmpty()) {
        return availableTargetNames;
    }
    return shardingValue.getValues().stream().map(this::doSharding).collect(Collectors.toList());
}
@Test
void assertDoShardingWithMultiValues() {
    // Hint values 1..4 should collectively cover all four targets.
    List<String> targets = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
    HintShardingValue<Comparable<?>> hintValue = new HintShardingValue<>("t_order", "order_id", Arrays.asList(1, 2, 3, 4));
    Collection<String> actual = hintInlineShardingAlgorithm.doSharding(targets, hintValue);
    assertTrue(actual.containsAll(targets));
}
/**
 * Returns the object permissions by delegating to the {@code mPermissions} supplier.
 */
@Override
protected ObjectPermissions getPermissions() {
  ObjectPermissions permissions = mPermissions.get();
  return permissions;
}
/**
 * Exercises getPermissions() when the configured owner-id-to-username mapping
 * ("111=userid") does not match the account owner id ("0"): the S3 owner display
 * name "test" is expected for both owner and group, with mode 0.
 */
@Test
public void getPermissionsNoMapping() throws Exception {
  Map<PropertyKey, Object> conf = new HashMap<>();
  conf.put(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING, "111=userid");
  try (Closeable c = new ConfigurationRule(conf, CONF).toResource()) {
    S3AUnderFileSystem s3UnderFileSystem =
        new S3AUnderFileSystem(new AlluxioURI("s3a://" + BUCKET_NAME), mClient, mAsyncClient, BUCKET_NAME,
            mExecutor, mManager, UnderFileSystemConfiguration.defaults(CONF), false, false);
    Mockito.when(mClient.getS3AccountOwner()).thenReturn(new Owner("0", "test"));
    Mockito.when(mClient.getBucketAcl(Mockito.anyString())).thenReturn(new AccessControlList());
    ObjectUnderFileSystem.ObjectPermissions permissions = s3UnderFileSystem.getPermissions();
    Assert.assertEquals("test", permissions.getOwner());
    Assert.assertEquals("test", permissions.getGroup());
    Assert.assertEquals(0, permissions.getMode());
  }
}
/**
 * Rounds the requested capacity up to the next power of two, clamped below by
 * MIN_CAPACITY.
 *
 * @throws IllegalArgumentException if the request exceeds MAX_LONG_CAPACITY
 */
public static long roundCapacity(long requestedCapacity) {
    if (requestedCapacity > MAX_LONG_CAPACITY) {
        throw new IllegalArgumentException(requestedCapacity
                + " is greater than max allowed capacity[" + MAX_LONG_CAPACITY + "].");
    }
    long powerOfTwo = QuickMath.nextPowerOfTwo(requestedCapacity);
    return Math.max(MIN_CAPACITY, powerOfTwo);
}
@Test
public void testRoundCapacity() {
    // 2342 rounds up to the next power of two, 4096.
    int requested = 2342;
    int rounded = roundCapacity(requested);
    assertEquals(4096, rounded);
}
/**
 * Builds the reader cache. Entries expire {@code cacheDuration} after being written;
 * any removal other than an explicit one (e.g. expiry) logs the idle reader and hands
 * it to {@code asyncCloseReader} for cleanup.
 */
ReaderCache(Duration cacheDuration, Executor invalidationExecutor) {
  this.invalidationExecutor = invalidationExecutor;
  this.cache =
      CacheBuilder.newBuilder()
          .expireAfterWrite(cacheDuration.getMillis(), TimeUnit.MILLISECONDS)
          .removalListener(
              (RemovalNotification<WindmillComputationKey, CacheEntry> notification) -> {
                if (notification.getCause() != RemovalCause.EXPLICIT) {
                  LOG.info(
                      "Asynchronously closing reader for {} as it has been idle for over {}",
                      notification.getKey(),
                      cacheDuration);
                  asyncCloseReader(notification.getKey(), notification.getValue());
                }
              })
          .build();
}
/**
 * Exercises the core ReaderCache contract: acquireReader removes the matching entry,
 * cacheReader refuses to overwrite an existing entry, and both the computation id and
 * the sharding key participate in the cache key.
 */
@Test
public void testReaderCache() throws IOException {
  // Test basic caching expectations
  readerCache.cacheReader(WindmillComputationKey.create(C_ID, KEY_1, SHARDING_KEY), 1, 0, reader1);
  readerCache.cacheReader(WindmillComputationKey.create(C_ID, KEY_2, SHARDING_KEY), 2, 0, reader2);
  assertEquals(
      reader1,
      readerCache.acquireReader(WindmillComputationKey.create(C_ID, KEY_1, SHARDING_KEY), 1, 1));
  assertNull(
      readerCache.acquireReader(
          WindmillComputationKey.create(C_ID_1, KEY_1, SHARDING_KEY), 1, 1));
  assertNull(
      readerCache.acquireReader(
          WindmillComputationKey.create(C_ID, KEY_1, SHARDING_KEY_1), 1, 1));
  // Trying to override existing reader should throw
  try {
    readerCache.cacheReader(WindmillComputationKey.create(C_ID, KEY_2, SHARDING_KEY), 2, 2, reader1);
    fail("Exception should have been thrown");
  } catch (RuntimeException expected) {
    // expected
  }
  // And it should not have overwritten the old value
  assertEquals(
      reader2,
      readerCache.acquireReader(WindmillComputationKey.create(C_ID, KEY_2, SHARDING_KEY), 2, 3));
  assertNull(
      "acquireReader(WindmillComputationKey.create() should remove matching entry",
      readerCache.acquireReader(WindmillComputationKey.create(C_ID, KEY_2, SHARDING_KEY), 2, 4));
  // Make sure computationId is part of the cache key
  readerCache.cacheReader(WindmillComputationKey.create(C_ID_1, KEY_1, SHARDING_KEY), 1, 5, reader2);
  assertEquals(
      reader2,
      readerCache.acquireReader(
          WindmillComputationKey.create(C_ID_1, KEY_1, SHARDING_KEY), 1, 6));
  // Make sure sharding key is part of the cache key
  readerCache.cacheReader(WindmillComputationKey.create(C_ID, KEY_1, SHARDING_KEY_1), 1, 0, reader3);
  assertEquals(
      reader3,
      readerCache.acquireReader(
          WindmillComputationKey.create(C_ID, KEY_1, SHARDING_KEY_1), 1, 1));
}
/**
 * Exposes the {@link Settings} through the {@code Configuration} API by wrapping
 * them in a server-side adapter.
 */
@Bean("Configuration")
public Configuration provide(Settings settings) {
  ServerConfigurationAdapter adapter = new ServerConfigurationAdapter(settings);
  return adapter;
}
@Test
@UseDataProvider("emptyStrings")
public void getStringArray_parses_empty_string_differently_from_Settings_ifmultivalue_property(String emptyValue) {
  // For a multivalue key, an empty raw value must parse to an empty array.
  settings.setProperty(multivalueKey, emptyValue);
  Configuration provided = underTest.provide(settings);
  getStringArrayBehaviorDiffers(provided, multivalueKey, EMPTY_STRING_ARRAY);
}
protected static List<RepositoryElementMetaInterface> loadRepositoryObjects( RepositoryDirectoryInterface dir, boolean getTransformations, boolean getJobs, Repository rep ) throws KettleDatabaseException { // Then show the transformations & jobs in that directory... List<RepositoryElementMetaInterface> repositoryObjects = new ArrayList<RepositoryElementMetaInterface>(); if ( dir.getRepositoryObjects() == null ) { try { dir.setRepositoryObjects( rep.getJobAndTransformationObjects( dir.getObjectId(), false ) ); } catch ( KettleException e ) { throw new KettleDatabaseException( e ); } } List<RepositoryObjectType> allowedTypes = new ArrayList<>( 2 ); if ( getTransformations ) { allowedTypes.add( RepositoryObjectType.TRANSFORMATION ); } if ( getJobs ) { allowedTypes.add( RepositoryObjectType.JOB ); } for ( RepositoryElementMetaInterface repoObject : dir.getRepositoryObjects() ) { if ( allowedTypes.contains( repoObject.getObjectType() ) ) { repositoryObjects.add( repoObject ); } } return repositoryObjects; }
/**
 * A directory whose object list is null must be lazily populated from the repository:
 * the root (stubbed to have no objects) yields an empty list while the subdirectory
 * (stubbed with one transformation) yields exactly one entry.
 */
@Test
public void testLoadRepositoryObjectsNulled() throws Exception {
  RepositoryDirectory root = new RepositoryDirectory();
  root.setObjectId( new LongObjectId( 0L ) );
  RepositoryDirectory dir = new RepositoryDirectory();
  dir.setObjectId( new LongObjectId( 1L ) );
  root.addSubdirectory( dir );
  RepositoryElementMetaInterface meta = mock( RepositoryElementMetaInterface.class );
  when( meta.getObjectType() ).thenReturn( RepositoryObjectType.TRANSFORMATION );
  Repository repo = mock( Repository.class );
  when( repo.getJobAndTransformationObjects( dir.getObjectId(), false ) )
      .thenReturn( Collections.singletonList( meta ) );
  when( repo.getJobAndTransformationObjects( root.getObjectId(), false ) )
      .thenReturn( Collections.emptyList() );
  assertTrue( RepositoryDirectoryUI.loadRepositoryObjects( root, true, true, repo ).isEmpty() );
  assertEquals( 1, RepositoryDirectoryUI.loadRepositoryObjects( dir, true, true, repo ).size() );
}
/**
 * Intentional no-op: this implementation has nothing to rename.
 */
@Override
public void renamePropertyKey(String oldKey, String newKey) {
  // do nothing
}
// The no-op implementation must accept nulls and trigger no side effects.
@Test
public void renamePropertyKey() {
  underTest.renamePropertyKey(null, null);
  assertNoInteraction();
}
/**
 * FEEL list contains(): reports whether {@code element} is a member of {@code list}.
 * A null list is an error; a null element is looked up directly; any other element is
 * number-coerced and compared against each (coerced) list item via itemEqualsSC.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list, @ParameterName("element") Object element) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    if (element == null) {
        // null never goes through number coercion; List.contains handles it.
        return FEELFnResult.ofResult(list.contains(element));
    }
    Object coercedTarget = NumberEvalHelper.coerceNumber(element);
    for (Object item : list) {
        if (itemEqualsSC(coercedTarget, NumberEvalHelper.coerceNumber(item))) {
            return FEELFnResult.ofResult(true);
        }
    }
    return FEELFnResult.ofResult(false);
}
@Test
void invokeNotContains() {
    // Neither a non-matching string, an absent integer, nor its BigDecimal
    // coercion should be reported as contained.
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2, "test"), "testtt"), false);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2, "test"), 3), false);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, 2, "test"), BigDecimal.valueOf(3)), false);
}
/**
 * Builds the web URL for {@code file} by appending the file's path — made relative to
 * the host's default (document root) path, normalized and percent-encoded — to the
 * host's base web URL.
 */
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    final DescriptiveUrl base = new DefaultWebUrlProvider().toUrl(host);
    final String documentRoot = PathNormalizer.normalize(host.getDefaultPath(), true);
    final String relative = PathRelativizer.relativize(documentRoot, file.getAbsolute());
    final String encoded = URIEncoder.encode(PathNormalizer.normalize(relative));
    final DescriptiveUrlBag list = new DescriptiveUrlBag();
    list.add(new DescriptiveUrl(
        URI.create(String.format("%s%s", base.getUrl(), encoded)).normalize(),
        base.getType(), base.getHelp()));
    return list;
}
@Test
public void testToUrl() {
    // The host URL ends with a slash; the path relative to the document root keeps
    // its percent-encoding ("f f" -> "f%20f").
    final Host host = new Host(new TestProtocol(), "test.cyberduck.ch");
    assertEquals("http://test.cyberduck.ch/", new DefaultWebUrlProvider().toUrl(host).getUrl());
    final Path directory = new Path("/my/documentroot/f f", EnumSet.of(Path.Type.directory));
    assertEquals("http://test.cyberduck.ch/my/documentroot/f%20f",
        new HostWebUrlProvider(host).toUrl(directory).find(DescriptiveUrl.Type.http).getUrl());
}
/**
 * Returns the panes as of the current wall-clock time, delegating to the
 * timestamped overload.
 */
public List<Pane<T>> list() {
    long nowMillis = System.currentTimeMillis();
    return list(nowMillis);
}
// Accessing the current pane first; list() is then expected to be non-empty.
@Test
void testList() {
  window.currentPane();
  assertTrue(0 < window.list().size());
}
/**
 * Builds one or more SubmitMulti PDUs from the exchange, submits each through the SMPP
 * session, collects per-destination delivery failures, and writes the message ids, sent
 * count and any errors back onto the result message headers. Any submission failure is
 * wrapped in a SmppException.
 */
@Override
public void execute(Exchange exchange) throws SmppException {
  SubmitMulti[] submitMulties = createSubmitMulti(exchange);
  List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);
  for (SubmitMulti submitMulti : submitMulties) {
    SubmitMultiResult result;
    if (log.isDebugEnabled()) {
      log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
    }
    try {
      // One submitMultiple call per PDU; all PDU fields are converted to jSMPP types here.
      result = session.submitMultiple(
          submitMulti.getServiceType(),
          TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
          NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
          submitMulti.getSourceAddr(),
          (Address[]) submitMulti.getDestAddresses(),
          new ESMClass(submitMulti.getEsmClass()),
          submitMulti.getProtocolId(),
          submitMulti.getPriorityFlag(),
          submitMulti.getScheduleDeliveryTime(),
          submitMulti.getValidityPeriod(),
          new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
          new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
          DataCodings.newInstance(submitMulti.getDataCoding()),
          submitMulti.getSmDefaultMsgId(),
          submitMulti.getShortMessage(),
          submitMulti.getOptionalParameters());
      results.add(result);
    } catch (Exception e) {
      throw new SmppException(e);
    }
  }
  if (log.isDebugEnabled()) {
    log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'",
        exchange.getExchangeId(), results);
  }
  List<String> messageIDs = new ArrayList<>(results.size());
  // {messageID : [{destAddr : address, error : errorCode}]}
  Map<String, List<Map<String, Object>>> errors = new HashMap<>();
  for (SubmitMultiResult result : results) {
    UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();
    if (deliveries != null) {
      // Collect each failed destination with its error status code.
      List<Map<String, Object>> undelivered = new ArrayList<>();
      for (UnsuccessDelivery delivery : deliveries) {
        Map<String, Object> error = new HashMap<>();
        error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
        error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
        undelivered.add(error);
      }
      if (!undelivered.isEmpty()) {
        errors.put(result.getMessageId(), undelivered);
      }
    }
    messageIDs.add(result.getMessageId());
  }
  Message message = ExchangeHelper.getResultMessage(exchange);
  message.setHeader(SmppConstants.ID, messageIDs);
  message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
  if (!errors.isEmpty()) {
    message.setHeader(SmppConstants.ERROR, errors);
  }
}
/**
 * Verifies that a String VALIDITY_PERIOD header (relative format "000003000000000R")
 * is passed through to session.submitMultiple unchanged, and that the ID,
 * SENT_MESSAGE_COUNT and ERROR result headers are populated accordingly.
 */
@Test
public void executeWithValidityPeriodAsString() throws Exception {
  Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
  exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
  exchange.getIn().setHeader(SmppConstants.ID, "1");
  exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
  exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
  exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
  exchange.getIn().setHeader(SmppConstants.DEST_ADDR_TON, TypeOfNumber.INTERNATIONAL.value());
  exchange.getIn().setHeader(SmppConstants.DEST_ADDR_NPI, NumberingPlanIndicator.INTERNET.value());
  exchange.getIn().setHeader(SmppConstants.DEST_ADDR, Collections.singletonList("1919"));
  exchange.getIn().setHeader(SmppConstants.SCHEDULE_DELIVERY_TIME, new Date(1111111));
  exchange.getIn().setHeader(SmppConstants.VALIDITY_PERIOD, "000003000000000R"); // three days
  exchange.getIn().setHeader(SmppConstants.PROTOCOL_ID, (byte) 1);
  exchange.getIn().setHeader(SmppConstants.PRIORITY_FLAG, (byte) 2);
  exchange.getIn().setHeader(SmppConstants.REGISTERED_DELIVERY, new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE).value());
  exchange.getIn().setHeader(SmppConstants.REPLACE_IF_PRESENT_FLAG, ReplaceIfPresentFlag.REPLACE.value());
  exchange.getIn().setBody("short message body");
  when(session.submitMultiple(eq("CMT"), eq(TypeOfNumber.NATIONAL), eq(NumberingPlanIndicator.NATIONAL), eq("1818"),
      eq(new Address[] { new Address(TypeOfNumber.INTERNATIONAL, NumberingPlanIndicator.INTERNET, "1919") }),
      eq(new ESMClass()), eq((byte) 1), eq((byte) 2), eq("-300101001831100+"), eq("000003000000000R"),
      eq(new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE)), eq(ReplaceIfPresentFlag.REPLACE),
      eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), eq("short message body".getBytes())))
      .thenReturn(new SubmitMultiResult("1", null, null));
  command.execute(exchange);
  assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
  assertEquals(1, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT));
  assertNull(exchange.getMessage().getHeader(SmppConstants.ERROR));
}
/**
 * SQL INDETERMINATE operator for varchar(x): a value is indeterminate iff it is NULL.
 */
@LiteralParameters("x")
@ScalarOperator(INDETERMINATE)
@SqlType(StandardTypes.BOOLEAN)
public static boolean indeterminate(@SqlType("varchar(x)") Slice value, @IsNull boolean isNull) {
  return isNull;
}
@Test
public void testIndeterminate() {
    // Only a SQL NULL varchar is indeterminate; every concrete value is not.
    assertOperator(INDETERMINATE, "cast(null as varchar)", BOOLEAN, true);
    assertOperator(INDETERMINATE, "'foo'", BOOLEAN, false);
    assertOperator(INDETERMINATE, "cast(123456 as varchar)", BOOLEAN, false);
    assertOperator(INDETERMINATE, "cast(12345.0123 as varchar)", BOOLEAN, false);
    assertOperator(INDETERMINATE, "cast(true as varchar)", BOOLEAN, false);
}
static Thread createThread(Runnable r) {
    // Delegate construction to the JDK default factory, then adjust attributes:
    // a unique nanoTime-suffixed name, lowest priority, and daemon status so the
    // thread never keeps the JVM alive.
    final Thread worker = Executors.defaultThreadFactory().newThread(r);
    worker.setName("SonarLint-PushEvent-" + System.nanoTime());
    worker.setPriority(MIN_PRIORITY);
    worker.setDaemon(true);
    return worker;
}
@Test
public void createThread_shouldCreateDaemonWithNameSonarLintPushEvent() {
    // The factory must hand back a minimum-priority daemon thread whose name
    // carries the "SonarLint-PushEvent-" prefix (suffix is a nanoTime stamp).
    assertThat(SonarLintPushEventExecutorServiceImpl.createThread(() -> {
    }))
        .extracting(Thread::getPriority, Thread::isDaemon, thread -> thread.getName().startsWith("SonarLint-PushEvent-"))
        .containsExactly(Thread.MIN_PRIORITY, true, true);
}
@Override
public void authenticate(
    final JsonObject authInfo,
    final Handler<AsyncResult<User>> resultHandler
) {
    // Reject early when either credential field is absent from the payload.
    final String username = authInfo.getString("username");
    if (username == null) {
        resultHandler.handle(Future.failedFuture("authInfo missing 'username' field"));
        return;
    }
    final String password = authInfo.getString("password");
    if (password == null) {
        resultHandler.handle(Future.failedFuture("authInfo missing 'password' field"));
        return;
    }
    // User lookup may block, so run it on the worker pool (unordered) and
    // deliver the outcome straight to the caller's handler.
    server.getWorkerExecutor().executeBlocking(
        promise -> getUser(contextName, username, password, promise),
        false,
        resultHandler
    );
}
@Test
public void shouldFailToAuthenticateOnMissingUsername() {
    // Given: the auth payload carries no 'username' entry.
    when(authInfo.getString("username")).thenReturn(null);

    // When:
    authProvider.authenticate(authInfo, userHandler);

    // Then: authentication fails fast with a descriptive message.
    verifyLoginFailure("authInfo missing 'username' field");
}
@Override
public void updatePort(KubevirtPort port) {
    // Validate the port and its mandatory identifiers before touching the store.
    checkNotNull(port, ERR_NULL_PORT);
    final String mac = port.macAddress().toString();
    checkArgument(!Strings.isNullOrEmpty(mac), ERR_NULL_PORT_MAC);
    checkArgument(!Strings.isNullOrEmpty(port.networkId()), ERR_NULL_PORT_NET_ID);

    kubevirtPortStore.updatePort(port);
    log.debug(String.format(MSG_PORT, mac, MSG_UPDATED));
}
// A null port must be rejected by the service's checkNotNull precondition.
@Test(expected = NullPointerException.class)
public void testUpdateNullPort() {
    target.updatePort(null);
}
@VisibleForTesting
static String generateFile(String template, QualifiedVersion version, Set<ChangelogEntry> changelogs)
        throws IOException {
    // Group the changelog entries by type and area, expose everything the
    // template needs as named bindings, then render.
    final Map<String, Object> bindings = new HashMap<>();
    bindings.put("version", version);
    bindings.put("changelogsByTypeByArea", buildChangelogBreakdown(changelogs));
    bindings.put("TYPE_LABELS", TYPE_LABELS);
    return TemplateUtils.render(template, bindings);
}
@Test
public void generateFile_rendersCorrectMarkup() throws Exception {
    // given: the release-notes template and the golden-file output it should produce
    final String template = getResource("/templates/release-notes.asciidoc");
    final String expectedOutput = getResource(
        "/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.generateFile.asciidoc"
    );
    final Set<ChangelogEntry> entries = getEntries();

    // when: rendering the changelog entries for a snapshot version
    final String actualOutput = ReleaseNotesGenerator.generateFile(template, QualifiedVersion.of("8.2.0-SNAPSHOT"), entries);

    // then: the rendered markup matches the golden file exactly
    assertThat(actualOutput, equalTo(expectedOutput));
}
@Override
public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) {
    // Generate the token only for statements that expose WHERE segments and
    // actually contain at least one.
    if (!(sqlStatementContext instanceof WhereAvailable)) {
        return false;
    }
    return !((WhereAvailable) sqlStatementContext).getWhereSegments().isEmpty();
}
@Test
void assertIsGenerateSQLToken() {
    // An UPDATE statement context with a non-empty WHERE clause must trigger
    // token generation; the schema map is irrelevant to that decision.
    generator.setSchemas(Collections.emptyMap());
    assertTrue(generator.isGenerateSQLToken(EncryptGeneratorFixtureBuilder.createUpdateStatementContext()));
}
/**
 * Fails if the subject is an instance of the given class (including subtypes).
 * A {@code null} subject always passes, since null is not an instance of anything.
 *
 * @throws NullPointerException if {@code clazz} is null
 * @throws UnsupportedOperationException on platforms without class metadata
 *     (compiled with -XdisableClassMetadata)
 */
public void isNotInstanceOf(Class<?> clazz) {
    if (clazz == null) {
        throw new NullPointerException("clazz");
    }
    if (Platform.classMetadataUnsupported()) {
        throw new UnsupportedOperationException(
            "isNotInstanceOf is not supported under -XdisableClassMetadata");
    }
    if (actual == null) {
        return; // null is not an instance of clazz.
    }
    if (isInstanceOfType(actual, clazz)) {
        failWithActual("expected not to be an instance of", clazz.getName());
        /*
         * TODO(cpovirk): Consider including actual.getClass() if it's not clazz itself but only a
         * subtype.
         */
    }
}
@Test
public void isNotInstanceOfExactType() {
    // An Integer IS an instance of Integer, so the assertion must fail with the
    // standard key/value failure message.
    expectFailure.whenTesting().that(5).isNotInstanceOf(Integer.class);
    assertFailureKeys("expected not to be an instance of", "but was");
    assertFailureValue("expected not to be an instance of", "java.lang.Integer");
}
/**
 * Executes the configured HTTP request and returns the task output.
 * The response body must be a valid Unicode string; on a non-2xx status the
 * exception is rethrown unless {@code allowFailed} is set, in which case the
 * error response is converted to a normal output.
 */
@SuppressWarnings("unchecked")
public Output run(RunContext runContext) throws Exception {
    Logger logger = runContext.logger();

    // The HTTP client is scoped to this run and closed automatically.
    try (HttpClient client = this.client(runContext, this.method)) {
        HttpRequest<String> request = this.request(runContext);

        HttpResponse<String> response;
        try {
            response = client
                .toBlocking()
                .exchange(request, Argument.STRING, Argument.STRING);
            // check that the string is a valid Unicode string
            if (response.getBody().isPresent()) {
                OptionalInt illegalChar = response.body().chars().filter(c -> !Character.isDefined(c)).findFirst();
                if (illegalChar.isPresent()) {
                    throw new IllegalArgumentException("Illegal unicode code point in request body: " + illegalChar.getAsInt()
                        + ", the Request task only support valid Unicode strings as body.\n"
                        + "You can try using the Download task instead.");
                }
            }
        } catch (HttpClientResponseException e) {
            // Non-2xx status: fail the task unless failures are tolerated, in
            // which case the error response is used as the task's response.
            if (!allowFailed) {
                throw e;
            }
            //noinspection unchecked
            response = (HttpResponse<String>) e.getResponse();
        }

        logger.debug("Request '{}' with the response code '{}'", request.getUri(), response.getStatus().getCode());

        return this.output(runContext, request, response);
    }
}
@Test
void form() throws Exception {
    try (
        ApplicationContext applicationContext = ApplicationContext.run();
        EmbeddedServer server = applicationContext.getBean(EmbeddedServer.class).start();
    ) {
        // POST a form-encoded body to the embedded test endpoint, with a header
        // value templated from the run-context inputs.
        Request task = Request.builder()
            .id(RequestTest.class.getSimpleName())
            .type(RequestTest.class.getName())
            .method(HttpMethod.POST)
            .contentType(MediaType.APPLICATION_FORM_URLENCODED)
            .uri(server.getURL().toString() + "/post/simple")
            .headers(ImmutableMap.of(
                "test", "{{ inputs.test }}"
            ))
            .formData(ImmutableMap.of("hello", "world"))
            .build();

        RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, task, ImmutableMap.of(
            "test", "value"
        ));

        Request.Output output = task.run(runContext);

        // The endpoint echoes "<form value> > <rendered header value>".
        assertThat(output.getBody(), is("world > value"));
        assertThat(output.getCode(), is(200));
    }
}
public int size() {
    // Number of examples currently held; delegates to the backing collection.
    return this.data.size();
}
@Test
public void testDense() {
    MockOutputFactory mockFactory = new MockOutputFactory();
    MockDataSourceProvenance mockProvenance = new MockDataSourceProvenance();
    MockOutput mockOutput = new MockOutput("test");

    MutableDataset<MockOutput> dataset = new MutableDataset<>(mockProvenance, mockFactory);
    // Empty datasets are dense
    assertTrue(dataset.isDense());

    // Examples over feature spaces {a,b,c}, {a,b,c,d}, {a,b,c} and {b,c}.
    ArrayExample<MockOutput> first = new ArrayExample<>(mockOutput, new String[]{"a", "b", "c"}, new double[]{1, 1, 1});
    ArrayExample<MockOutput> second = new ArrayExample<>(mockOutput, new String[]{"a", "b", "c", "d"}, new double[]{1, 1, 1, 1});
    ArrayExample<MockOutput> third = new ArrayExample<>(mockOutput, new String[]{"a", "b", "c"}, new double[]{3, 3, 3});
    ArrayExample<MockOutput> fourth = new ArrayExample<>(mockOutput, new String[]{"b", "c"}, new double[]{1, 1});

    dataset.add(first);
    MutableDataset<MockOutput> deser = (MutableDataset<MockOutput>) Helpers.testDatasetSerialization(dataset);
    // This example is dense
    assertTrue(dataset.isDense());
    assertTrue(deser.isDense());
    dataset.add(second);
    deser = (MutableDataset<MockOutput>) Helpers.testDatasetSerialization(dataset);
    // This example is dense, but it makes the previous one not dense as it adds a new feature
    assertFalse(dataset.isDense());
    assertFalse(deser.isDense());

    // flush out the previous test
    dataset.clear();
    dataset.add(first);
    dataset.add(third);
    // These examples are both dense
    assertTrue(dataset.isDense());

    // flush out old test
    dataset.clear();
    // Add all the examples, making it sparse
    dataset.add(first);
    dataset.add(second);
    dataset.add(third);
    dataset.add(fourth);
    // Should be sparse
    assertFalse(dataset.isDense());
    // Densify it
    dataset.densify();
    // Now it should be dense
    assertTrue(dataset.isDense());

    ArrayExample<MockOutput> fifth = new ArrayExample<>(mockOutput, new String[]{"a", "b", "c", "d", "e"}, new double[]{1, 1, 1, 1, 1});
    // Makes the previous examples sparse
    dataset.add(fifth);
    assertFalse(dataset.isDense());
    dataset.densify();
    assertTrue(dataset.isDense());
    // After densify, every example must span the full 5-feature domain.
    for (Example<MockOutput> e : dataset) {
        assertEquals(5, e.size());
    }
    Helpers.testDatasetSerialization(dataset);
}
/**
 * Renames {@code src} to {@code dst} over the given SFTP channel.
 * Both paths are resolved against the channel's current working directory
 * before the source/destination existence checks and the rename itself.
 *
 * @return true if the server accepted the rename, false if it was refused
 * @throws IOException if the working directory cannot be read, the source is
 *     missing, or the destination already exists
 */
private boolean rename(ChannelSftp channel, Path src, Path dst) throws IOException {
  Path workDir;
  try {
    workDir = new Path(channel.pwd());
  } catch (SftpException e) {
    throw new IOException(e);
  }
  Path absoluteSrc = makeAbsolute(workDir, src);
  Path absoluteDst = makeAbsolute(workDir, dst);
  if (!exists(channel, absoluteSrc)) {
    throw new IOException(String.format(E_SPATH_NOTEXIST, src));
  }
  if (exists(channel, absoluteDst)) {
    throw new IOException(String.format(E_DPATH_EXIST, dst));
  }
  boolean renamed = true;
  try {
    final String previousCwd = channel.pwd();
    channel.cd("/");
    try {
      // Rename using the absolute paths validated above. The previous code
      // renamed the raw src/dst, which silently resolved relative paths
      // against "/" instead of the working directory they were checked in.
      channel.rename(absoluteSrc.toUri().getPath(), absoluteDst.toUri().getPath());
    } finally {
      // Restore the working directory even when the rename fails.
      channel.cd(previousCwd);
    }
  } catch (SftpException e) {
    // Server refused the rename; report failure rather than throwing.
    renamed = false;
  }
  return renamed;
}
@Test(expected=java.io.IOException.class) public void testRenamingFileOntoExistingFile() throws Exception { Path file1 = touch(localFs, name.getMethodName().toLowerCase() + "1"); Path file2 = touch(localFs, name.getMethodName().toLowerCase() + "2"); sftpFs.rename(file1, file2); }
@Override
public Enumeration<URL> getResources(String name) throws IOException {
    // Collect resources from each source in the order dictated by the
    // class-loading strategy for this resource name.
    List<URL> resources = new ArrayList<>();
    ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
    log.trace("Received request to load resources '{}'", name);
    for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
        if (classLoadingSource == ClassLoadingStrategy.Source.APPLICATION) {
            // Application resources come from the parent loader, when present.
            if (getParent() != null) {
                resources.addAll(Collections.list(getParent().getResources(name)));
            }
        } else if (classLoadingSource == ClassLoadingStrategy.Source.PLUGIN) {
            resources.addAll(Collections.list(findResources(name)));
        } else if (classLoadingSource == ClassLoadingStrategy.Source.DEPENDENCIES) {
            resources.addAll(findResourcesFromDependencies(name));
        }
    }
    return Collections.enumeration(resources);
}
@Test
void parentLastGetResourcesNonExisting() throws IOException {
    // Looking up a resource that exists in no source (parent, plugin or
    // dependencies) must yield an empty enumeration rather than failing.
    assertFalse(parentLastPluginClassLoader.getResources("META-INF/non-existing-file").hasMoreElements());
}
public WsResponse call(WsRequest request) {
    // WS calls are forbidden in medium-test mode; fail fast if attempted.
    checkState(!globalMode.isMediumTest(), "No WS call should be made in medium test mode");
    // Execute the call, then run the post-checks before handing the response back.
    final WsResponse response = target.wsConnector().call(request);
    failIfUnauthorized(response);
    checkAuthenticationWarnings(response);
    return response;
}
@Test
public void call_whenBadRequest_shouldFailWithMessage() {
    WsRequest request = newRequest();
    // Stub the endpoint to answer 400 with a standard SonarQube error payload.
    server.stubFor(get(urlEqualTo(URL_ENDPOINT))
        .willReturn(aResponse()
            .withStatus(400)
            .withBody("{\"errors\":[{\"msg\":\"Boo! bad request! bad!\"}]}")));

    DefaultScannerWsClient client = new DefaultScannerWsClient(wsClient, true,
        new GlobalAnalysisMode(new ScannerProperties(Collections.emptyMap())), analysisWarnings);

    // The client must surface the server-provided error text as a MessageException.
    assertThatThrownBy(() -> client.call(request))
        .isInstanceOf(MessageException.class)
        .hasMessage("Boo! bad request! bad!");
}
public void registerPublisher(Function<NodeEngine, MetricsPublisher> registerFunction) {
    if (!config.isEnabled()) {
        // Metrics disabled: skip the registration, leaving a trace for debugging.
        logger.fine("Custom publisher is not registered with function %s as the metrics system is disabled",
            registerFunction);
        return;
    }
    // Materialize the publisher against this node and make sure the periodic
    // collector is running to feed it.
    publishers.add(registerFunction.apply(nodeEngine));
    scheduleMetricsCollectorIfNeeded();
}
@Test
public void testMetricsCollectedIfMetricsEnabledAndMcJmxDisabledButCustomPublisherRegistered() {
    // Metrics are globally enabled but both built-in publishers (Management
    // Center and JMX) are switched off.
    config.getMetricsConfig()
        .setEnabled(true);
    config.getMetricsConfig().getManagementCenterConfig()
        .setEnabled(false);
    config.getMetricsConfig().getJmxConfig()
        .setEnabled(false);

    MetricsPublisher publisherMock = mock(MetricsPublisher.class);
    MetricsService metricsService = prepareMetricsService();
    metricsService.registerPublisher(nodeEngine -> publisherMock);

    // Registering a custom publisher alone must start the collection cycle,
    // eventually feeding both double and long metrics to the publisher.
    assertTrueEventually(() -> {
        verify(publisherMock, atLeastOnce()).publishDouble(any(), anyDouble());
        verify(publisherMock, atLeastOnce()).publishLong(any(), anyLong());
    });
}
/**
 * Registers exception classes whose occurrences should be logged without a
 * full stack trace. Thin delegate to the shared exceptions handler.
 *
 * @param exceptionClass exception types to suppress in logging
 */
public void addSuppressedLoggingExceptions(Class<?>... exceptionClass) {
    exceptionsHandler.addSuppressedLoggingExceptions(exceptionClass);
}
@Test
public void testExceptionsHandlerSuppressed() {
    // Register suppressed exception types both singly and in bulk.
    Server.ExceptionsHandler exceptionsHandler = new Server.ExceptionsHandler();
    exceptionsHandler.addSuppressedLoggingExceptions(IOException.class);
    exceptionsHandler.addSuppressedLoggingExceptions(RpcServerException.class, IpcException.class);

    // Every registered type is suppressed; an unregistered type is not.
    assertTrue(exceptionsHandler.isSuppressedLog(IOException.class));
    assertTrue(exceptionsHandler.isSuppressedLog(RpcServerException.class));
    assertTrue(exceptionsHandler.isSuppressedLog(IpcException.class));
    assertFalse(exceptionsHandler.isSuppressedLog(RpcClientException.class));
}
/**
 * Runs the given entry processor on every entry whose key is in {@code keys}
 * and returns the processor results keyed by those keys. Delegates to the
 * wrapped IMap.
 * NOTE(review): the raw {@code EntryProcessor} type matches the adapter
 * interface being implemented; it cannot be parameterized here.
 */
@Override
public Map<K, Object> executeOnKeys(Set<K> keys, com.hazelcast.map.EntryProcessor entryProcessor) {
    return map.executeOnKeys(keys, entryProcessor);
}
@Test
public void testExecuteOnKeys() {
    map.put(23, "value-23");
    map.put(42, "value-42");
    map.put(65, "value-65");

    // 88 is absent from the map, so only 23 and 65 should be processed.
    Set<Integer> keys = new HashSet<>(asList(23, 65, 88));
    Map<Integer, Object> resultMap = adapter.executeOnKeys(keys, new IMapReplaceEntryProcessor("value", "newValue"));

    // Results cover only the existing keys, with the processor's return values.
    assertEquals(2, resultMap.size());
    assertEquals("newValue-23", resultMap.get(23));
    assertEquals("newValue-65", resultMap.get(65));

    // The map is mutated in place for the processed keys; 42 is untouched and
    // 88 was never created.
    assertEquals("newValue-23", map.get(23));
    assertEquals("value-42", map.get(42));
    assertEquals("newValue-65", map.get(65));
    assertNull(map.get(88));
}
/**
 * Returns the HTTP headers to emit on a CORS preflight response. Each
 * configured value is a {@link Callable}, so it is re-evaluated on every call;
 * {@code Iterable} results are added as multi-valued headers.
 */
public HttpHeaders preflightResponseHeaders() {
    // Nothing configured: share the immutable empty-headers instance.
    if (preflightHeaders.isEmpty()) {
        return EmptyHttpHeaders.INSTANCE;
    }
    // Local renamed from 'preflightHeaders' to stop shadowing the field of the
    // same name iterated below.
    final HttpHeaders headers = new DefaultHttpHeaders();
    for (Entry<CharSequence, Callable<?>> entry : preflightHeaders.entrySet()) {
        final Object value = getValue(entry.getValue());
        if (value instanceof Iterable) {
            headers.add(entry.getKey(), (Iterable<?>) value);
        } else {
            headers.add(entry.getKey(), value);
        }
    }
    return headers;
}
@Test
public void emptyPreflightResponseHeaders() {
    // With preflight headers explicitly disabled, the config must return the
    // shared immutable empty-headers instance rather than a fresh object.
    final CorsConfig cors = forAnyOrigin().noPreflightResponseHeaders().build();
    assertThat(cors.preflightResponseHeaders(), equalTo((HttpHeaders) EmptyHttpHeaders.INSTANCE));
}
/**
 * Returns the in-memory snapshot for {@code epoch}, creating it if needed.
 * Snapshots form an epoch-ordered doubly linked list anchored at {@code head};
 * a new snapshot may only be appended after the current latest one.
 *
 * @throws RuntimeException if a snapshot with a higher epoch already exists
 */
Snapshot getOrCreateSnapshot(long epoch) {
    // head.prev() is the most recent snapshot (head is a sentinel).
    Snapshot last = head.prev();
    if (last.epoch() > epoch) {
        throw new RuntimeException("Can't create a new in-memory snapshot at epoch " + epoch
            + " because there is already a snapshot with epoch " + last.epoch()
            + ". Snapshot epochs are " + epochsToString());
    } else if (last.epoch() == epoch) {
        // Idempotent: re-requesting the latest epoch returns the existing snapshot.
        return last;
    }
    Snapshot snapshot = new Snapshot(epoch);
    last.appendNext(snapshot);
    snapshots.put(epoch, snapshot);
    log.debug("Creating in-memory snapshot {}", epoch);
    return snapshot;
}
@Test
public void testCreateSnapshotOfLatest() {
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    registry.getOrCreateSnapshot(10);
    // Requesting the latest epoch twice must hand back the identical snapshot
    // rather than failing or creating a sibling.
    Snapshot first = registry.getOrCreateSnapshot(12);
    Snapshot second = registry.getOrCreateSnapshot(12);
    assertEquals(first, second);
}
/**
 * Associates {@code value} with {@code key} using the given time-to-live,
 * blocking on the async variant until it completes.
 *
 * @return the previous value mapped to the key, or {@code null} if none
 */
@Override
public V put(K key, V value, Duration ttl) {
    return get(putAsync(key, value, ttl));
}
@Test
public void testPutGetTTL() throws InterruptedException {
    RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simple04");
    Assertions.assertNull(map.get(new SimpleKey("33")));
    // Store an entry with a 2-second TTL.
    map.put(new SimpleKey("33"), new SimpleValue("44"), Duration.ofSeconds(2));

    SimpleValue val1 = map.get(new SimpleKey("33"));
    Assertions.assertEquals("44", val1.getValue());

    // Halfway through the TTL the entry must still be readable and counted.
    Thread.sleep(1000);

    Assertions.assertEquals(1, map.size());
    SimpleValue val2 = map.get(new SimpleKey("33"));
    Assertions.assertEquals("44", val2.getValue());
    Assertions.assertEquals(1, map.size());

    // After the TTL fully elapses the entry disappears.
    Thread.sleep(1000);

    Assertions.assertNull(map.get(new SimpleKey("33")));

    map.destroy();
}
/**
 * Queries a page of sensitive words matching the request's filters
 * (name, tag, status, creation-time range). Thin delegate to the mapper.
 */
@Override
public PageResult<SensitiveWordDO> getSensitiveWordPage(SensitiveWordPageReqVO pageReqVO) {
    return sensitiveWordMapper.selectPage(pageReqVO);
}
@Test
public void testGetSensitiveWordPage() {
    // mock data: this record matches every filter of the query below
    SensitiveWordDO dbSensitiveWord = randomPojo(SensitiveWordDO.class, o -> {
        o.setName("笨蛋");
        o.setTags(Arrays.asList("论坛", "蔬菜"));
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setCreateTime(buildTime(2022, 2, 8));
    });
    sensitiveWordMapper.insert(dbSensitiveWord);
    // record whose name does not match
    sensitiveWordMapper.insert(cloneIgnoreId(dbSensitiveWord, o -> o.setName("傻瓜")));
    // record whose tags do not match
    sensitiveWordMapper.insert(cloneIgnoreId(dbSensitiveWord, o -> o.setTags(Arrays.asList("短信", "日用品"))));
    // record whose createTime does not match
    sensitiveWordMapper.insert(cloneIgnoreId(dbSensitiveWord, o -> o.setCreateTime(buildTime(2022, 2, 16))));
    // prepare the query parameters
    SensitiveWordPageReqVO reqVO = new SensitiveWordPageReqVO();
    reqVO.setName("笨");
    reqVO.setTag("论坛");
    reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
    reqVO.setCreateTime(buildBetweenTime(2022, 2, 1, 2022, 2, 12));

    // invoke
    PageResult<SensitiveWordDO> pageResult = sensitiveWordService.getSensitiveWordPage(reqVO);
    // assert: only the first record survives all filters
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbSensitiveWord, pageResult.getList().get(0));
}
/**
 * Looks up the record store of {@code mapName} on the given partition,
 * delegating through the owning partition container.
 */
@Override
public RecordStore getRecordStore(int partitionId, String mapName) {
    return getPartitionContainer(partitionId).getRecordStore(mapName);
}
// The generic (non-data) partition id is invalid for a record-store lookup;
// with assertions enabled (-ea) the service is expected to trip an assertion.
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testGetRecordStoreWithSkipLoading_withGenericPartitionId() {
    mapServiceContext.getRecordStore(GENERIC_PARTITION_ID, "anyMap", true);
}
/**
 * Registers the index's main type mapping. May be called at most once; the
 * recorded main type is later used to validate relation mappings.
 *
 * @throws IllegalStateException if a main type was already defined
 */
@Override
public TypeMapping createTypeMapping(IndexMainType mainType) {
    checkState(this.mainType == null, "Main type can only be defined once");
    this.mainType = mainType;
    return super.createTypeMapping(mainType);
}
@Test
public void createTypeMapping_with_IndexRelationType_fails_with_IAE_if_mainType_does_not_match_defined_one() {
    Index index = Index.withRelations(SOME_INDEX_NAME);
    IndexType.IndexMainType mainType = IndexType.main(index, "foo");
    NewRegularIndex underTest = new NewRegularIndex(index, defaultSettingsConfiguration);
    // Define the main type first; a relation built on a different main type
    // must then be rejected with a message naming the defined one.
    underTest.createTypeMapping(mainType);

    assertThatThrownBy(() -> underTest.createTypeMapping(IndexType.relation(IndexType.main(index, "donut"), "bar")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("main type of relation must be "+ mainType);
}
/** Private constructor: static utility class, not meant to be instantiated. */
private QueryParamsDataMap() {
}
@Test
public void testNumericKeyIndices() throws Exception {
    // Bracketed numeric indices in query params ("ids[0]=...") must be parsed
    // into a DataList preserving positional order.
    String testQS = "ids[0]=0&ids[1]=1&ids[2]=2";
    DataMap queryParamDataMap = queryParamsDataMap(testQS);
    Object idsObj = queryParamDataMap.get("ids");
    Assert.assertTrue(idsObj instanceof DataList);
    DataList ids = (DataList)idsObj;
    Assert.assertEquals(ids.get(0), "0");
    Assert.assertEquals(ids.get(1), "1");
    Assert.assertEquals(ids.get(2), "2");
}
@VisibleForTesting
List<MappingRule> getMappingRules(MappingRulesDescription rules) {
    // Translate each declarative rule into a matcher/action pair, validating
    // mandatory fields and wiring the fallback policy onto the action.
    // Note: matcher creation stays before action creation so validation errors
    // surface in the documented order.
    List<MappingRule> mappingRules = new ArrayList<>();
    for (Rule rule : rules.getRules()) {
        checkMandatoryParameters(rule);
        MappingRuleMatcher matcher = createMatcher(rule);
        MappingRuleAction action = createAction(rule);
        setFallbackToAction(rule, action);
        mappingRules.add(new MappingRule(matcher, action));
    }
    return mappingRules;
}
@Test
public void testMatchesUnset() {
    // A rule without a match string is invalid and must be rejected while the
    // mapping rules are being built.
    rule.setMatches(null);
    expected.expect(IllegalArgumentException.class);
    expected.expectMessage("Match string is undefined");
    ruleCreator.getMappingRules(description);
}
/**
 * Checks whether a header with the given name and value exists, using
 * case-sensitive value comparison (delegates with {@code caseInsensitive=false}).
 */
@Override
public boolean contains(CharSequence name, CharSequence value) {
    return contains(name, value, false);
}
@Test
public void testPseudoHeadersWithRemovePreservesPseudoIterationOrder() {
    Http2Headers headers = newHeaders();
    // Collect the non-pseudo headers (names not starting with ':'), de-duplicated.
    // NOTE(review): '&&' binds tighter than '||', so an empty-named entry would
    // be added unconditionally here — presumably header names are never empty;
    // confirm against newHeaders().
    Http2Headers nonPseudoHeaders = new DefaultHttp2Headers();
    for (Entry<CharSequence, CharSequence> entry : headers) {
        if (entry.getKey().length() == 0
            || entry.getKey().charAt(0) != ':' && !nonPseudoHeaders.contains(entry.getKey())) {
            nonPseudoHeaders.add(entry.getKey(), entry.getValue());
        }
    }
    assertFalse(nonPseudoHeaders.isEmpty());

    // Remove all the non-pseudo headers and verify pseudo-header ordering and
    // presence survive every removal.
    for (Entry<CharSequence, CharSequence> nonPseudoHeaderEntry : nonPseudoHeaders) {
        assertTrue(headers.remove(nonPseudoHeaderEntry.getKey()));
        verifyPseudoHeadersFirst(headers);
        verifyAllPseudoHeadersPresent(headers);
    }

    // Add back all non-pseudo headers; the invariants must hold after every add.
    for (Entry<CharSequence, CharSequence> nonPseudoHeaderEntry : nonPseudoHeaders) {
        headers.add(nonPseudoHeaderEntry.getKey(), of("goo"));
        verifyPseudoHeadersFirst(headers);
        verifyAllPseudoHeadersPresent(headers);
    }
}
@Override
public boolean equals(Object obj) {
    // Round-robin distribution specs carry no state, so any two instances of
    // this type compare equal (and an instance equals itself trivially).
    return obj == this || obj instanceof RoundRobinDistributionSpec;
}
@Test
void testEquals() {
    // Any two round-robin specs are equal because the spec carries no state.
    DistributionSpec first = new RoundRobinDistributionSpec();
    DistributionSpec second = new RoundRobinDistributionSpec();
    assertEquals(first, second);
}
@Override
public List<ServiceCombServer> getUpdatedListOfServers() {
    // Fetch the latest instances from the registry and wrap each one as a server.
    final List<MicroServiceInstance> serverList = getRegisterCenterService()
        .getServerList(clientConfig.getClientName());
    return serverList.stream()
        .map(instance -> new ServiceCombServer(instance))
        .collect(Collectors.toList());
}
@Test
public void getUpdatedListOfServers() {
    // The returned server list must mirror the registry's instances one-to-one.
    final List<ServiceCombServer> initialListOfServers = serviceCombServiceList.getUpdatedListOfServers();
    Assert.assertEquals(initialListOfServers.size(), instances.size());
}
public static IndicesBlockStatus parseBlockSettings(final GetSettingsResponse settingsResponse) {
    // Scan every index's settings for "index.blocks.*" flags and record, per
    // index, the block settings that are explicitly enabled.
    final IndicesBlockStatus result = new IndicesBlockStatus();
    final ImmutableOpenMap<String, Settings> indexToSettingsMap = settingsResponse.getIndexToSettings();
    for (String index : indexToSettingsMap.keys().toArray(String.class)) {
        final Settings blockSettings = indexToSettingsMap.get(index).getByPrefix(BLOCK_SETTINGS_PREFIX);
        if (blockSettings.isEmpty()) {
            continue;
        }
        // Keep only flags whose value is true, restoring the full setting name.
        final Set<String> blockSettingsSetToTrue = blockSettings.names().stream()
            .filter(name -> blockSettings.getAsBoolean(name, false))
            .map(name -> BLOCK_SETTINGS_PREFIX + name)
            .collect(Collectors.toSet());
        if (!blockSettingsSetToTrue.isEmpty()) {
            result.addIndexBlocks(index, blockSettingsSetToTrue);
        }
    }
    return result;
}
@Test
public void parserProperlyResponseWithMultipleIndicesWithDifferentBlockSettings() {
    // Build a settings response covering every interesting combination: no
    // block settings, a false flag, a true flag, multiple true flags, and
    // mixed true/false flags.
    ImmutableOpenMap.Builder<String, Settings> settingsBuilder = new ImmutableOpenMap.Builder<>();
    settingsBuilder.put("index_with_no_block_settings", Settings.builder().put("lalala", 42).build());
    settingsBuilder.put("index_with_false_block_setting", Settings.builder().put("index.blocks.read_only", false).build());
    settingsBuilder.put("index_with_true_block_setting", Settings.builder().put("index.blocks.read_only", true).build());
    settingsBuilder.put("index_with_multiple_true_block_settings", Settings.builder()
        .put("index.blocks.read_only", true)
        .put("index.blocks.read_only_allow_delete", true)
        .build());
    settingsBuilder.put("index_with_mixed_block_settings", Settings.builder()
        .put("index.blocks.read_only", false)
        .put("index.blocks.read_only_allow_delete", true)
        .build());
    GetSettingsResponse settingsResponse = new GetSettingsResponse(settingsBuilder.build(), ImmutableOpenMap.of());

    final IndicesBlockStatus indicesBlockStatus = BlockSettingsParser.parseBlockSettings(settingsResponse);

    // Only indices with at least one flag set to true count as blocked.
    assertNotNull(indicesBlockStatus);
    assertEquals(3, indicesBlockStatus.countBlockedIndices());
    final Set<String> blockedIndices = indicesBlockStatus.getBlockedIndices();
    assertFalse(blockedIndices.contains("index_with_no_block_settings"));
    assertFalse(blockedIndices.contains("index_with_false_block_setting"));

    assertTrue(blockedIndices.contains("index_with_true_block_setting"));
    Collection<String> indexBlocks = indicesBlockStatus.getIndexBlocks("index_with_true_block_setting");
    assertEquals(1, indexBlocks.size());
    assertTrue(indexBlocks.contains("index.blocks.read_only"));

    assertTrue(blockedIndices.contains("index_with_multiple_true_block_settings"));
    indexBlocks = indicesBlockStatus.getIndexBlocks("index_with_multiple_true_block_settings");
    assertEquals(2, indexBlocks.size());
    assertTrue(indexBlocks.contains("index.blocks.read_only"));
    assertTrue(indexBlocks.contains("index.blocks.read_only_allow_delete"));

    // Mixed settings: only the true flag is reported.
    assertTrue(blockedIndices.contains("index_with_mixed_block_settings"));
    indexBlocks = indicesBlockStatus.getIndexBlocks("index_with_mixed_block_settings");
    assertEquals(1, indexBlocks.size());
    assertFalse(indexBlocks.contains("index.blocks.read_only"));
    assertTrue(indexBlocks.contains("index.blocks.read_only_allow_delete"));
}
@Override
public void updateProject(GoViewProjectUpdateReqVO updateReqVO) {
    // Validate that the project exists before updating.
    validateProjectExists(updateReqVO.getId());
    // Convert the request VO into the DO and persist the update.
    GoViewProjectDO updateObj = GoViewProjectConvert.INSTANCE.convert(updateReqVO);
    goViewProjectMapper.updateById(updateObj);
}
@Test
public void testUpdateProject_notExists() {
    // prepare parameters (random id — the project does not exist)
    GoViewProjectUpdateReqVO reqVO = randomPojo(GoViewProjectUpdateReqVO.class);

    // invoke and assert the "project not exists" service exception
    assertServiceException(() -> goViewProjectService.updateProject(reqVO), GO_VIEW_PROJECT_NOT_EXISTS);
}
/**
 * Computes, per column, the ordered list of forward-index / dictionary
 * operations needed to reconcile the segment's existing physical indexes with
 * the latest index configuration. Only applies to segment versions >= V3;
 * older versions return an empty map.
 *
 * @param segmentReader reader over the segment, used to inspect current indexes
 * @return map from column name to the operations to apply for that column
 */
@VisibleForTesting
Map<String, List<Operation>> computeOperations(SegmentDirectory.Reader segmentReader)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = new HashMap<>();

  // Does not work for segment versions < V3.
  if (_segmentDirectory.getSegmentMetadata().getVersion().compareTo(SegmentVersion.v3) < 0) {
    return columnOperationsMap;
  }

  // Snapshot the current physical state: all columns, and which ones have a
  // dictionary and/or a forward index today.
  Set<String> existingAllColumns = _segmentDirectory.getSegmentMetadata().getAllColumns();
  Set<String> existingDictColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.dictionary());
  Set<String> existingForwardIndexColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.forward());

  for (String column : existingAllColumns) {
    if (_schema != null && !_schema.hasColumn(column)) {
      // _schema will be null only in tests
      LOGGER.info("Column {} is not in schema, skipping updating forward index", column);
      continue;
    }
    // Existing physical state vs. the desired state from the new index config.
    boolean existingHasDict = existingDictColumns.contains(column);
    boolean existingHasFwd = existingForwardIndexColumns.contains(column);
    FieldIndexConfigs newConf = _fieldIndexConfigs.get(column);
    boolean newIsFwd = newConf.getConfig(StandardIndexes.forward()).isEnabled();
    boolean newIsDict = newConf.getConfig(StandardIndexes.dictionary()).isEnabled();
    boolean newIsRange = newConf.getConfig(StandardIndexes.range()).isEnabled();

    if (existingHasFwd && !newIsFwd) {
      // Existing column has a forward index. New column config disables the forward index
      ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
      if (columnMetadata.isSorted()) {
        // Check if the column is sorted. If sorted, disabling forward index should be a no-op. Do not return an
        // operation for this column related to disabling forward index.
        LOGGER.warn("Trying to disable the forward index for a sorted column {}, ignoring", column);
        continue;
      }
      if (existingHasDict) {
        if (!newIsDict) {
          // Dictionary was also disabled. Just disable the dictionary and remove it along with the forward index.
          // If range index exists, don't try to regenerate it on toggling the dictionary, throw an error instead
          Preconditions.checkState(!newIsRange, String.format(
              "Must disable range (enabled) index to disable the dictionary and forward index for column: %s or "
                  + "refresh / back-fill the forward index", column));
          columnOperationsMap.put(column,
              Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.DISABLE_DICTIONARY));
        } else {
          // Dictionary is still enabled, keep it but remove the forward index
          columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
        }
      } else {
        if (!newIsDict) {
          // Dictionary remains disabled and we should not reconstruct temporary forward index as dictionary based
          columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
        } else {
          // Dictionary is enabled, creation of dictionary and conversion to dictionary based forward index is needed
          columnOperationsMap.put(column,
              Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.ENABLE_DICTIONARY));
        }
      }
    } else if (!existingHasFwd && newIsFwd) {
      // Existing column does not have a forward index. New column config enables the forward index
      ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
      if (columnMetadata != null && columnMetadata.isSorted()) {
        // Check if the column is sorted. If sorted, disabling forward index should be a no-op and forward index
        // should already exist. Do not return an operation for this column related to enabling forward index.
        LOGGER.warn("Trying to enable the forward index for a sorted column {}, ignoring", column);
        continue;
      }
      // Get list of columns with inverted index
      Set<String> existingInvertedIndexColumns =
          segmentReader.toSegmentDirectory().getColumnsWithIndex(StandardIndexes.inverted());
      if (!existingHasDict || !existingInvertedIndexColumns.contains(column)) {
        // If either dictionary or inverted index is missing on the column there is no way to re-generate the forward
        // index. Treat this as a no-op and log a warning.
        LOGGER.warn("Trying to enable the forward index for a column {} missing either the dictionary ({}) and / or "
            + "the inverted index ({}) is not possible. Either a refresh or back-fill is required to get the "
            + "forward index, ignoring", column, existingHasDict ? "enabled" : "disabled",
            existingInvertedIndexColumns.contains(column) ? "enabled" : "disabled");
        continue;
      }
      columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_FORWARD_INDEX));
    } else if (!existingHasFwd) {
      // Forward index is disabled for the existing column and should remain disabled based on the latest config.
      // Need some checks to see whether the dictionary is being enabled or disabled here and take appropriate actions.
      // If the dictionary is not enabled on the existing column it must be on the new noDictionary column list.
      // Cannot enable the dictionary for a column with forward index disabled.
      Preconditions.checkState(existingHasDict || !newIsDict,
          String.format("Cannot regenerate the dictionary for column %s with forward index disabled. Please "
              + "refresh or back-fill the data to add back the forward index", column));
      if (existingHasDict && !newIsDict) {
        // Dictionary is currently enabled on this column but is supposed to be disabled. Remove the dictionary
        // and update the segment metadata. If the range index exists then throw an error since we are not
        // regenerating the range index on toggling the dictionary.
        Preconditions.checkState(!newIsRange, String.format(
            "Must disable range (enabled) index to disable the dictionary for a forwardIndexDisabled column: %s or "
                + "refresh / back-fill the forward index", column));
        columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
      }
    } else if (!existingHasDict && newIsDict) {
      // Existing column is RAW. New column is dictionary enabled.
      if (_schema == null || _tableConfig == null) {
        // This can only happen in tests.
        LOGGER.warn("Cannot enable dictionary for column={} as schema or tableConfig is null.", column);
        continue;
      }
      ColumnMetadata existingColumnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
      // Honor the optimize-dictionary overrides: only enable when the override
      // logic does not veto the dictionary for this column.
      if (DictionaryIndexType.ignoreDictionaryOverride(_tableConfig.getIndexingConfig().isOptimizeDictionary(),
          _tableConfig.getIndexingConfig().isOptimizeDictionaryForMetrics(),
          _tableConfig.getIndexingConfig().getNoDictionarySizeRatioThreshold(),
          existingColumnMetadata.getFieldSpec(), _fieldIndexConfigs.get(column),
          existingColumnMetadata.getCardinality(), existingColumnMetadata.getTotalNumberOfEntries())) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_DICTIONARY));
      }
    } else if (existingHasDict && !newIsDict) {
      // Existing column has dictionary. New config for the column is RAW.
      if (shouldDisableDictionary(column, _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column))) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
      }
    } else if (!existingHasDict) {
      // Both existing and new column is RAW forward index encoded. Check if compression needs to be changed.
      // TODO: Also check if raw index version needs to be changed
      if (shouldChangeRawCompressionType(column, segmentReader)) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
      }
    } else {
      // Both existing and new column is dictionary encoded. Check if compression needs to be changed.
      if (shouldChangeDictIdCompressionType(column, segmentReader)) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
      }
    }
  }

  return columnOperationsMap;
}
@Test
public void testComputeOperationDisableDictionary()
    throws Exception {
  // Setup: open the existing segment so computeOperations can inspect the current index state.
  SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
  SegmentDirectory segmentLocalFSDirectory =
      new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
  SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();

  // TEST1: Disable dictionary for a raw column. Should be a no-op.
  IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  indexLoadingConfig.addNoDictionaryColumns(DIM_SNAPPY_INTEGER);
  ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  Map<String, List<ForwardIndexHandler.Operation>> operationMap = fwdIndexHandler.computeOperations(writer);
  assertEquals(operationMap, Collections.EMPTY_MAP);

  // TEST2: Disable dictionary for a dictionary SV column. Expect a single DISABLE_DICTIONARY operation.
  indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  indexLoadingConfig.addNoDictionaryColumns(DIM_DICT_INTEGER);
  fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  operationMap = fwdIndexHandler.computeOperations(writer);
  assertEquals(operationMap.get(DIM_DICT_INTEGER),
      Collections.singletonList(ForwardIndexHandler.Operation.DISABLE_DICTIONARY));

  // TEST3: Disable dictionary for a dictionary MV column. Same expectation as the SV case.
  indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  indexLoadingConfig.addNoDictionaryColumns(DIM_DICT_MV_BYTES);
  fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  operationMap = fwdIndexHandler.computeOperations(writer);
  assertEquals(operationMap.get(DIM_DICT_MV_BYTES),
      Collections.singletonList(ForwardIndexHandler.Operation.DISABLE_DICTIONARY));

  // TEST4: Disable dictionary and enable inverted index. Should be a no-op.
  indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  indexLoadingConfig.addNoDictionaryColumns(DIM_DICT_STRING);
  indexLoadingConfig.addInvertedIndexColumns(DIM_DICT_STRING);
  fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  operationMap = fwdIndexHandler.computeOperations(writer);
  assertEquals(operationMap, Collections.EMPTY_MAP);

  // Tear down
  segmentLocalFSDirectory.close();
}
@VisibleForTesting boolean populateMessageSchema(MessageImpl msg, SendCallback callback) { MessageMetadata msgMetadataBuilder = msg.getMessageBuilder(); if (msg.getSchemaInternal() == schema) { schemaVersion.ifPresent(v -> msgMetadataBuilder.setSchemaVersion(v)); msg.setSchemaState(MessageImpl.SchemaState.Ready); return true; } // If the message is from the replicator and without replicated schema // Which means the message is written with BYTES schema // So we don't need to replicate schema to the remote cluster if (msg.hasReplicateFrom() && msg.getSchemaInfoForReplicator() == null) { msg.setSchemaState(MessageImpl.SchemaState.Ready); return true; } if (!isMultiSchemaEnabled(true)) { PulsarClientException.InvalidMessageException e = new PulsarClientException.InvalidMessageException( format("The producer %s of the topic %s is disabled the `MultiSchema`", producerName, topic) , msg.getSequenceId()); completeCallbackAndReleaseSemaphore(msg.getUncompressedSize(), callback, e); return false; } byte[] schemaVersion = schemaCache.get(msg.getSchemaHash()); if (schemaVersion != null) { if (schemaVersion != SchemaVersion.Empty.bytes()) { msgMetadataBuilder.setSchemaVersion(schemaVersion); } msg.setSchemaState(MessageImpl.SchemaState.Ready); } return true; }
@Test public void testPopulateMessageSchema() { MessageImpl<?> msg = mock(MessageImpl.class); when(msg.hasReplicateFrom()).thenReturn(true); when(msg.getSchemaInternal()).thenReturn(mock(Schema.class)); when(msg.getSchemaInfoForReplicator()).thenReturn(null); ProducerImpl<?> producer = mock(ProducerImpl.class, withSettings() .defaultAnswer(Mockito.CALLS_REAL_METHODS)); assertTrue(producer.populateMessageSchema(msg, null)); verify(msg).setSchemaState(MessageImpl.SchemaState.Ready); }
@Override public FileEntity upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency); try { // Full size of file final long size = status.getLength() + status.getOffset(); final List<Future<TransferStatus>> parts = new ArrayList<>(); long offset = 0; long remaining = status.getLength(); String ref = null; for(int partNumber = 1; remaining > 0; partNumber++) { final FileUploadPartEntity uploadPartEntity = this.continueUpload(file, ref, partNumber); final long length; if(uploadPartEntity.isParallelParts()) { length = Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining); } else { length = remaining; } parts.add(this.submit(pool, file, local, throttle, listener, status, uploadPartEntity.getUploadUri(), partNumber, offset, length, callback)); remaining -= length; offset += length; ref = uploadPartEntity.getRef(); } final List<TransferStatus> checksums = Interruptibles.awaitAll(parts); final FileEntity entity = this.completeUpload(file, ref, status, checksums); // Mark parent status as complete status.withResponse(new BrickAttributesFinderFeature(session).toAttributes(entity)).setComplete(); return entity; } finally { // Cancel future tasks pool.shutdown(false); } }
@Test public void testUploadSmallPart() throws Exception { final BrickUploadFeature feature = new BrickUploadFeature(session, new BrickWriteFeature(session), 5 * 1024L * 1024L, 2); final Path root = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)); final String name = new AlphanumericRandomStringService().random(); final Path test = new Path(root, name, EnumSet.of(Path.Type.file)); final Local local = new Local(System.getProperty("java.io.tmpdir"), name); final int length = 56; final byte[] content = RandomUtils.nextBytes(length); IOUtils.write(content, local.getOutputStream(false)); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setMime("text/plain"); final BytecountStreamListener count = new BytecountStreamListener(); feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledLoginCallback()); assertEquals(content.length, count.getSent()); assertTrue(status.isComplete()); assertNotSame(PathAttributes.EMPTY, status.getResponse()); assertTrue(new BrickFindFeature(session).find(test)); final PathAttributes attributes = new BrickAttributesFinderFeature(session).find(test); assertEquals(content.length, attributes.getSize()); final byte[] compare = new byte[length]; IOUtils.readFully(new BrickReadFeature(session).read(test, new TransferStatus().withLength(length), new DisabledConnectionCallback()), compare); assertArrayEquals(content, compare); new BrickDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); local.delete(); }
@Override public int getOrder() { return PluginEnum.TARS.getCode(); }
@Test public void testGetOrder() { int result = tarsPluginUnderTest.getOrder(); assertEquals(PluginEnum.TARS.getCode(), result); }
@Override public void commit() { TableMetadata base = ops.current(); TableMetadata newMetadata = internalApply(base); ops.commit(base, newMetadata); }
@TestTemplate public void testEmptyUpdateStatistics() { assertTableMetadataVersion(0); TableMetadata base = readMetadata(); table.updatePartitionStatistics().commit(); assertThat(table.ops().current()).isSameAs(base); assertTableMetadataVersion(1); }
private static String unescapeDocstring(String escaped) { // unescape "/*" and "*/" String commentUnescaped = escaped.replace("&#47;&#42;", "/*").replace("&#42;&#47;", "*/"); return StringEscapeUtils.unescapeHtml4(commentUnescaped); }
@Test public void testUnescapeDocstring() { String extracted = PdlParseUtils.extractMarkdown( " /**\n" + " * &lt;div&gt;Some html&lt;/div&gt;\n" + " * &#47;&#42; A comment &#42;&#47;\n" + " */\n"); assertEquals(extracted, "<div>Some html</div>\n" + "/* A comment */"); }
private boolean detectCharset(byte[] buf) throws IOException { ByteCharsetDetector detector = new ByteCharsetDetector(new CharsetValidation(), userEncoding); ByteOrderMark bom = detector.detectBOM(buf); if (bom != null) { detectedCharset = Charset.forName(bom.getCharsetName()); stream.skip(bom.length()); return true; } detectedCharset = detector.detect(buf); return detectedCharset != null; }
@Test public void fail_if_file_doesnt_exist() { assertThatThrownBy(() -> detectCharset(Paths.get("non_existing"), UTF_8)) .isInstanceOf(IllegalStateException.class) .hasMessage("Unable to read file " + Paths.get("non_existing").toAbsolutePath()); }
@Deprecated @Override public void init(final ProcessorContext context, final StateStore root) { this.context = asInternalProcessorContext(context); super.init(context, root); }
@SuppressWarnings("deprecation") @Test public void shouldDelegateDeprecatedInit() { store.init((ProcessorContext) context, store); verify(inner).init((ProcessorContext) context, store); }
public boolean createParent(String path) { return zkClient.createParent(path); }
@Test public void testCreateParent() { boolean result = zooKeeperBufferedClient.createParent(PARENT_PATH); Assert.assertTrue(result); }
public final void containsAnyIn(@Nullable Iterable<?> expected) { checkNotNull(expected); Collection<?> actual = iterableToCollection(checkNotNull(this.actual)); for (Object item : expected) { if (actual.contains(item)) { return; } } if (hasMatchingToStringPair(actual, expected)) { failWithoutActual( fact("expected to contain any of", countDuplicatesAndAddTypeInfo(expected)), simpleFact("but did not"), fact( "though it did contain", countDuplicatesAndAddTypeInfo( retainMatchingToString(checkNotNull(this.actual), /* itemsToCheck= */ expected))), fullContents()); } else { failWithActual("expected to contain any of", expected); } }
@Test public void iterableContainsAnyInIterable() { assertThat(asList(1, 2, 3)).containsAnyIn(asList(1, 10, 100)); expectFailureWhenTestingThat(asList(1, 2, 3)).containsAnyIn(asList(5, 6, 0)); assertFailureKeys("expected to contain any of", "but was"); assertFailureValue("expected to contain any of", "[5, 6, 0]"); }
@Override public void onResponse(Call call, okhttp3.Response okHttpResponse) { try { final Response response = OkHttpHttpClient.convertResponse(okHttpResponse); try { @SuppressWarnings("unchecked") final T t = converter == null ? (T) response : converter.convert(response); okHttpFuture.setResult(t); if (callback != null) { callback.onCompleted(t); } } catch (IOException | RuntimeException e) { okHttpFuture.setException(e); if (callback != null) { callback.onThrowable(e); } } } finally { okHttpFuture.finish(); } }
@Test public void shouldReleaseLatchOnSuccess() throws Exception { handler = new OAuthAsyncCompletionHandler<>(callback, ALL_GOOD_RESPONSE_CONVERTER, future); call.enqueue(handler); final Request request = new Request.Builder().url("http://localhost/").build(); final okhttp3.Response response = new okhttp3.Response.Builder() .request(request) .protocol(Protocol.HTTP_1_1) .code(200) .message("ok") .body(ResponseBody.create(new byte[0], MediaType.get("text/plain"))) .build(); handler.onResponse(call, response); assertNotNull(callback.getResponse()); assertNull(callback.getThrowable()); // verify latch is released assertEquals("All good", future.get()); }
public PDDocument extract() throws IOException { if (endPage - startPage + 1 <= 0) { return new PDDocument(); } Splitter splitter = new Splitter(); splitter.setStartPage(Math.max(startPage, 1)); splitter.setEndPage(Math.min(endPage, sourceDocument.getNumberOfPages())); splitter.setSplitAtPage(getEndPage() - getStartPage() + 1); List<PDDocument> splitted = splitter.split(sourceDocument); return splitted.get(0); }
@Test void testExtract() throws Exception { PDDocument sourcePdf = null; PDDocument result = null; try { // this should work for most users sourcePdf = Loader.loadPDF(new File("src/test/resources/input/cweb.pdf")); PageExtractor instance = new PageExtractor(sourcePdf); result = instance.extract(); assertEquals(sourcePdf.getNumberOfPages(), result.getNumberOfPages()); closeDoc(result); instance = new PageExtractor(sourcePdf, 1, 1); result = instance.extract(); assertEquals(1, result.getNumberOfPages()); closeDoc(result); instance = new PageExtractor(sourcePdf, 1, 5); result = instance.extract(); assertEquals(5, result.getNumberOfPages()); closeDoc(result); instance = new PageExtractor(sourcePdf, 5, 10); result = instance.extract(); assertEquals(6, result.getNumberOfPages()); closeDoc(result); instance = new PageExtractor(sourcePdf, 2, 1); result = instance.extract(); assertEquals(0, result.getNumberOfPages()); closeDoc(result); } finally { closeDoc(sourcePdf); closeDoc(result); } }
@Override public String getFileId(final Path file) throws BackgroundException { if(StringUtils.isNotBlank(file.attributes().getFileId())) { return file.attributes().getFileId(); } if(file.isRoot() || new SimplePathPredicate(file).test(DriveHomeFinderService.MYDRIVE_FOLDER) || new SimplePathPredicate(file).test(DriveHomeFinderService.SHARED_FOLDER_NAME) || new SimplePathPredicate(file).test(DriveHomeFinderService.SHARED_DRIVES_NAME)) { return DriveHomeFinderService.ROOT_FOLDER_ID; } final String cached = super.getFileId(file); if(cached != null) { if(log.isDebugEnabled()) { log.debug(String.format("Return cached fileid %s for file %s", cached, file)); } return cached; } if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) { final Path found = new DriveTeamDrivesListService(session, this).list(file.getParent(), new DisabledListProgressListener()).find(new SimplePathPredicate(file) ); if(null == found) { throw new NotfoundException(file.getAbsolute()); } return this.cache(file, found.attributes().getFileId()); } final Path query; if(file.isPlaceholder()) { query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes()); } else { query = file; } final AttributedList<Path> list = new FileidDriveListService(session, this, query).list(file.getParent(), new DisabledListProgressListener()); final Path found = list.filter(new IgnoreTrashedComparator()).find(new SimplePathPredicate(file)); if(null == found) { throw new NotfoundException(file.getAbsolute()); } return this.cache(file, found.attributes().getFileId()); }
@Test public void testGetFileidRoot() throws Exception { assertEquals("root", new DriveFileIdProvider(new DriveSession(new Host(new DriveProtocol(), ""), new DisabledX509TrustManager(), new DefaultX509KeyManager())) .getFileId(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)))); assertEquals("root", new DriveFileIdProvider(new DriveSession(new Host(new DriveProtocol(), ""), new DisabledX509TrustManager(), new DefaultX509KeyManager())) .getFileId(new Path("/My Drive", EnumSet.of(Path.Type.directory, Path.Type.volume)))); }
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException { ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null")); if (null == value) { return convertNullValue(convertType); } if (value.getClass() == convertType) { return value; } if (value instanceof LocalDateTime) { return convertLocalDateTimeValue((LocalDateTime) value, convertType); } if (value instanceof Timestamp) { return convertTimestampValue((Timestamp) value, convertType); } if (URL.class.equals(convertType)) { return convertURL(value); } if (value instanceof Number) { return convertNumberValue(value, convertType); } if (value instanceof Date) { return convertDateValue((Date) value, convertType); } if (value instanceof byte[]) { return convertByteArrayValue((byte[]) value, convertType); } if (boolean.class.equals(convertType)) { return convertBooleanValue(value); } if (String.class.equals(convertType)) { return value.toString(); } try { return convertType.cast(value); } catch (final ClassCastException ignored) { throw new SQLFeatureNotSupportedException("getObject with type"); } }
@Test void assertConvertTimestampValue() throws SQLException { LocalDateTime localDateTime = LocalDateTime.of(2021, Month.DECEMBER, 23, 19, 30); Timestamp timestamp = Timestamp.valueOf(localDateTime); assertThat(ResultSetUtils.convertValue(timestamp, LocalDateTime.class), is(localDateTime)); assertThat(ResultSetUtils.convertValue(timestamp, LocalDate.class), is(LocalDate.of(2021, Month.DECEMBER, 23))); assertThat(ResultSetUtils.convertValue(timestamp, LocalTime.class), is(LocalTime.of(19, 30))); }
@Udf public Long round(@UdfParameter final long val) { return val; }
@Test
public void shouldRoundBigDecimalWithDecimalPlacesPositive() {
    // Scale 0: round half-up to the integer while preserving the input's decimal scale.
    assertThat(udf.round(new BigDecimal("0"), 0), is(new BigDecimal("0")));
    assertThat(udf.round(new BigDecimal("1.0"), 0), is(new BigDecimal("1.0")));
    assertThat(udf.round(new BigDecimal("1.1"), 0), is(new BigDecimal("1.0")));
    assertThat(udf.round(new BigDecimal("1.5"), 0), is(new BigDecimal("2.0")));
    assertThat(udf.round(new BigDecimal("1.75"), 0), is(new BigDecimal("2.00")));
    assertThat(udf.round(new BigDecimal("100.1"), 0), is(new BigDecimal("100.0")));
    assertThat(udf.round(new BigDecimal("100.5"), 0), is(new BigDecimal("101.0")));
    assertThat(udf.round(new BigDecimal("100.75"), 0), is(new BigDecimal("101.00")));
    // One decimal place.
    assertThat(udf.round(new BigDecimal("100.10"), 1), is(new BigDecimal("100.10")));
    assertThat(udf.round(new BigDecimal("100.11"), 1), is(new BigDecimal("100.10")));
    assertThat(udf.round(new BigDecimal("100.15"), 1), is(new BigDecimal("100.20")));
    assertThat(udf.round(new BigDecimal("100.17"), 1), is(new BigDecimal("100.20")));
    // Two decimal places.
    assertThat(udf.round(new BigDecimal("100.110"), 2), is(new BigDecimal("100.110")));
    assertThat(udf.round(new BigDecimal("100.111"), 2), is(new BigDecimal("100.110")));
    assertThat(udf.round(new BigDecimal("100.115"), 2), is(new BigDecimal("100.120")));
    assertThat(udf.round(new BigDecimal("100.117"), 2), is(new BigDecimal("100.120")));
    // Three decimal places.
    assertThat(udf.round(new BigDecimal("100.1110"), 3), is(new BigDecimal("100.1110")));
    assertThat(udf.round(new BigDecimal("100.1111"), 3), is(new BigDecimal("100.1110")));
    assertThat(udf.round(new BigDecimal("100.1115"), 3), is(new BigDecimal("100.1120")));
    assertThat(udf.round(new BigDecimal("100.1117"), 3), is(new BigDecimal("100.1120")));
    // Negative scale rounds to tens, hundreds, ... while keeping the original scale.
    assertThat(udf.round(new BigDecimal("12345.67"), -1), is(new BigDecimal("12350.00")));
    assertThat(udf.round(new BigDecimal("12345.67"), -2), is(new BigDecimal("12300.00")));
    assertThat(udf.round(new BigDecimal("12345.67"), -3), is(new BigDecimal("12000.00")));
    assertThat(udf.round(new BigDecimal("12345.67"), -4), is(new BigDecimal("10000.00")));
    assertThat(udf.round(new BigDecimal("12345.67"), -5), is(new BigDecimal("0.00")));
}
public void publishArtifacts(List<ArtifactPlan> artifactPlans, EnvironmentVariableContext environmentVariableContext) { final File pluggableArtifactFolder = publishPluggableArtifacts(artifactPlans, environmentVariableContext); try { final List<ArtifactPlan> mergedPlans = artifactPlanFilter.getBuiltInMergedArtifactPlans(artifactPlans); if (isMetadataFolderEmpty(pluggableArtifactFolder)) { LOGGER.info("Pluggable metadata folder is empty."); } else if (pluggableArtifactFolder != null) { mergedPlans.add(0, new ArtifactPlan(ArtifactPlanType.file, format("%s%s*", pluggableArtifactFolder.getName(), File.separator), PLUGGABLE_ARTIFACT_METADATA_FOLDER)); } for (ArtifactPlan artifactPlan : mergedPlans) { try { artifactPlan.publishBuiltInArtifacts(goPublisher, workingDirectory); } catch (Exception e) { failedArtifact.add(artifactPlan); } } if (!failedArtifact.isEmpty()) { StringBuilder builder = new StringBuilder(); for (ArtifactPlan artifactPlan : failedArtifact) { artifactPlan.printArtifactInfo(builder); } throw new RuntimeException(format("[%s] Uploading finished. Failed to upload %s.", PRODUCT_NAME, builder)); } } finally { FileUtils.deleteQuietly(pluggableArtifactFolder); } }
@Test public void shouldReportErrorWithTestArtifactSrcWhenUploadFails() throws Exception { List<ArtifactPlan> artifactPlans = new ArrayList<>(); new DefaultJobPlan(new Resources(), artifactPlans, -1, null, null, new EnvironmentVariables(), new EnvironmentVariables(), null, null); artifactPlans.add(new ArtifactPlan(ArtifactPlanType.unit, "test1", "test")); artifactPlans.add(new ArtifactPlan(ArtifactPlanType.unit, "test2", "test")); prepareTestFolder(workingFolder, "test1"); prepareTestFolder(workingFolder, "test2"); publisher.setShouldFail(true); assertThatThrownBy(() -> artifactsPublisher.publishArtifacts(artifactPlans, env)) .hasMessageContaining("Failed to upload [test1, test2]"); }
public void verify(CvCertificate cert) { final Deque<CvCertificate> chain = getTrustChain(cert); // Only CVCA has domain parameters final ECDomainParameters params = chain.getLast().getBody().getPublicKey().getParams(); while (!chain.isEmpty()) { final CvCertificate signer = chain.pop(); signatureService.verify(cert, signer.getBody().getPublicKey(), params); cert = signer; } }
@Test public void shouldNotVerifyIfNotTrusted() { ClientException thrown = assertThrows(ClientException.class, () -> service.verify(readCvCertificate("rdw/acc/cvca.cvcert"))); assertEquals("Could not find trust chain", thrown.getMessage()); }
@Override public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) { SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof ShowFunctionStatusStatement) { return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement)); } if (sqlStatement instanceof ShowProcedureStatusStatement) { return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement)); } if (sqlStatement instanceof ShowTablesStatement) { return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType())); } return Optional.empty(); }
@Test void assertCreateWithSelectStatementFromInformationSchemaOfDefaultExecutorTables() { initProxyContext(Collections.emptyMap()); SimpleTableSegment tableSegment = new SimpleTableSegment(new TableNameSegment(10, 13, new IdentifierValue("ENGINES"))); tableSegment.setOwner(new OwnerSegment(7, 8, new IdentifierValue("information_schema"))); MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class); when(selectStatement.getFrom()).thenReturn(Optional.of(tableSegment)); when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement); Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "select ENGINE from ENGINES", "information_schema", Collections.emptyList()); assertTrue(actual.isPresent()); assertThat(actual.get(), instanceOf(DefaultDatabaseMetaDataExecutor.class)); }
@Deprecated @Override public Boolean hasAppendsOnly(org.apache.hadoop.hive.ql.metadata.Table hmsTable, SnapshotContext since) { TableDesc tableDesc = Utilities.getTableDesc(hmsTable); Table table = IcebergTableUtil.getTable(conf, tableDesc.getProperties()); return hasAppendsOnly(table.snapshots(), since); }
@Test public void testHasAppendsOnlyReturnsNullWhenTableIsEmpty() { SnapshotContext since = new SnapshotContext(42); HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler(); Boolean result = storageHandler.hasAppendsOnly(Collections.emptyList(), since); assertThat(result, is(nullValue())); }
@Override public List<SimpleColumn> toColumns( final ParsedSchema schema, final SerdeFeatures serdeFeatures, final boolean isKey) { SerdeUtils.throwOnUnsupportedFeatures(serdeFeatures, format.supportedFeatures()); Schema connectSchema = connectSrTranslator.toConnectSchema(schema); if (serdeFeatures.enabled(SerdeFeature.UNWRAP_SINGLES)) { connectSchema = SerdeUtils.wrapSingle(connectSchema, isKey); } if (connectSchema.type() != Type.STRUCT) { if (isKey) { throw new IllegalStateException("Key schemas are always unwrapped."); } throw new KsqlException("Schema returned from schema registry is anonymous type. " + "To use this schema with ksqlDB, set '" + CommonCreateConfigs.WRAP_SINGLE_VALUE + "=false' in the WITH clause properties."); } final Schema rowSchema = connectKsqlTranslator.toKsqlSchema(connectSchema); return rowSchema.fields().stream() .map(ConnectFormatSchemaTranslator::toColumn) .collect(Collectors.toList()); }
@Test public void shouldSupportBuildingColumnsFromPrimitiveValueSchema() { // Given: when(format.supportedFeatures()).thenReturn(Collections.singleton(SerdeFeature.UNWRAP_SINGLES)); // When: translator.toColumns(parsedSchema, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES), false); // Then: verify(connectKsqlTranslator).toKsqlSchema(SchemaBuilder.struct() .field("ROWVAL", connectSchema) .build()); }
public void complement() { bitSet.flip(0, partitionCount); resetSize(); }
@Test public void test_complement() { partitionIdSet = new PartitionIdSet(11, listOf(5, 6, 7, 8, 9, 10)); partitionIdSet.complement(); assertContents(partitionIdSet); }
@ScalarOperator(CAST) @LiteralParameters("x") @SqlType("varchar(x)") public static Slice castToVarchar(@LiteralParameter("x") long x, @SqlType(StandardTypes.BIGINT) long value) { // todo optimize me String stringValue = String.valueOf(value); // String is all-ASCII, so String.length() here returns actual code points count if (stringValue.length() <= x) { return utf8Slice(stringValue); } throw new PrestoException(INVALID_CAST_ARGUMENT, format("Value %s cannot be represented as varchar(%s)", value, x)); }
@Test public void testCastToVarchar() { assertFunction("cast(BIGINT '37' as varchar)", VARCHAR, "37"); assertFunction("cast(100000000017 as varchar)", VARCHAR, "100000000017"); assertFunction("cast(100000000017 as varchar(13))", createVarcharType(13), "100000000017"); assertFunction("cast(100000000017 as varchar(50))", createVarcharType(50), "100000000017"); assertInvalidCast("cast(100000000017 as varchar(2))", "Value 100000000017 cannot be represented as varchar(2)"); }
@Override public void info(String msg) { logger.info(msg); }
@Test public void testInfo() { Log mockLog = mock(Log.class); InternalLogger logger = new CommonsLogger(mockLog, "foo"); logger.info("a"); verify(mockLog).info("a"); }
public EndpointResponse streamQuery( final KsqlSecurityContext securityContext, final KsqlRequest request, final CompletableFuture<Void> connectionClosedFuture, final Optional<Boolean> isInternalRequest, final MetricsCallbackHolder metricsCallbackHolder, final Context context ) { throwIfNotConfigured(); activenessRegistrar.updateLastRequestTime(); final PreparedStatement<?> statement = parseStatement(request); CommandStoreUtil.httpWaitForCommandSequenceNumber( commandQueue, request, commandQueueCatchupTimeout); return handleStatement(securityContext, request, statement, connectionClosedFuture, isInternalRequest, metricsCallbackHolder, context); }
@Test public void shouldNotWaitIfCommandSequenceNumberSpecified() throws Exception { // When: testResource.streamQuery( securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), Collections.emptyMap(), null), new CompletableFuture<>(), Optional.empty(), new MetricsCallbackHolder(), context ); // Then: verify(commandQueue, never()).ensureConsumedPast(anyLong(), any()); }
public static Result label(long durationInMillis) { double nbSeconds = durationInMillis / 1000.0; double nbMinutes = nbSeconds / 60; double nbHours = nbMinutes / 60; double nbDays = nbHours / 24; double nbYears = nbDays / 365; return getMessage(nbSeconds, nbMinutes, nbHours, nbDays, nbYears); }
@Test public void age_in_day() { DurationLabel.Result result = DurationLabel.label(now() - ago(30 * HOUR)); assertThat(result.key()).isEqualTo("duration.day"); assertThat(result.value()).isNull(); }
public static void handle(Exception e, RetryContext context) throws Exception { if (e instanceof RemoteFileNotFoundException) { handleRemoteFileNotFound((RemoteFileNotFoundException) e, context); } else if (e instanceof RpcException) { handleRpcException((RpcException) e, context); } else if (e instanceof UserException) { handleUserException((UserException) e, context); } else { throw e; } }
@Test public void testHandleUseException_2() throws Exception { String sql = "select * from t0"; StatementBase statementBase = SqlParser.parse(sql, connectContext.getSessionVariable()).get(0); ExecPlan execPlan = getExecPlan(sql); ExecuteExceptionHandler.RetryContext retryContext = new ExecuteExceptionHandler.RetryContext(0, execPlan, connectContext, statementBase); Assert.assertThrows(UserException.class, () -> ExecuteExceptionHandler.handle(new UserException("other exception"), retryContext)); }
public TerminateDriverFlyweight tokenBuffer( final DirectBuffer tokenBuffer, final int tokenOffset, final int tokenLength) { buffer.putInt(offset + TOKEN_LENGTH_OFFSET, tokenLength); if (null != tokenBuffer && tokenLength > 0) { buffer.putBytes(offset + tokenBufferOffset(), tokenBuffer, tokenOffset, tokenLength); } return this; }
@Test void tokenBuffer() { final int offset = 24; final UnsafeBuffer buffer = new UnsafeBuffer(ByteBuffer.allocate(128)); buffer.setMemory(0, offset, (byte)15); final TerminateDriverFlyweight flyweight = new TerminateDriverFlyweight(); flyweight.wrap(buffer, offset); flyweight.tokenBuffer(newBuffer(16), 4, 8); assertEquals(8, flyweight.tokenBufferLength()); assertEquals(TOKEN_BUFFER_OFFSET, flyweight.tokenBufferOffset()); assertEquals(TOKEN_BUFFER_OFFSET + 8, flyweight.length()); }
/**
 * Creates (or, with {@code replace}, upserts) a SQL-invoked function. Validates field
 * lengths against storage limits, then inside a transaction either inserts a new version,
 * undeletes an identical dropped version, or fails when the function already exists.
 */
@Override
public void createFunction(SqlInvokedFunction function, boolean replace) {
    // Validate catalog/language and that the incoming definition is unversioned.
    checkCatalog(function);
    checkFunctionLanguageSupported(function);
    checkArgument(!function.hasVersion(), "function '%s' is already versioned", function);

    QualifiedObjectName functionName = function.getFunctionId().getFunctionName();
    // Enforce storage column limits before touching the database.
    checkFieldLength("Catalog name", functionName.getCatalogName(), MAX_CATALOG_NAME_LENGTH);
    checkFieldLength("Schema name", functionName.getSchemaName(), MAX_SCHEMA_NAME_LENGTH);
    if (!functionNamespaceDao.functionNamespaceExists(functionName.getCatalogName(), functionName.getSchemaName())) {
        throw new PrestoException(NOT_FOUND, format("Function namespace not found: %s", functionName.getCatalogSchemaName()));
    }
    checkFieldLength("Function name", functionName.getObjectName(), MAX_FUNCTION_NAME_LENGTH);
    if (function.getParameters().size() > MAX_PARAMETER_COUNT) {
        throw new PrestoException(GENERIC_USER_ERROR, format("Function has more than %s parameters: %s", MAX_PARAMETER_COUNT, function.getParameters().size()));
    }
    for (Parameter parameter : function.getParameters()) {
        checkFieldLength("Parameter name", parameter.getName(), MAX_PARAMETER_NAME_LENGTH);
    }
    checkFieldLength(
            "Parameter type list",
            function.getFunctionId().getArgumentTypes().stream()
                    .map(TypeSignature::toString)
                    .collect(joining(",")),
            MAX_PARAMETER_TYPES_LENGTH);
    checkFieldLength("Return type", function.getSignature().getReturnType().toString(), MAX_RETURN_TYPE_LENGTH);

    jdbi.useTransaction(handle -> {
        FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass);
        // Lock the latest version row for this function id for the duration of the transaction.
        Optional<SqlInvokedFunctionRecord> latestVersion = transactionDao.getLatestRecordForUpdate(hash(function.getFunctionId()), function.getFunctionId());
        if (!replace && latestVersion.isPresent() && !latestVersion.get().isDeleted()) {
            throw new PrestoException(ALREADY_EXISTS, "Function already exists: " + function.getFunctionId());
        }
        if (!latestVersion.isPresent() || !latestVersion.get().getFunction().hasSameDefinitionAs(function)) {
            // New function or changed definition: insert as the next version.
            long newVersion = latestVersion.map(SqlInvokedFunctionRecord::getFunction).map(MySqlFunctionNamespaceManager::getLongVersion).orElse(0L) + 1;
            insertSqlInvokedFunction(transactionDao, function, newVersion);
        }
        else if (latestVersion.get().isDeleted()) {
            // Identical definition that was previously dropped: undelete the existing version.
            SqlInvokedFunction latest = latestVersion.get().getFunction();
            checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId());
            transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), false);
        }
    });
    refreshFunctionsCache(functionName);
}
/**
 * Functions created in two schemas must be listable globally and filterable by
 * catalog/schema LIKE patterns, including a custom escape character.
 */
@Test
public void testListFunction()
{
    createFunctionNamespace(TEST_CATALOG, "schema1");
    createFunctionNamespace(TEST_CATALOG, "schema2");
    SqlInvokedFunction function1 = constructTestFunction(QualifiedObjectName.valueOf(TEST_CATALOG, "schema1", "power_tower"));
    SqlInvokedFunction function2 = constructTestFunction(QualifiedObjectName.valueOf(TEST_CATALOG, "schema2", "power_tower"));
    createFunction(function1, false);
    createFunction(function2, false);
    // Freshly created functions get version "1".
    assertListFunctions(function1.withVersion("1"), function2.withVersion("1"));
    assertListFunctions(Optional.of(format("%s.%%", TEST_CATALOG)), Optional.empty(), function1.withVersion("1"), function2.withVersion("1"));
    assertListFunctions(Optional.of(format("%s.%s.%%", TEST_CATALOG, "schema1")), Optional.empty(), function1.withVersion("1"));
    assertListFunctions(Optional.of("%schema%"), Optional.empty(), function1.withVersion("1"), function2.withVersion("1"));
    // '$' escapes the '_' wildcard so "power_tower" is matched literally.
    assertListFunctions(Optional.of("%power$_tower"), Optional.of("$"), function1.withVersion("1"), function2.withVersion("1"));
}
public static Write write() { // 1000 for batch size is good enough in many cases, // ex: if document size is large, around 10KB, the request's size will be around 10MB // if document size is small, around 1KB, the request's size will be around 1MB return new AutoValue_SolrIO_Write.Builder().setMaxBatchSize(1000).build(); }
/**
 * Writes NUM_DOCS documents to Solr, then verifies the total document count and
 * that an even share of documents matches one scientist name.
 */
@Test
public void testWrite() throws Exception {
  List<SolrInputDocument> data = SolrIOTestUtils.createDocuments(NUM_DOCS);
  SolrIO.Write write = SolrIO.write().withConnectionConfiguration(connectionConfiguration).to(SOLR_COLLECTION);
  pipeline.apply(Create.of(data)).apply(write);
  pipeline.run();
  // Commit first so the written documents are visible to queries.
  long currentNumDocs = SolrIOTestUtils.commitAndGetCurrentNumDocs(SOLR_COLLECTION, solrClient);
  assertEquals(NUM_DOCS, currentNumDocs);
  QueryResponse response = solrClient.query(SOLR_COLLECTION, new SolrQuery("scientist:Lovelace"));
  assertEquals(NUM_DOCS / NUM_SCIENTISTS, response.getResults().getNumFound());
}
/**
 * Finds the jar that the given class was loaded from, by delegating to
 * {@code findContainingResource} with the class's own class loader.
 */
public static String findContainingJar(Class<?> clazz) {
  final ClassLoader loader = clazz.getClassLoader();
  final String className = clazz.getName();
  return findContainingResource(loader, className, "jar");
}
/**
 * The jar containing a third-party class (AssertJ's Assertions) must be found,
 * exist on disk, and have the expected file name.
 */
@Test(timeout=10000)
public void testFindContainingJar() {
  String containingJar = ClassUtil.findContainingJar(Assertions.class);
  Assertions
      .assertThat(containingJar)
      .describedAs("Containing jar for %s", Assertions.class)
      .isNotNull();
  File jarFile = new File(containingJar);
  Assertions
      .assertThat(jarFile)
      .describedAs("Containing jar %s", jarFile)
      .exists();
  // The artifact name is version-dependent, so match with a pattern.
  Assertions
      .assertThat(jarFile.getName())
      .describedAs("Containing jar name %s", jarFile.getName())
      .matches("assertj-core.*[.]jar");
}
/**
 * Tells whether the small-changeset leniency applies to this evaluation:
 * a failed (non-OK) condition, on one of the ignorable metrics, with the
 * ignore setting enabled (defaults to true), on a small changeset.
 */
public boolean appliesTo(Component project, @Nullable MetricEvaluationResult metricEvaluationResult) {
  if (metricEvaluationResult == null) {
    return false;
  }
  if (metricEvaluationResult.evaluationResult.level() == Measure.Level.OK) {
    return false;
  }
  if (!METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(metricEvaluationResult.condition.getMetric().getKey())) {
    return false;
  }
  if (!config.getConfiguration().getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true)) {
    return false;
  }
  return isSmallChangeset(project);
}
/**
 * An OK (green) condition must never be affected by the small-changeset leniency,
 * even when the changeset is small (19 new lines).
 */
@Test
public void should_not_change_green_conditions() {
  QualityGateMeasuresStep.MetricEvaluationResult metricEvaluationResult = generateEvaluationResult(NEW_BUGS_KEY, OK);
  Component project = generateNewRootProject();
  measureRepository.addRawMeasure(PROJECT_REF, CoreMetrics.NEW_LINES_KEY, newMeasureBuilder().create(19));
  boolean result = underTest.appliesTo(project, metricEvaluationResult);
  assertThat(result).isFalse();
}
/**
 * Tells whether automatic database upgrades are enabled; defaults to false when unset.
 */
public boolean isAutoDbUpgrade() {
  String propertyKey = ProcessProperties.Property.AUTO_DATABASE_UPGRADE.getKey();
  return configuration.getBoolean(propertyKey).orElse(false);
}
/**
 * The flag defaults to false and follows the sonar.autoDatabaseUpgrade property.
 */
@Test
public void isAutoDbUpgrade() {
  settings.clear();
  assertThat(underTest.isAutoDbUpgrade()).isFalse();
  settings.setProperty("sonar.autoDatabaseUpgrade", true);
  assertThat(underTest.isAutoDbUpgrade()).isTrue();
  settings.setProperty("sonar.autoDatabaseUpgrade", false);
  assertThat(underTest.isAutoDbUpgrade()).isFalse();
}
/**
 * Dispatches a field command: reads the sub-command from the reader, performs the
 * field get/set (or builds an error response for an unknown sub-command), then
 * writes and flushes the response.
 */
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer) throws Py4JException, IOException {
	String subCommand = safeReadLine(reader, false);
	String returnCommand;
	if (subCommand.equals(FIELD_GET_SUB_COMMAND_NAME)) {
		returnCommand = getField(reader);
	} else if (subCommand.equals(FIELD_SET_SUB_COMMAND_NAME)) {
		returnCommand = setField(reader);
	} else {
		returnCommand = Protocol.getOutputErrorCommand("Unknown Field SubCommand Name: " + subCommand);
	}
	logger.finest("Returning command: " + returnCommand);
	writer.write(returnCommand);
	writer.flush();
}
/**
 * Sets an Object-typed field through the field command's 's' sub-command and expects
 * a void success response, with the field actually holding the referenced object.
 */
@Test
public void testSetFieldObject() {
  String objectId = gateway.putNewObject(new StringBuffer("Hello"));
  // Protocol: "s" (set) / target object / field name / "r<objectId>" reference / end.
  String inputCommand = "s\n" + target + "\nfield20\nr" + objectId + "\ne\n";
  try {
    command.execute("f", new BufferedReader(new StringReader(inputCommand)), writer);
    // "!yv" is the void success response.
    assertEquals("!yv\n", sWriter.toString());
    assertEquals(((ExampleClass) gateway.getObject(target)).field20, gateway.getObject(objectId));
  } catch (Exception e) {
    e.printStackTrace();
    fail();
  }
}
/**
 * Checks that the required MySQL system variables have their expected values.
 * Runs SHOW VARIABLES restricted to the required variable names and compares
 * each returned value (case-insensitively) against the expected one.
 */
@Override
public void check(final DataSource dataSource) {
    try (
            Connection connection = dataSource.getConnection();
            PreparedStatement preparedStatement = connection.prepareStatement(SHOW_VARIABLES_SQL)) {
        // Bind one placeholder per required variable name.
        int parameterIndex = 1;
        for (Entry<String, String> entry : REQUIRED_VARIABLES.entrySet()) {
            preparedStatement.setString(parameterIndex++, entry.getKey());
        }
        try (ResultSet resultSet = preparedStatement.executeQuery()) {
            while (resultSet.next()) {
                // NOTE(review): assumes REQUIRED_VARIABLES keys are upper-case; otherwise the
                // lookup below returns null and equalsIgnoreCase would NPE -- confirm the map.
                String variableName = resultSet.getString(1).toUpperCase();
                String expectedValue = REQUIRED_VARIABLES.get(variableName);
                String actualValue = resultSet.getString(2);
                ShardingSpherePreconditions.checkState(expectedValue.equalsIgnoreCase(actualValue),
                        () -> new UnexpectedVariableValueException(variableName, expectedValue, actualValue));
            }
        }
    } catch (final SQLException ex) {
        // Surface any JDBC failure as a domain-specific environment check failure.
        throw new CheckDatabaseEnvironmentFailedException(ex);
    }
}
/**
 * Any SQLException raised while iterating the result set must surface as
 * CheckDatabaseEnvironmentFailedException.
 */
@Test
void assertCheckFailure() throws SQLException {
    when(preparedStatement.executeQuery()).thenReturn(resultSet);
    when(resultSet.next()).thenThrow(new SQLException(""));
    assertThrows(CheckDatabaseEnvironmentFailedException.class, () -> variableChecker.check(dataSource));
}
/**
 * Validates a list of "Res=Perm" resource/permission entries, throwing AclException
 * on a malformed entry or an unsupported permission string. A null or empty list
 * is accepted as-is.
 */
public static void checkResourcePerms(List<String> resources) {
    if (resources == null || resources.isEmpty()) {
        return;
    }
    for (String resource : resources) {
        String[] parts = StringUtils.split(resource, "=");
        if (parts.length != 2) {
            throw new AclException(String.format("Parse Resource format error for %s.\n"
                + "The expected resource format is 'Res=Perm'. For example: topicA=SUB", resource));
        }
        String permission = parts[1].trim();
        // An explicit DENY literal is allowed; any other string that parses to DENY is invalid.
        boolean isDenyLiteral = AclConstants.DENY.equals(permission);
        if (!isDenyLiteral && Permission.DENY == Permission.parsePermFromString(permission)) {
            throw new AclException(String.format("Parse resource permission error for %s.\n"
                + "The expected permissions are 'SUB' or 'PUB' or 'SUB|PUB' or 'PUB|SUB'.", resource));
        }
    }
}
/**
 * An entry without an '=' separator must be rejected with AclException.
 */
@Test(expected = AclException.class)
public void checkResourcePermsExceptionTest1() {
    Permission.checkResourcePerms(Arrays.asList("topicA"));
}
/**
 * Percent-encodes the given string for OAuth: URL-encodes with the configured
 * charset, then applies each extra encoding rule in turn.
 *
 * @throws OAuthException if the configured charset is not supported
 */
public static String encode(String plain) {
  Preconditions.checkNotNull(plain, "Cannot encode null object");
  String result;
  try {
    result = URLEncoder.encode(plain, CHARSET);
  } catch (UnsupportedEncodingException uee) {
    throw new OAuthException("Charset not found while encoding string: " + CHARSET, uee);
  }
  // Post-process to match OAuth's stricter percent-encoding rules.
  for (Map.Entry<String, String> rule : ENCODING_RULES.entrySet()) {
    result = applyRule(result, rule.getKey(), rule.getValue());
  }
  return result;
}
/**
 * Encoding examples taken from Twitter's percent-encoding documentation must
 * round through {@code OAuthEncoder.encode} to the exact documented output.
 */
@Test
public void shouldPercentEncodeCorrectlyTwitterCodingExamples() {
  // These tests are part of the Twitter dev examples here
  // -> https://dev.twitter.com/docs/auth/percent-encoding-parameters
  final String[] sources = {"Ladies + Gentlemen", "An encoded string!", "Dogs, Cats & Mice"};
  final String[] encoded = {"Ladies%20%2B%20Gentlemen", "An%20encoded%20string%21", "Dogs%2C%20Cats%20%26%20Mice"};
  for (int i = 0; i < sources.length; i++) {
    assertEquals(encoded[i], OAuthEncoder.encode(sources[i]));
  }
}
/**
 * Instantiates one ValueExtractor per attribute config, keyed by attribute name.
 * Duplicate attribute names are rejected with IllegalArgumentException before
 * the duplicate's extractor is instantiated.
 */
static Map<String, ValueExtractor> instantiateExtractors(List<AttributeConfig> attributeConfigs, ClassLoader classLoader) {
    Map<String, ValueExtractor> extractorsByName = createHashMap(attributeConfigs.size());
    for (AttributeConfig attributeConfig : attributeConfigs) {
        String attributeName = attributeConfig.getName();
        if (extractorsByName.containsKey(attributeName)) {
            throw new IllegalArgumentException("Could not add " + attributeConfig + ". Extractor for this attribute name already added.");
        }
        extractorsByName.put(attributeName, instantiateExtractor(attributeConfig, classLoader));
    }
    return extractorsByName;
}
/**
 * An extractor whose construction fails must surface as IllegalArgumentException.
 */
@Test
public void instantiate_extractors_initException() {
    // GIVEN
    AttributeConfig string = new AttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$InitExceptionExtractor");
    // WHEN
    assertThatThrownBy(() -> instantiateExtractors(singletonList(string)))
            .isInstanceOf(IllegalArgumentException.class);
}
@Override public void accept(Point newPoint) { //ensure this method is never called by multiple threads at the same time. parallelismDetector.run( () -> doAccept(newPoint) ); }
/**
 * A point arriving more than TIME_LIMIT after a track's last point must close
 * that track and deliver it to the downstream consumer.
 */
@Test
public void testTrackClosure() {
    Duration TIME_LIMIT = Duration.ofSeconds(5);
    TestConsumer consumer = new TestConsumer();
    TrackMaker maker = new TrackMaker(TIME_LIMIT, consumer);
    assertTrue(
        consumer.numCallsToAccept == 0,
        "The consumer has not been access yet"
    );
    maker.accept(newPoint("track1", Instant.EPOCH));
    // The second point is on a different track and past the time limit, so track1 closes.
    maker.accept(newPoint("differentTrack", Instant.EPOCH.plus(TIME_LIMIT.plusSeconds(1))));
    assertTrue(
        consumer.numCallsToAccept == 1,
        "track1 should be closed because the 2nd point is over the TIME_LIMIT"
    );
}
/**
 * Round-robin partition assignment: members are sorted, then partitions (in sorted
 * order) are handed out circularly, skipping members not subscribed to a
 * partition's topic.
 */
@Override
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) {
    Map<String, List<TopicPartition>> assignment = new HashMap<>();
    List<MemberInfo> members = new ArrayList<>();
    for (Map.Entry<String, Subscription> subscriptionEntry : subscriptions.entrySet()) {
        String memberId = subscriptionEntry.getKey();
        assignment.put(memberId, new ArrayList<>());
        members.add(new MemberInfo(memberId, subscriptionEntry.getValue().groupInstanceId()));
    }

    CircularIterator<MemberInfo> assigner = new CircularIterator<>(Utils.sorted(members));
    for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) {
        // Advance past members that are not subscribed to this partition's topic.
        while (!subscriptions.get(assigner.peek().memberId).topics().contains(partition.topic())) {
            assigner.next();
        }
        assignment.get(assigner.next().memberId).add(partition);
    }
    return assignment;
}
/**
 * A single consumer subscribed to a single 3-partition topic must receive all
 * three partitions in order.
 */
@Test
public void testOneConsumerOneTopic() {
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 3);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
        Collections.singletonMap(consumerId, new Subscription(topics(topic))));
    assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId));
}
/**
 * Static factory for a decimal type with the given precision and scale.
 */
public static SqlDecimal of(final int precision, final int scale) {
  final SqlDecimal decimal = new SqlDecimal(precision, scale);
  return decimal;
}
/**
 * equals/hashCode must treat same (precision, scale) as equal and either
 * differing precision or differing scale as unequal.
 */
@SuppressWarnings("UnstableApiUsage")
@Test
public void shouldImplementHashCodeAndEqualsProperly() {
  new EqualsTester()
      .addEqualityGroup(SqlDecimal.of(10, 2), SqlDecimal.of(10, 2))
      .addEqualityGroup(SqlDecimal.of(11, 2))
      .addEqualityGroup(SqlDecimal.of(10, 3))
      .testEquals();
}
/**
 * Returns every stored search the given user is allowed to read, based on the
 * user's search permissions and view-read predicate.
 */
public List<Search> getAllForUser(SearchPermissions searchPermissions, Predicate<ViewDTO> viewReadPermission) {
    final List<Search> readableSearches = dbService.streamAll()
            .filter(search -> hasReadPermissionFor(searchPermissions, viewReadPermission, search))
            .collect(Collectors.toList());
    return readableSearches;
}
/**
 * The per-user listing must include searches owned by the user and exclude
 * searches owned by others.
 */
@Test
public void includesOwnedSearchesInList() {
    final String userName = "boeser-willi";
    final Search ownedSearch = mockSearchWithOwner(userName);
    // A search owned by someone else must be filtered out.
    mockSearchWithOwner("someone else");
    final SearchUser searchUser = mock(SearchUser.class);
    when(searchUser.owns(ownedSearch)).thenReturn(true);
    List<Search> result = sut.getAllForUser(searchUser, searchUser::canReadView);
    assertThat(result).containsExactly(ownedSearch);
}
/**
 * Reads the next byte of the transformed stream, or returns -1 at end of stream.
 */
@Override
public int read() throws IOException {
    if (advanceStream()) {
        // NOTE(review): if `buffer` is a byte[], a negative byte would be sign-extended
        // here, violating the InputStream contract (return 0-255 or -1). Harmless for
        // ASCII JSON input, but confirm the buffer element type.
        final int value = buffer[bufferStart];
        incrementRead();
        return value;
    }
    return -1;
}
/**
 * Bulk read() over sample2 must yield the fixed-up JSON array piecewise:
 * "[{", "}", ",{", "}", "]" across successive reads.
 */
@Test
public void testRead_byteArr() throws Exception {
    byte[] b = new byte[9];
    try (InputStream sample = new ByteArrayInputStream(sample2.getBytes());
            JsonArrayFixingInputStream instance = new JsonArrayFixingInputStream(sample)) {
        int read = instance.read(b);
        assertEquals(2, read);
        assertEquals('[', b[0]);
        assertEquals('{', b[1]);
        read = instance.read(b);
        assertEquals(1, read);
        assertEquals('}', b[0]);
        read = instance.read(b);
        assertEquals(2, read);
        assertEquals(',', b[0]);
        assertEquals('{', b[1]);
        read = instance.read(b);
        assertEquals(1, read);
        assertEquals('}', b[0]);
        read = instance.read(b);
        assertEquals(1, read);
        assertEquals(']', b[0]);
    }
}
/** Returns a fresh {@link Builder}. */
public static Builder builder() {
  final Builder freshBuilder = new Builder();
  return freshBuilder;
}
/**
 * Feign proxies built from the same target must be equal (and share hashCode and
 * toString); proxies from different targets must differ. A proxy also mirrors its
 * target's hashCode/toString without being equal to the target itself.
 */
@Test
void equalsHashCodeAndToStringWork() {
  Target<TestInterface> t1 = new HardCodedTarget<>(TestInterface.class, "http://localhost:8080");
  Target<TestInterface> t2 = new HardCodedTarget<>(TestInterface.class, "http://localhost:8888");
  Target<OtherTestInterface> t3 = new HardCodedTarget<>(OtherTestInterface.class, "http://localhost:8080");
  TestInterface i1 = Feign.builder().target(t1);
  TestInterface i2 = Feign.builder().target(t1);
  TestInterface i3 = Feign.builder().target(t2);
  OtherTestInterface i4 = Feign.builder().target(t3);
  assertThat(i1).isEqualTo(i2).isNotEqualTo(i3).isNotEqualTo(i4);
  assertThat(i1.hashCode()).isEqualTo(i2.hashCode()).isNotEqualTo(i3.hashCode())
      .isNotEqualTo(i4.hashCode());
  assertThat(i1.toString()).isEqualTo(i2.toString()).isNotEqualTo(i3.toString())
      .isNotEqualTo(i4.toString());
  // The proxy delegates hashCode/toString to the target but equals distinguishes them.
  assertThat(t1).isNotEqualTo(i1);
  assertThat(t1.hashCode()).isEqualTo(i1.hashCode());
  assertThat(t1.toString()).isEqualTo(i1.toString());
}
/**
 * Applies the computed forward-index/dictionary operations for each column and
 * sanity-checks the resulting index state after every operation.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    return;
  }
  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          // After the rebuild the dictionary must match the column's dictionary mode.
          if (columnMetadata.hasDictionary()) {
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
            }
          } else {
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
                  _fieldIndexConfigs);
          if (newForwardIndexDisabledColumns.contains(column)) {
            // Forward index is also disabled for this column: drop the dictionary outright.
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
/**
 * Enabling a dictionary on each previously-raw column (one at a time) must create
 * both forward index and dictionary, keep the forward-index data intact, and leave
 * all column metadata unchanged except hasDictionary and dictionaryElementSize.
 */
@Test
public void testEnableDictionaryForSingleColumn()
    throws Exception {
  IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  for (int i = 0; i < _noDictionaryColumns.size(); i++) {
    if (FORWARD_INDEX_DISABLED_RAW_COLUMNS.contains(_noDictionaryColumns.get(i))) {
      // Skip the RAW forward index disabled columns
      continue;
    }
    SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
    SegmentDirectory segmentLocalFSDirectory =
        new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
    SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
    String column = _noDictionaryColumns.get(i);
    // Removing the column from the no-dictionary set triggers the ENABLE_DICTIONARY operation.
    indexLoadingConfig.removeNoDictionaryColumns(column);
    ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
    fwdIndexHandler.updateIndices(writer);
    fwdIndexHandler.postUpdateIndicesCleanup(writer);
    // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
    segmentLocalFSDirectory.close();
    ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
    testIndexExists(column, StandardIndexes.forward());
    testIndexExists(column, StandardIndexes.dictionary());
    validateIndexMap(column, true, false);
    validateForwardIndex(column, null, metadata.isSorted());
    // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change.
    int dictionaryElementSize = 0;
    FieldSpec.DataType dataType = metadata.getDataType();
    if (dataType == FieldSpec.DataType.STRING || dataType == FieldSpec.DataType.BYTES) {
      // This value is based on the rows in createTestData().
      dictionaryElementSize = 7;
    } else if (dataType == FieldSpec.DataType.BIG_DECIMAL) {
      dictionaryElementSize = 4;
    }
    validateMetadataProperties(column, true, dictionaryElementSize, metadata.getCardinality(),
        metadata.getTotalDocs(), dataType, metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
        metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
        metadata.getMinValue(), metadata.getMaxValue(), false);
  }
}
/** Identifies this provider as handling PMML tree models. */
@Override
public PMML_MODEL getPMMLModelType() {
    logger.trace("getPMMLModelType");
    final PMML_MODEL supportedModel = PMML_MODEL.TREE_MODEL;
    return supportedModel;
}
/**
 * The provider must report the TREE_MODEL PMML type.
 */
@Test
void getPMMLModelType() {
    assertThat(PROVIDER.getPMMLModelType()).isEqualTo(PMML_MODEL.TREE_MODEL);
}
/**
 * REST endpoint: queries the users authorized on the given project for the
 * session user. Delegates directly to the project service.
 */
@Operation(summary = "queryAuthorizedUser", description = "QUERY_AUTHORIZED_USER_NOTES")
@Parameters({
        @Parameter(name = "projectCode", description = "PROJECT_CODE", schema = @Schema(implementation = long.class, example = "100"))
})
@GetMapping(value = "/authed-user")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_AUTHORIZED_USER)
public Result queryAuthorizedUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                  @RequestParam("projectCode") Long projectCode) {
    return projectService.queryAuthorizedUser(loginUser, projectCode);
}
/**
 * The controller must pass the session user and project code through to the
 * service and return the service's success result unchanged.
 */
@Test
public void testQueryAuthorizedUser() {
    Result result = new Result();
    this.putMsg(result, Status.SUCCESS);
    Mockito.when(this.projectService.queryAuthorizedUser(this.user, 3682329499136L)).thenReturn(result);
    Result response = this.projectController.queryAuthorizedUser(this.user, 3682329499136L);
    Assertions.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
/**
 * Builds a KsqlGenericRecord from column names and value expressions against the
 * given schema: validates the columns, resolves the expression values, enforces
 * table primary-key completeness, and splits the values into key/value/timestamp.
 */
public KsqlGenericRecord build(
    final List<ColumnName> columnNames,
    final List<Expression> expressions,
    final LogicalSchema schema,
    final DataSourceType dataSourceType
) {
  // An empty column list means "use the schema's implicit columns" in schema order.
  final List<ColumnName> columns = columnNames.isEmpty()
      ? implicitColumns(schema)
      : columnNames;

  if (columns.size() != expressions.size()) {
    throw new KsqlException(
        "Expected a value for each column."
            + " Expected Columns: " + columnNames
            + ". Got " + expressions);
  }

  final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema);
  for (ColumnName col : columns) {
    if (!schemaWithPseudoColumns.findColumn(col).isPresent()) {
      throw new KsqlException("Column name " + col + " does not exist.");
    }
    if (SystemColumns.isDisallowedForInsertValues(col)) {
      throw new KsqlException("Inserting into column " + col + " is not allowed.");
    }
  }

  final Map<ColumnName, Object> values = resolveValues(
      columns,
      expressions,
      schemaWithPseudoColumns,
      functionRegistry,
      config
  );

  if (dataSourceType == DataSourceType.KTABLE) {
    // Tables require a value for every primary key column.
    final String noValue = schemaWithPseudoColumns.key().stream()
        .map(Column::name)
        .filter(colName -> !values.containsKey(colName))
        .map(ColumnName::text)
        .collect(Collectors.joining(", "));
    if (!noValue.isEmpty()) {
      throw new KsqlException("Value for primary key column(s) "
          + noValue + " is required for tables");
    }
  }

  // An explicitly supplied ROWTIME wins; otherwise stamp with the injected clock.
  final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong());

  final GenericKey key = buildKey(schema, values);
  final GenericRow value = buildValue(schema, values);

  return KsqlGenericRecord.of(key, value, ts);
}
/**
 * When no ROWTIME is supplied, the record timestamp must come from the injected clock.
 */
@Test
public void shouldUseClockTime() {
  // Given:
  final LogicalSchema schema = LogicalSchema.builder()
      .keyColumn(KEY, SqlTypes.STRING)
      .valueColumn(COL0, SqlTypes.STRING)
      .build();
  final List<ColumnName> names = ImmutableList.of(KEY, COL0);
  final Expression exp = new StringLiteral("a");
  clock.set(1L);
  // When:
  final KsqlGenericRecord record = recordFactory.build(
      names,
      ImmutableList.of(exp, exp),
      schema,
      DataSourceType.KSTREAM
  );
  // Then:
  assertThat(record, is(KsqlGenericRecord.of(
      GenericKey.genericKey("a"),
      GenericRow.genericRow("a"),
      1
  )));
}
/**
 * Wraps the current backend row's data into a PostgreSQL DataRow packet.
 */
@Override
public PostgreSQLPacket getQueryRowPacket() throws SQLException {
    return new PostgreSQLDataRowPacket(proxyBackendHandler.getRowData().getData());
}
/**
 * getQueryRowPacket must wrap the backend row data in a PostgreSQL DataRow packet.
 */
@Test
void assertGetQueryRowPacket() throws SQLException {
    when(proxyBackendHandler.getRowData()).thenReturn(new QueryResponseRow(Collections.emptyList()));
    PostgreSQLPacket actual = queryExecutor.getQueryRowPacket();
    assertThat(actual, is(instanceOf(PostgreSQLDataRowPacket.class)));
}