focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns the sanitized remote URL with the password portion of any user-info
 * removed (via removePassword). When the sanitized value is not a parseable
 * URI — e.g. a Windows filesystem path such as "c:\foobar" — it is returned
 * unchanged.
 */
public String defaultRemoteUrl() {
    final String sanitized = sanitizeUrl();
    try {
        final URI parsed = new URI(sanitized);
        final String userInfo = parsed.getUserInfo();
        if (userInfo == null) {
            // No credentials embedded — nothing to strip.
            return sanitized;
        }
        // Rebuild the URI with the password stripped from the user-info part.
        return new URI(parsed.getScheme(), removePassword(userInfo), parsed.getHost(), parsed.getPort(), parsed.getPath(), parsed.getQuery(), parsed.getFragment()).toString();
    } catch (URISyntaxException e) {
        // Not a URI (e.g. a local filesystem path) — return as-is.
        return sanitized;
    }
}
// Verifies that a Windows filesystem path (not a valid URI) is returned unchanged.
@Test
void shouldNotModifyWindowsFileSystemPath() {
    assertThat(new HgUrlArgument("c:\\foobar").defaultRemoteUrl(), is("c:\\foobar"));
}
/**
 * Synchronously commits the given offsets, retrying with backoff until the
 * timer expires, the commit succeeds, or a non-retriable failure occurs
 * (which is rethrown).
 *
 * @param offsets offsets to commit, keyed by partition
 * @param timer   bounds the total time spent (coordinator lookup, request, retries)
 * @return true if the commit was applied before the timer expired
 */
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) {
    invokeCompletedOffsetCommitCallbacks();
    if (offsets.isEmpty()) {
        // We guarantee that the callbacks for all commitAsync() will be invoked when
        // commitSync() completes, even if the user tries to commit empty offsets.
        return invokePendingAsyncCommits(timer);
    }
    long attempts = 0L;
    do {
        if (coordinatorUnknownAndUnreadySync(timer)) {
            // Could not discover/connect to the coordinator within the timer.
            return false;
        }
        RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
        client.poll(future, timer);
        // We may have had in-flight offset commits when the synchronous commit began. If so, ensure that
        // the corresponding callbacks are invoked prior to returning in order to preserve the order that
        // the offset commits were applied.
        invokeCompletedOffsetCommitCallbacks();
        if (future.succeeded()) {
            if (interceptors != null)
                interceptors.onCommit(offsets);
            return true;
        }
        // Non-retriable failures (e.g. fatal errors) are propagated to the caller.
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        // Retriable failure: back off (growing with attempt count) and try again.
        timer.sleep(retryBackoff.backoff(attempts++));
    } while (timer.notExpired());
    return false;
}
// Verifies that a NOT_COORDINATOR error triggers coordinator re-discovery and a retried commit.
@Test
public void testCommitOffsetSyncNotCoordinator() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // sync commit with coordinator disconnected (should connect, get metadata, and then submit the commit request)
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NOT_COORDINATOR);
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE));
}
/**
 * A type is writeable only when this provider can provide it AND the
 * superclass also considers it writeable.
 */
@Override
public boolean isWriteable(Class<?> type, @Nullable Type genericType, @Nullable Annotation[] annotations, @Nullable MediaType mediaType) {
    // Guard: non-providable types are rejected outright, matching the
    // short-circuit of the original conjunction.
    if (!isProvidable(type)) {
        return false;
    }
    return super.isWriteable(type, genericType, annotations, mediaType);
}
// Verifies that an ignored type is reported as not writeable.
@Test
void doesNotWriteIgnoredTypes() {
    assertThat(provider.isWriteable(Ignorable.class, null, null, null))
        .isFalse();
}
/**
 * Splits {@code clientMessage} into fragments that each stay within
 * {@code maxFrameSize}, sharing one fragment id. A single frame larger than
 * {@code maxFrameSize} becomes its own fragment. The first and last fragments
 * get the BEGIN/END fragment flags on their start frames. If the whole
 * message already fits, it is returned unfragmented.
 */
public static List<ClientMessage> getFragments(int maxFrameSize, ClientMessage clientMessage) {
    // Fast path: no fragmentation needed.
    if (clientMessage.getFrameLength() <= maxFrameSize) {
        return Collections.singletonList(clientMessage);
    }
    long fragmentId = FRAGMENT_ID_SEQUENCE.next();
    LinkedList<ClientMessage> fragments = new LinkedList<>();
    ClientMessage.ForwardFrameIterator iterator = clientMessage.frameIterator();
    // State machine: BEGINNING = no open fragment, MIDDLE = fragment being filled.
    ReadState state = ReadState.BEGINNING;
    int length = 0;
    ClientMessage fragment = null;
    while (iterator.hasNext()) {
        ClientMessage.Frame frame = iterator.peekNext();
        int frameSize = frame.getSize();
        length += frameSize;
        if (frameSize > maxFrameSize) {
            // Oversized frame: close any open fragment and emit this frame alone.
            iterator.next();
            if (state == ReadState.MIDDLE) {
                fragments.add(fragment);
            }
            fragment = createFragment(fragmentId);
            fragment.add(frame.copy());
            fragments.add(fragment);
            state = ReadState.BEGINNING;
            length = 0;
        } else if (length <= maxFrameSize) {
            // Frame still fits in the current fragment; open one if needed.
            iterator.next();
            if (state == ReadState.BEGINNING) {
                fragment = createFragment(fragmentId);
            }
            fragment.add(frame.copy());
            state = ReadState.MIDDLE;
        } else {
            // Frame would overflow: close the current fragment; the frame is
            // NOT consumed and is re-examined on the next iteration.
            assert state == ReadState.MIDDLE;
            fragments.add(fragment);
            state = ReadState.BEGINNING;
            length = 0;
        }
    }
    // Flush a trailing, still-open fragment.
    if (state == ReadState.MIDDLE) {
        fragments.add(fragment);
    }
    fragments.getFirst().getStartFrame().flags |= BEGIN_FRAGMENT_FLAG;
    fragments.getLast().getStartFrame().flags |= END_FRAGMENT_FLAG;
    return fragments;
}
// Verifies that a frame-size limit larger than the message length yields the original message.
@Test
@RequireAssertEnabled
public void testGetSubFrame_whenFrameSizeGreaterThanFrameLength_thenReturnOriginalMessage() {
    List<ClientMessage> fragments = getFragments(4000, clientMessage);
    ClientMessage.ForwardFrameIterator originalIterator = clientMessage.frameIterator();
    assertFragments(fragments, originalIterator);
}
/**
 * Mean of the binomial distribution: E[X] = n * p, where n is the number of
 * trials and p the per-trial success probability.
 */
@Override
public double mean() {
    return n * p;
}
// Verifies mean() = n * p for Binomial(100, 0.3) => 30.
@Test
public void testMean() {
    System.out.println("mean");
    BinomialDistribution instance = new BinomialDistribution(100, 0.3);
    instance.rand();
    assertEquals(30.0, instance.mean(), 1E-7);
}
/**
 * Collects join conditions from the given WHERE segments. A segment qualifies
 * when its expression is a {@link BinaryOperationExpression} whose left and
 * right operands are both column references (i.e. a column-to-column
 * comparison such as {@code a.id = b.id}).
 *
 * @param joinConditions output collection the qualifying expressions are added to
 * @param whereSegments  WHERE segments to scan
 */
public static void extractJoinConditions(final Collection<BinaryOperationExpression> joinConditions, final Collection<WhereSegment> whereSegments) {
    for (WhereSegment each : whereSegments) {
        // Cast once instead of repeating (BinaryOperationExpression) four times.
        if (each.getExpr() instanceof BinaryOperationExpression) {
            BinaryOperationExpression binaryExpr = (BinaryOperationExpression) each.getExpr();
            if (binaryExpr.getLeft() instanceof ColumnSegment && binaryExpr.getRight() instanceof ColumnSegment) {
                joinConditions.add(binaryExpr);
            }
        }
    }
}
// Verifies a column-to-column equality in a WHERE segment is extracted as a join condition.
@Test
void assertExtractJoinConditions() {
    Collection<BinaryOperationExpression> actual = new LinkedList<>();
    BinaryOperationExpression binaryExpression = new BinaryOperationExpression(0, 0, new ColumnSegment(0, 0, new IdentifierValue("order_id")), new ColumnSegment(0, 0, new IdentifierValue("order_id")), "=", "");
    ExpressionExtractUtils.extractJoinConditions(actual, Collections.singleton(new WhereSegment(0, 0, binaryExpression)));
    assertThat(actual.size(), is(1));
    assertThat(actual.iterator().next(), is(binaryExpression));
}
/**
 * Creates a store supplier after validating the name and durations.
 * The segment interval defaults to half the retention, floored at 60s.
 *
 * @throws NullPointerException     if {@code name} is null
 * @throws IllegalArgumentException if a duration is negative or the window
 *                                  size exceeds the retention period
 */
public static RocksDbIndexedTimeOrderedWindowBytesStoreSupplier create(final String name, final Duration retentionPeriod, final Duration windowSize, final boolean retainDuplicates, final boolean hasIndex) {
    Objects.requireNonNull(name, "name cannot be null");
    final String rpMsgPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod");
    final long retentionMs = validateMillisecondDuration(retentionPeriod, rpMsgPrefix);
    final String wsMsgPrefix = prepareMillisCheckFailMsgPrefix(windowSize, "windowSize");
    final long windowSizeMs = validateMillisecondDuration(windowSize, wsMsgPrefix);
    // Segment interval: half the retention, but never below one minute.
    final long defaultSegmentInterval = Math.max(retentionMs / 2, 60_000L);
    if (retentionMs < 0L) {
        throw new IllegalArgumentException("retentionPeriod cannot be negative");
    }
    if (windowSizeMs < 0L) {
        throw new IllegalArgumentException("windowSize cannot be negative");
    }
    // NOTE(review): given Math.max(..., 60_000L) above, this branch looks
    // unreachable — confirm before relying on this message.
    if (defaultSegmentInterval < 1L) {
        throw new IllegalArgumentException("segmentInterval cannot be zero or negative");
    }
    if (windowSizeMs > retentionMs) {
        throw new IllegalArgumentException("The retention period of the window store " + name + " must be no smaller than its window size. Got size=[" + windowSizeMs + "], retention=[" + retentionMs + "]");
    }
    return new RocksDbIndexedTimeOrderedWindowBytesStoreSupplier(name, retentionMs, defaultSegmentInterval, windowSizeMs, retainDuplicates, hasIndex);
}
// Verifies create() rejects a null store name with an NPE carrying the expected message.
@Test
public void shouldThrowIfStoreNameIsNull() {
    final Exception e = assertThrows(NullPointerException.class, () -> RocksDbIndexedTimeOrderedWindowBytesStoreSupplier.create(null, ZERO, ZERO, false, false));
    assertEquals("name cannot be null", e.getMessage());
}
/**
 * Runs one scheduling pass over all lifespan schedule groups: pulls split
 * batches from the split source, computes node placements, assigns placed
 * splits to tasks, and advances per-group and overall state. Returns a
 * non-blocked result when progress was made or scheduling is finished, or a
 * blocked result (with the futures to wait on and a blocked reason) otherwise.
 */
@Override
public synchronized ScheduleResult schedule() {
    dropListenersFromWhenFinishedOrNewLifespansAdded();
    int overallSplitAssignmentCount = 0;
    ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
    List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
    boolean anyBlockedOnPlacements = false;
    boolean anyBlockedOnNextSplitBatch = false;
    boolean anyNotBlocked = false;
    for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
        Lifespan lifespan = entry.getKey();
        ScheduleGroup scheduleGroup = entry.getValue();
        if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) {
            // Terminal-ish states must not have an outstanding batch fetch.
            verify(scheduleGroup.nextSplitBatchFuture == null);
        }
        else if (scheduleGroup.pendingSplits.isEmpty()) {
            // try to get the next batch
            if (scheduleGroup.nextSplitBatchFuture == null) {
                scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize);
                long start = System.nanoTime();
                addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
            }
            if (scheduleGroup.nextSplitBatchFuture.isDone()) {
                SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
                scheduleGroup.nextSplitBatchFuture = null;
                scheduleGroup.pendingSplits = new HashSet<>(nextSplits.getSplits());
                if (nextSplits.isLastBatch()) {
                    if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && scheduleGroup.pendingSplits.isEmpty()) {
                        // Add an empty split in case no splits have been produced for the source.
                        // For source operators, they never take input, but they may produce output.
                        // This is well handled by Presto execution engine.
                        // However, there are certain non-source operators that may produce output without any input,
                        // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
                        // Scheduling an empty split kicks off necessary driver instantiation to make this work.
                        scheduleGroup.pendingSplits.add(new Split(splitSource.getConnectorId(), splitSource.getTransactionHandle(), new EmptySplit(splitSource.getConnectorId()), lifespan, NON_CACHEABLE));
                    }
                    scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
                }
            }
            else {
                // Batch fetch still pending: this group is blocked on the source.
                overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
                anyBlockedOnNextSplitBatch = true;
                continue;
            }
        }
        Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of();
        if (!scheduleGroup.pendingSplits.isEmpty()) {
            if (!scheduleGroup.placementFuture.isDone()) {
                anyBlockedOnPlacements = true;
                continue;
            }
            if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
                scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
            }
            if (state == State.INITIALIZED) {
                state = State.SPLITS_ADDED;
            }
            // calculate placements for splits
            SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(scheduleGroup.pendingSplits);
            splitAssignment = splitPlacementResult.getAssignments();
            // remove splits with successful placements
            splitAssignment.values().forEach(scheduleGroup.pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
            overallSplitAssignmentCount += splitAssignment.size();
            // if not completed placed, mark scheduleGroup as blocked on placement
            if (!scheduleGroup.pendingSplits.isEmpty()) {
                scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
                overallBlockedFutures.add(scheduleGroup.placementFuture);
                anyBlockedOnPlacements = true;
            }
        }
        // if no new splits will be assigned, update state and attach completion event
        Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
        if (scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
            scheduleGroup.state = ScheduleGroupState.DONE;
            if (!lifespan.isTaskWide()) {
                InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId());
                noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
            }
        }
        // assign the splits with successful placements
        overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));
        // Assert that "placement future is not done" implies "pendingSplits is not empty".
        // The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
        // However, there are other reasons that could lead to this.
        // Note that `computeAssignments` is quite broken:
        // 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
        // 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
        // As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
        if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) {
            anyNotBlocked = true;
        }
    }
    // * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
    // If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
    // * Even if `splitSource.isFinished()` return true, it is not necessarily safe to tear down the split source.
    // * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
    // which may contain recently published splits. We must not ignore those.
    // * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
    // Next time it invokes getNextBatch, it will realize that. However, the invocation will fail we tear down splitSource now.
    //
    // Since grouped execution is going to support failure recovery, and scheduled splits might have to be rescheduled during retry,
    // we can no longer claim schedule is complete after all splits are scheduled.
    // Splits schedule can only be considered as finished when all lifespan executions are done
    // (by calling `notifyAllLifespansFinishedExecution`)
    if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (!groupedExecution && lifespanAdded && scheduleGroups.isEmpty() && splitSource.isFinished())) {
        switch (state) {
            case INITIALIZED:
                // We have not scheduled a single split so far.
                // But this shouldn't be possible. See usage of EmptySplit in this method.
                throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
            case SPLITS_ADDED:
                state = State.NO_MORE_SPLITS;
                splitSource.close();
                // fall through
            case NO_MORE_SPLITS:
                state = State.FINISHED;
                whenFinishedOrNewLifespanAdded.set(null);
                // fall through
            case FINISHED:
                return ScheduleResult.nonBlocked(true, overallNewTasks.build(), overallSplitAssignmentCount);
            default:
                throw new IllegalStateException("Unknown state");
        }
    }
    if (anyNotBlocked) {
        return ScheduleResult.nonBlocked(false, overallNewTasks.build(), overallSplitAssignmentCount);
    }
    if (anyBlockedOnPlacements) {
        // In a broadcast join, output buffers of the tasks in build source stage have to
        // hold onto all data produced before probe side task scheduling finishes,
        // even if the data is acknowledged by all known consumers. This is because
        // new consumers may be added until the probe side task scheduling finishes.
        //
        // As a result, the following line is necessary to prevent deadlock
        // due to neither build nor probe can make any progress.
        // The build side blocks due to a full output buffer.
        // In the meantime the probe side split cannot be consumed since
        // builder side hash table construction has not finished.
        //
        // TODO: When SourcePartitionedScheduler is used as a SourceScheduler, it shouldn't need to worry about
        // task scheduling and creation -- these are done by the StageScheduler.
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    }
    ScheduleResult.BlockedReason blockedReason;
    if (anyBlockedOnNextSplitBatch) {
        blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE;
    }
    else {
        blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
    }
    overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
    return ScheduleResult.blocked(false, overallNewTasks.build(), nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)), blockedReason, overallSplitAssignmentCount);
}
// Verifies that once all nodes are saturated by one query, a second query's scheduling
// proceeds (tasks created) but assigns no splits.
@Test
public void testBlockCausesFullSchedule() {
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    // Schedule 60 splits - filling up all nodes
    SubPlan firstPlan = createPlan();
    SqlStageExecution firstStage = createSqlStageExecution(firstPlan, nodeTaskMap);
    StageScheduler firstScheduler = getSourcePartitionedScheduler(createFixedSplitSource(60, TestingSplit::createRemoteSplit), firstStage, nodeManager, nodeTaskMap, 200);
    ScheduleResult scheduleResult = firstScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, firstScheduler);
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(firstStage.getAllTasks().size(), 3);
    for (RemoteTask remoteTask : firstStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 20);
    }
    // Schedule more splits in another query, which will block since all nodes are full
    SubPlan secondPlan = createPlan();
    SqlStageExecution secondStage = createSqlStageExecution(secondPlan, nodeTaskMap);
    StageScheduler secondScheduler = getSourcePartitionedScheduler(createFixedSplitSource(5, TestingSplit::createRemoteSplit), secondStage, nodeManager, nodeTaskMap, 200);
    scheduleResult = secondScheduler.schedule();
    assertFalse(scheduleResult.isFinished());
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(secondStage.getAllTasks().size(), 3);
    for (RemoteTask remoteTask : secondStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 0);
    }
    firstStage.abort();
    secondStage.abort();
}
/**
 * This implementation is unconditionally supported.
 */
@Override
public boolean isSupported() {
    return true;
}
// Verifies the Meizu implementation always reports itself as supported.
@Test
public void isSupported() {
    MeizuImpl meizu = new MeizuImpl(mApplication);
    Assert.assertTrue(meizu.isSupported());
}
/**
 * Converts a PMML {@code Discretize} model element into its KiePMML
 * counterpart, mapping bins, mapMissingTo, defaultValue and data type, with
 * nulls/empty defaults for absent optional parts.
 */
static KiePMMLDiscretize getKiePMMLDiscretize(final Discretize discretize) {
    // Bins are optional; fall back to an empty list when none are declared.
    final List<KiePMMLDiscretizeBin> discretizeBins;
    if (discretize.hasDiscretizeBins()) {
        discretizeBins = getKiePMMLDiscretizeBins(discretize.getDiscretizeBins());
    } else {
        discretizeBins = Collections.emptyList();
    }
    // Optional scalar attributes: null stays null, otherwise stringify/convert.
    final String mapMissingTo = discretize.getMapMissingTo() == null ? null : discretize.getMapMissingTo().toString();
    final String defaultValue = discretize.getDefaultValue() == null ? null : discretize.getDefaultValue().toString();
    final DATA_TYPE dataType = discretize.getDataType() == null ? null : DATA_TYPE.byName(discretize.getDataType().value());
    return new KiePMMLDiscretize(discretize.getField(), getKiePMMLExtensions(discretize.getExtensions()), discretizeBins, mapMissingTo, defaultValue, dataType);
}
// Round-trip check: a random Discretize converts to an equivalent KiePMMLDiscretize.
@Test
void getKiePMMLDiscretize() {
    Discretize toConvert = getRandomDiscretize();
    KiePMMLDiscretize retrieved = KiePMMLDiscretizeInstanceFactory.getKiePMMLDiscretize(toConvert);
    commonVerifyKiePMMLDiscretize(retrieved, toConvert);
}
/**
 * Derives all generated artifact names (entity, mapper, XML, service,
 * service impl, controller) from the converted entity name using the
 * configured name converters, then resolves the import packages.
 */
public void processTable() {
    // Base entity name, produced by the configured name-conversion strategy.
    String entityName = entity.getNameConvert().entityNameConvert(this);
    this.setEntityName(entity.getConverterFileName().convert(entityName));
    this.mapperName = strategyConfig.mapper().getConverterMapperFileName().convert(entityName);
    this.xmlName = strategyConfig.mapper().getConverterXmlFileName().convert(entityName);
    this.serviceName = strategyConfig.service().getConverterServiceFileName().convert(entityName);
    this.serviceImplName = strategyConfig.service().getConverterServiceImplFileName().convert(entityName);
    this.controllerName = strategyConfig.controller().getConverterFileName().convert(entityName);
    this.importPackage();
}
// Verifies artifact naming under: default strategy, custom file-name formats,
// and a custom INameConvert implementation.
@Test
void processTableTest() {
    // Default strategy: standard suffixes (Mapper, IService, ServiceImpl, Controller).
    StrategyConfig strategyConfig = GeneratorBuilder.strategyConfig();
    TableInfo tableInfo = new TableInfo(new ConfigBuilder(GeneratorBuilder.packageConfig(), dataSourceConfig, strategyConfig, null, GeneratorBuilder.globalConfig(), null), "user");
    tableInfo.processTable();
    Assertions.assertFalse(tableInfo.isConvert());
    Assertions.assertEquals("UserMapper", tableInfo.getMapperName());
    Assertions.assertEquals("UserMapper", tableInfo.getXmlName());
    Assertions.assertEquals("IUserService", tableInfo.getServiceName());
    Assertions.assertEquals("UserServiceImpl", tableInfo.getServiceImplName());
    Assertions.assertEquals("UserController", tableInfo.getControllerName());
    // Custom file-name formats for every artifact type.
    strategyConfig = GeneratorBuilder.strategyConfig();
    strategyConfig.entityBuilder().formatFileName("%sEntity")
        .mapperBuilder().formatMapperFileName("%sDao").formatXmlFileName("%sXml")
        .controllerBuilder().formatFileName("%sAction")
        .serviceBuilder().formatServiceFileName("%sService").formatServiceImplFileName("%sServiceImp");
    tableInfo = new TableInfo(new ConfigBuilder(GeneratorBuilder.packageConfig(), dataSourceConfig, strategyConfig, null, null, null), "user");
    tableInfo.processTable();
    Assertions.assertTrue(tableInfo.isConvert());
    Assertions.assertEquals("UserEntity", tableInfo.getEntityName());
    Assertions.assertEquals("UserDao", tableInfo.getMapperName());
    Assertions.assertEquals("UserXml", tableInfo.getXmlName());
    Assertions.assertEquals("UserService", tableInfo.getServiceName());
    Assertions.assertEquals("UserServiceImp", tableInfo.getServiceImplName());
    Assertions.assertEquals("UserAction", tableInfo.getControllerName());
    // Custom name converter prefixing the raw table name.
    strategyConfig = GeneratorBuilder.strategyConfig();
    strategyConfig.entityBuilder().nameConvert(new INameConvert() {
        @Override
        public @NotNull String entityNameConvert(@NotNull TableInfo tableInfo) {
            return "E" + tableInfo.getName();
        }

        @Override
        public @NotNull String propertyNameConvert(@NotNull TableField field) {
            return field.getName();
        }
    });
    tableInfo = new TableInfo(new ConfigBuilder(GeneratorBuilder.packageConfig(), dataSourceConfig, strategyConfig, null, null, null), "user");
    tableInfo.processTable();
    Assertions.assertTrue(tableInfo.isConvert());
    Assertions.assertEquals("Euser", tableInfo.getEntityName());
}
/**
 * Matches the given specs, routing wildcard specs through glob matching and
 * the rest through direct lookup, then re-interleaves the results so they
 * come back in the same order as the input specs.
 */
@Override
protected List<MatchResult> match(List<String> specs) {
    List<AzfsResourceId> paths = specs.stream().map(AzfsResourceId::fromUri).collect(Collectors.toList());
    // Partition into glob / non-glob, remembering the original ordering.
    List<AzfsResourceId> globs = new ArrayList<>();
    List<AzfsResourceId> nonGlobs = new ArrayList<>();
    List<Boolean> isGlobBooleans = new ArrayList<>();
    for (AzfsResourceId path : paths) {
        if (path.isWildcard()) {
            globs.add(path);
            isGlobBooleans.add(true);
        } else {
            nonGlobs.add(path);
            isGlobBooleans.add(false);
        }
    }
    Iterator<MatchResult> globMatches = matchGlobPaths(globs).iterator();
    Iterator<MatchResult> nonGlobMatches = matchNonGlobPaths(nonGlobs).iterator();
    // Zip the two result streams back together in input order; each helper
    // must have produced exactly one result per input.
    ImmutableList.Builder<MatchResult> matchResults = ImmutableList.builder();
    for (Boolean isGlob : isGlobBooleans) {
        if (isGlob) {
            checkState(
                globMatches.hasNext(),
                "Internal error encountered in AzureBlobStoreFileSystem: expected more elements in globMatches.");
            matchResults.add(globMatches.next());
        } else {
            checkState(
                nonGlobMatches.hasNext(),
                "Internal error encountered in AzureBlobStoreFileSystem: expected more elements in nonGlobMatches.");
            matchResults.add(nonGlobMatches.next());
        }
    }
    checkState(
        !globMatches.hasNext(),
        "Internal error encountered in AzureBlobStoreFileSystem: expected no more elements in globMatches.");
    checkState(
        !nonGlobMatches.hasNext(),
        "Internal error encountered in AzureBlobStoreFileSystem: expected no more elements in nonGlobMatches.");
    return matchResults.build();
}
// Integration test (ignored): uploads blobs to a real container and checks glob,
// not-found and exact-path match results.
@Test
@Ignore
public void testMatch() throws Exception {
    // TODO: Write this test with mocks - see GcsFileSystemTest
    String container = "test-container" + randomUUID();
    BlobContainerClient blobContainerClient = azureBlobStoreFileSystem.getClient().createBlobContainer(container);
    // Create files
    List<String> blobNames = new ArrayList<>();
    blobNames.add("testdirectory/file1name");
    blobNames.add("testdirectory/file2name");
    blobNames.add("testdirectory/file3name");
    blobNames.add("testdirectory/file4name");
    blobNames.add("testdirectory/otherfile");
    blobNames.add("testotherdirectory/anotherfile");
    for (String blob : blobNames) {
        blobContainerClient.getBlobClient(blob).uploadFromFile("src/test/resources/in.txt");
    }
    List<String> specs = ImmutableList.of(
        "azfs://account/" + container + "/testdirectory/file[1-3]*",
        "azfs://account/" + container + "/testdirectory/non-exist-file",
        "azfs://account/" + container + "/testdirectory/otherfile");
    List<MatchResult> matchResults = azureBlobStoreFileSystem.match(specs);
    // Confirm that match results are as expected
    assertEquals(3, matchResults.size());
    assertEquals(MatchResult.Status.OK, matchResults.get(0).status());
    assertThat(
        ImmutableList.of(
            "azfs://account/" + container + "/testdirectory/file1name",
            "azfs://account/" + container + "/testdirectory/file2name",
            "azfs://account/" + container + "/testdirectory/file3name"),
        contains(toFilenames(matchResults.get(0)).toArray()));
    assertEquals(MatchResult.Status.NOT_FOUND, matchResults.get(1).status());
    assertEquals(MatchResult.Status.OK, matchResults.get(2).status());
    assertThat(
        ImmutableList.of("azfs://account/" + container + "/testdirectory/otherfile"),
        contains(toFilenames(matchResults.get(2)).toArray()));
    blobContainerClient.delete();
}
/**
 * Stub implementation: enabling a plugin is not supported here, so this
 * always reports failure regardless of {@code pluginId}.
 */
@Override
public boolean enable(String pluginId) {
    return false;
}
// Verifies enable() returns false in every state: unregistered, registered, and after disable.
@Test
public void testEnable() {
    ThreadPoolPlugin plugin = new TestPlugin();
    Assert.assertFalse(manager.enable(plugin.getId()));
    manager.register(plugin);
    Assert.assertFalse(manager.enable(plugin.getId()));
    manager.disable(plugin.getId());
    Assert.assertFalse(manager.enable(plugin.getId()));
}
/**
 * Validates that a config file was supplied, loads it, and delegates to the
 * config-aware overload with the default ksql-client factory.
 *
 * @return the delegate's exit status, or 1 when validation or loading fails
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }
    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        // Load failures are logged and turned into a non-zero exit status.
        LOGGER.error(e.getMessage());
        return 1;
    }
    return command(config, MigrationsUtil::getKsqlClient);
}
// Verifies the migrations stream is still dropped (and nothing else) when the
// migrations table does not exist.
@Test
public void shouldCleanMigrationsStreamEvenIfTableDoesntExist() throws Exception {
    // Given:
    givenMigrationsTableDoesNotExist();

    // When:
    final int status = command.command(config, cfg -> client);

    // Then:
    assertThat(status, is(0));
    verify(client).executeStatement("DROP STREAM " + MIGRATIONS_STREAM + " DELETE TOPIC;");
    verify(client, never()).executeStatement("DROP TABLE " + MIGRATIONS_TABLE + " DELETE TOPIC;");
    verify(client, never()).executeStatement("TERMINATE " + CTAS_QUERY_ID + ";");
}
/**
 * Reads path attributes from OpenStack Swift. Containers return size/region
 * info; objects return attributes derived from their metadata. Directory
 * placeholders without a backing object fall back to listing children, and a
 * missing file may still resolve to a pending large-object (segmented) upload.
 *
 * @throws NotfoundException    when the path does not exist, or when the object's
 *                              MIME type contradicts the requested path type
 * @throws BackgroundException  for mapped Swift/IO failures
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    final Region region = regionService.lookup(file);
    try {
        if(containerService.isContainer(file)) {
            // Container-level attributes: total size and region only.
            final ContainerInfo info = session.getClient().getContainerInfo(region, containerService.getContainer(file).getName());
            final PathAttributes attributes = new PathAttributes();
            attributes.setSize(info.getTotalSize());
            attributes.setRegion(info.getRegion().getRegionId());
            return attributes;
        }
        final ObjectMetadata metadata;
        try {
            try {
                metadata = session.getClient().getObjectMetaData(region, containerService.getContainer(file).getName(), containerService.getKey(file));
            } catch(GenericException e) {
                throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
        } catch(NotfoundException e) {
            if(file.isDirectory()) {
                // Directory placeholder file may be missing. Still return empty attributes when we find children
                try {
                    new SwiftObjectListService(session).list(file, new CancellingListProgressListener());
                } catch(ListCanceledException l) {
                    // Found common prefix
                    return PathAttributes.EMPTY;
                } catch(NotfoundException n) {
                    // No children either — the directory truly does not exist.
                    throw e;
                }
                // Common prefix only
                return PathAttributes.EMPTY;
            }
            // Try to find pending large file upload
            final Write.Append append = new SwiftLargeObjectUploadFeature(session, regionService, new SwiftWriteFeature(session, regionService)).append(file, new TransferStatus());
            if(append.append) {
                return new PathAttributes().withSize(append.offset);
            }
            throw e;
        }
        // Type checks: the stored MIME type must agree with the requested path type.
        if(file.isDirectory()) {
            if(!StringUtils.equals(SwiftDirectoryFeature.DIRECTORY_MIME_TYPE, metadata.getMimeType())) {
                throw new NotfoundException(String.format("File %s has set MIME type %s but expected %s", file.getAbsolute(), metadata.getMimeType(), SwiftDirectoryFeature.DIRECTORY_MIME_TYPE));
            }
        }
        if(file.isFile()) {
            if(StringUtils.equals(SwiftDirectoryFeature.DIRECTORY_MIME_TYPE, metadata.getMimeType())) {
                throw new NotfoundException(String.format("File %s has set MIME type %s", file.getAbsolute(), metadata.getMimeType()));
            }
        }
        return this.toAttributes(metadata);
    } catch(GenericException e) {
        throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    } catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
}
// Verifies a created directory placeholder resolves to attributes, and that requesting
// the same path as a file fails with NotfoundException (MIME-type mismatch).
@Test
public void testFindPlaceholder() throws Exception {
    final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    container.attributes().setRegion("IAD");
    final String name = UUID.randomUUID().toString();
    final Path file = new Path(container, name, EnumSet.of(Path.Type.directory));
    new SwiftDirectoryFeature(session).mkdir(file, new TransferStatus());
    final PathAttributes attributes = new SwiftAttributesFinderFeature(session).find(file);
    assertNotNull(attributes.getChecksum().hash);
    // Test wrong type
    try {
        new SwiftAttributesFinderFeature(session).find(new Path(container, name, EnumSet.of(Path.Type.file)));
        fail();
    } catch(NotfoundException e) {
        // Expected
    }
    new SwiftDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns a read-only view of the request headers; callers cannot mutate
 * this request's headers through the returned map.
 */
@Override
public Map<String, String> requestHeaders() {
    return unmodifiableMap(requestHeaders);
}
// Verifies the returned header map rejects both put() and remove().
@Test
public void shouldReturnUnmodifiableRequestHeaders() throws Exception {
    DefaultGoPluginApiRequest request = new DefaultGoPluginApiRequest("extension", "1.0", "request-name");
    Map<String, String> requestHeaders = request.requestHeaders();
    try {
        requestHeaders.put("new-key", "new-value");
        fail("Should not allow modification of request headers");
    } catch (UnsupportedOperationException e) {
    }
    try {
        requestHeaders.remove("key");
        fail("Should not allow modification of request headers");
    } catch (UnsupportedOperationException e) {
    }
}
/**
 * Processes a batch of replication instances, dispatching each item
 * independently. A failing item yields a 500 entry in the batch response
 * without aborting the rest; only an unexpected error while assembling the
 * batch itself produces an overall 500.
 */
@Path("batch")
@POST
public Response batchReplication(ReplicationList replicationList) {
    try {
        ReplicationListResponse batchResponse = new ReplicationListResponse();
        for (ReplicationInstance instanceInfo : replicationList.getReplicationList()) {
            try {
                batchResponse.addResponse(dispatch(instanceInfo));
            } catch (Exception e) {
                // Per-item failure: record a 500 for this item and continue.
                batchResponse.addResponse(new ReplicationInstanceResponse(Status.INTERNAL_SERVER_ERROR.getStatusCode(), null));
                logger.error("{} request processing failed for batch item {}/{}", instanceInfo.getAction(), instanceInfo.getAppName(), instanceInfo.getId(), e);
            }
        }
        return Response.ok(batchResponse).build();
    } catch (Throwable e) {
        logger.error("Cannot execute batch Request", e);
        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
    }
}
// Verifies a batched Register action is dispatched to addInstance exactly once.
@Test
public void testRegisterBatching() throws Exception {
    ReplicationList replicationList = new ReplicationList(newReplicationInstanceOf(Action.Register, instanceInfo));
    Response response = peerReplicationResource.batchReplication(replicationList);
    assertStatusOkReply(response);
    verify(applicationResource, times(1)).addInstance(instanceInfo, "true");
}
/**
 * Evaluates the condition via the superclass, then refreshes this
 * condition's description with a verbose one built from the tested object
 * and the match outcome, so failure messages show the actual value.
 */
@Override
public boolean matches(T objectUnderTest) {
    boolean matches = super.matches(objectUnderTest);
    describedAs(buildVerboseDescription(objectUnderTest, matches));
    return matches;
}
// Verifies a successful match keeps the plain description (no actual value appended).
@Test
public void should_succeed_and_display_description_without_actual() {
    assertThat(VERBOSE_CONDITION.matches("foo")).isTrue();
    assertThat(VERBOSE_CONDITION).hasToString("shorter than 4");
}
/**
 * Structural equality over all selection parameters. Note: qualifiers and
 * resultType are compared with a private equals overload (not
 * Objects.equals), presumably because TypeMirror needs special comparison —
 * keep that distinction intact.
 */
@Override
public boolean equals(Object obj) {
    if ( this == obj ) {
        return true;
    }
    if ( obj == null ) {
        return false;
    }
    if ( getClass() != obj.getClass() ) {
        return false;
    }
    final SelectionParameters other = (SelectionParameters) obj;
    if ( !equals( this.qualifiers, other.qualifiers ) ) {
        return false;
    }
    if ( !Objects.equals( this.qualifyingNames, other.qualifyingNames ) ) {
        return false;
    }
    if ( !Objects.equals( this.conditionQualifiers, other.conditionQualifiers ) ) {
        return false;
    }
    if ( !Objects.equals( this.conditionQualifyingNames, other.conditionQualifyingNames ) ) {
        return false;
    }
    if ( !Objects.equals( this.sourceRHS, other.sourceRHS ) ) {
        return false;
    }
    return equals( this.resultType, other.resultType );
}
// Verifies equals() is false (symmetrically) when only the resultType differs.
@Test
public void testDifferentResultTypes() {
    List<String> qualifyingNames = Arrays.asList( "language", "german" );
    TypeMirror resultType = new TestTypeMirror( "resultType" );
    List<TypeMirror> qualifiers = new ArrayList<>();
    qualifiers.add( new TestTypeMirror( "org.mapstruct.test.SomeType" ) );
    qualifiers.add( new TestTypeMirror( "org.mapstruct.test.SomeOtherType" ) );
    SelectionParameters params = new SelectionParameters( qualifiers, qualifyingNames, resultType, typeUtils );
    List<String> qualifyingNames2 = Arrays.asList( "language", "german" );
    TypeMirror resultType2 = new TestTypeMirror( "otherResultType" );
    List<TypeMirror> qualifiers2 = new ArrayList<>();
    qualifiers2.add( new TestTypeMirror( "org.mapstruct.test.SomeType" ) );
    qualifiers2.add( new TestTypeMirror( "org.mapstruct.test.SomeOtherType" ) );
    SelectionParameters params2 = new SelectionParameters( qualifiers2, qualifyingNames2, resultType2, typeUtils );
    assertThat( params.equals( params2 ) ).as( "Different resultType" ).isFalse();
    assertThat( params2.equals( params ) ).as( "Different resultType" ).isFalse();
}
// Returns the shared, stateless function mapping a Component to its UUID.
public static Function<Component, String> toComponentUuid() {
    return ToComponentUuid.INSTANCE;
}
// The mapping function must yield the UUID assigned to the component.
@Test
public void toComponentUuid_returns_the_ref_of_the_Component() {
    assertThat(toComponentUuid().apply(ReportComponent.builder(PROJECT, SOME_INT).setUuid("uuid_" + SOME_INT).build())).isEqualTo("uuid_" + SOME_INT);
}
@Override
public void unbindSocialUser(Long userId, Integer userType, Integer socialType, String openid) {
    // Look up the social user matching this platform type + openid.
    SocialUserDO socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, openid);
    if (socialUser == null) {
        throw exception(SOCIAL_USER_NOT_FOUND);
    }
    // Remove the bind row linking this system user to the social account.
    socialUserBindMapper.deleteByUserTypeAndUserIdAndSocialType(userType, userId, socialUser.getType());
}
// Unbinding deletes the bind row that links the system user to the social account.
@Test
public void testUnbindSocialUser_success() {
    // Prepare parameters
    Long userId = 1L;
    Integer userType = UserTypeEnum.ADMIN.getValue();
    Integer type = SocialTypeEnum.GITEE.getType();
    String openid = "test_openid";
    // Mock data: social user
    SocialUserDO socialUser = randomPojo(SocialUserDO.class).setType(type).setOpenid(openid);
    socialUserMapper.insert(socialUser);
    // Mock data: social bind relation
    SocialUserBindDO socialUserBind = randomPojo(SocialUserBindDO.class).setUserType(userType)
            .setUserId(userId).setSocialType(type);
    socialUserBindMapper.insert(socialUserBind);
    // Invoke
    socialUserService.unbindSocialUser(userId, userType, type, openid);
    // Assert: the bind row is gone
    assertEquals(0, socialUserBindMapper.selectCount(null).intValue());
}
/**
 * Executes the scenario: runs lifecycle hooks, executes steps in order (iteration is
 * driven by nextStepIndex() so a debugger/hook can rewind or skip), records results,
 * and always performs cleanup so parallel-runner countdown latches are released.
 */
@Override
public void run() {
    try {
        // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Record the in-flight step result before synthesizing a failure entry.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
// karate.call of a JS file returns its JSON result into the assigned variable.
@Test
void testKarateCallThatReturnsJson() {
    run(
        "def res = karate.call('called3.js')"
    );
    matchVar("res", "{ varA: '2', varB: '3' }");
}
/**
 * Converts a WebSocket framing &lt;open/&gt; element into the equivalent RFC 6120
 * &lt;stream:stream&gt; open tag: jabber:client namespace, stream prefix declared.
 */
static String getStreamFromOpenElement(String openElement) {
    // Swap the leading element name, then rewrite the framing namespace.
    String stream = openElement.replaceFirst("\\A<open ", "<stream:stream ")
            .replace("urn:ietf:params:xml:ns:xmpp-framing", "jabber:client");
    // Close either form of the element ("/>" or "></open>") with the stream-namespace declaration.
    stream = stream.replaceFirst("/>\\s*\\z", " xmlns:stream='http://etherx.jabber.org/streams'>");
    stream = stream.replaceFirst("></open>\\s*\\z", " xmlns:stream='http://etherx.jabber.org/streams'>");
    return stream;
}
// Both compact and expanded <open/> forms must normalize to the same stream header.
@Test
public void getStreamFromOpenElementTest() {
    assertEquals(OPEN_STREAM, AbstractWebSocket.getStreamFromOpenElement(OPEN_ELEMENT));
    assertEquals(OPEN_STREAM, AbstractWebSocket.getStreamFromOpenElement(OPEN_ELEMENT_EXPANDED));
}
/**
 * Reads a JMS object property, returning it only when it is a String.
 * Any provider error is logged and swallowed so tracing never breaks messaging.
 */
@Nullable
static String getPropertyIfString(Message message, String name) {
    try {
        Object value = message.getObjectProperty(name);
        // A String's toString() is itself, so a cast is behaviorally identical.
        return (value instanceof String) ? (String) value : null;
    } catch (Throwable t) {
        propagateIfFatal(t);
        log(t, "error getting property {0} from message {1}", name, message);
        return null;
    }
}
// A missing property yields null rather than an exception.
@Test
void getPropertyIfString_null() {
    assertThat(MessageProperties.getPropertyIfString(message, "b3")).isNull();
}
protected RouteStatistic recalculate(final Route route, final RouteStatistic routeStatistic) { AtomicInteger totalEips = new AtomicInteger(routeStatistic.getTotalEips()); AtomicInteger totalEipsTested = new AtomicInteger(routeStatistic.getTotalEipsTested()); AtomicInteger totalProcessingTime = new AtomicInteger(routeStatistic.getTotalProcessingTime()); route.getComponents().getAttributeMap().values().forEach(eipAttributes -> { if (!routeStatistic.isTotalEipsInitialized()) { // prevent adding the route multiple times totalEips.getAndAdd(eipAttributes.size()); } eipAttributes.forEach(eipAttribute -> { totalEipsTested.getAndAdd(eipAttribute.getExchangesTotal()); totalProcessingTime.getAndAdd(eipAttribute.getTotalProcessingTime()); }); }); // this is a hack because of some weird calculation bug that I cannot find if (totalEipsTested.get() > totalEips.get()) { totalEipsTested.set(totalEips.get()); } int coverage = 0; if (totalEips.get() > 0) { coverage = (100 * totalEipsTested.get()) / totalEips.get(); } RouteStatistic retval = new RouteStatistic(); retval.setId(route.getId()); retval.setTotalEips(totalEips.get()); retval.setTotalEipsTested(totalEipsTested.get()); retval.setTotalProcessingTime(totalProcessingTime.get()); retval.setCoverage(coverage); retval.setTotalEipsInitialized(true); return retval; }
// TODO(review): empty placeholder — add assertions covering recalculate()
// (seeded totals, tested<=total clamp, coverage percentage, initialized flag).
@Test
public void testRecalculate() {
}
/**
 * Fetches the next work item from the Dataflow service, advertising this worker's
 * map/seq-map/remote-source capabilities. Returns empty when no work is available or
 * the item is invalid; otherwise records stage name and work id in the logging MDC.
 */
@Override
public Optional<WorkItem> getWorkItem() throws IOException {
    List<String> workItemTypes =
        ImmutableList.of(
            WORK_ITEM_TYPE_MAP_TASK, WORK_ITEM_TYPE_SEQ_MAP_TASK, WORK_ITEM_TYPE_REMOTE_SOURCE_TASK);
    // All remote sources require the "remote_source" capability. Dataflow's
    // custom sources are further tagged with the format "custom_source".
    List<String> capabilities =
        new ArrayList<String>(
            Arrays.asList(
                options.getWorkerId(), CAPABILITY_REMOTE_SOURCE, PropertyNames.CUSTOM_SOURCE_FORMAT));
    if (options.getWorkerPool() != null) {
        capabilities.add(options.getWorkerPool());
    }
    Optional<WorkItem> workItem = getWorkItemInternal(workItemTypes, capabilities);
    if (!workItem.isPresent()) {
        // Normal case, this means that the response contained no work, i.e. no work is available
        // at this time.
        return Optional.empty();
    }
    if (workItem.get().getId() == null) {
        logger.debug("Discarding invalid work item {}", workItem.get());
        return Optional.empty();
    }
    WorkItem work = workItem.get();
    // Stage name depends on which task flavor the item carries; null if none match.
    final String stage;
    if (work.getMapTask() != null) {
        stage = work.getMapTask().getStageName();
        logger.info("Starting MapTask stage {}", stage);
    } else if (work.getSeqMapTask() != null) {
        stage = work.getSeqMapTask().getStageName();
        logger.info("Starting SeqMapTask stage {}", stage);
    } else if (work.getSourceOperationTask() != null) {
        stage = work.getSourceOperationTask().getStageName();
        logger.info("Starting SourceOperationTask stage {}", stage);
    } else {
        stage = null;
    }
    DataflowWorkerLoggingMDC.setStageName(stage);
    stageStartTime.set(DateTime.now());
    DataflowWorkerLoggingMDC.setWorkId(Long.toString(work.getId()));
    return workItem;
}
// Acquiring a MapTask work item must propagate its stage name into the logging MDC.
@Test
public void testCloudServiceCallMapTaskStagePropagation() throws Exception {
    // Publish and acquire a map task work item, and verify we're now processing that stage.
    final String stageName = "test_stage_name";
    MapTask mapTask = new MapTask();
    mapTask.setStageName(stageName);
    WorkItem workItem = createWorkItem(PROJECT_ID, JOB_ID);
    workItem.setMapTask(mapTask);
    MockLowLevelHttpResponse response = generateMockResponse(workItem);
    MockLowLevelHttpRequest request = new MockLowLevelHttpRequest().setResponse(response);
    MockHttpTransport transport = new MockHttpTransport.Builder().setLowLevelHttpRequest(request).build();
    DataflowWorkerHarnessOptions pipelineOptions = createPipelineOptionsWithTransport(transport);
    WorkUnitClient client = new DataflowWorkUnitClient(pipelineOptions, LOG);
    assertEquals(Optional.of(workItem), client.getWorkItem());
    assertEquals(stageName, DataflowWorkerLoggingMDC.getStageName());
}
/**
 * Resolves an environment value: interpreter properties take precedence over
 * the process environment; a blank result is logged as a warning.
 */
private String getEnv(String envName, InterpreterLaunchContext context) {
    String value = context.getProperties().getProperty(envName);
    if (StringUtils.isBlank(value)) {
        // Fall back to the real process environment.
        value = System.getenv(envName);
    }
    if (StringUtils.isBlank(value)) {
        LOGGER.warn("environment variable: {} is empty", envName);
    }
    return value;
}
// yarn-client launch must assemble ZEPPELIN_SPARK_CONF from the spark.* properties
// (files, jars, deployMode, sparkr archive, app name, master).
@Test
void testYarnClientMode_2() throws IOException {
    SparkInterpreterLauncher launcher = new SparkInterpreterLauncher(zConf, null);
    Properties properties = new Properties();
    properties.setProperty("SPARK_HOME", sparkHome);
    properties.setProperty("property_1", "value_1");
    properties.setProperty("spark.master", "yarn");
    properties.setProperty("spark.submit.deployMode", "client");
    properties.setProperty("spark.files", "file_1");
    properties.setProperty("spark.jars", "jar_1");
    InterpreterOption option = new InterpreterOption();
    InterpreterLaunchContext context = new InterpreterLaunchContext(properties, option, null, "user1", "intpGroupId", "groupId", "spark", "spark", 0, "host");
    InterpreterClient client = launcher.launch(context);
    assertTrue( client instanceof ExecRemoteInterpreterProcess);
    try (ExecRemoteInterpreterProcess interpreterProcess = (ExecRemoteInterpreterProcess) client) {
        assertEquals("spark", interpreterProcess.getInterpreterSettingName());
        assertTrue(interpreterProcess.getInterpreterDir().endsWith("/interpreter/spark"));
        assertTrue(interpreterProcess.getLocalRepoDir().endsWith("/local-repo/groupId"));
        assertEquals(zConf.getInterpreterRemoteRunnerPath(), interpreterProcess.getInterpreterRunner());
        assertTrue(interpreterProcess.getEnv().size() >= 2);
        assertEquals(sparkHome, interpreterProcess.getEnv().get("SPARK_HOME"));
        String sparkJars = "jar_1";
        String sparkrZip = sparkHome + "/R/lib/sparkr.zip#sparkr";
        String sparkFiles = "file_1";
        // Expected launcher-generated spark-submit conf, '|'-delimited; compared order-insensitively.
        String expected = "--conf|spark.yarn.dist.archives=" + sparkrZip
            + "|--conf|spark.files=" + sparkFiles + "|--conf|spark.jars=" + sparkJars
            + "|--conf|spark.submit.deployMode=client"
            + "|--conf|spark.yarn.isPython=true|--conf|spark.app.name=intpGroupId|--conf|spark.master=yarn";
        assertTrue(CollectionUtils.isEqualCollection(Arrays.asList(expected.split("\\|")),
            Arrays.asList(interpreterProcess.getEnv().get("ZEPPELIN_SPARK_CONF").split("\\|"))));
    }
}
/** Joins identifier values with '.' separators (e.g. ["a","b"] -> "a.b"); empty list -> "". */
public static String join(List<PdlParser.IdentifierContext> identifiers) {
    StringBuilder joined = new StringBuilder();
    String separator = "";
    for (PdlParser.IdentifierContext identifier : identifiers) {
        // Prefix every element after the first with the dot separator.
        joined.append(separator).append(identifier.value);
        separator = ".";
    }
    return joined.toString();
}
// join concatenates identifier values with '.' separators.
@Test
public void testJoin() {
    PdlParser.IdentifierContext a = new PdlParser.IdentifierContext(null, 0);
    a.value = "a";
    PdlParser.IdentifierContext b = new PdlParser.IdentifierContext(null, 0);
    b.value = "b";
    assertEquals(PdlParseUtils.join(Arrays.asList(a, b)), "a.b");
}
// The formula produces exactly one output metric, identified by metricKey.
@Override
public String[] getOutputMetricKeys() {
    return new String[] {metricKey};
}
// The distribution formula must output only the function-complexity-distribution metric.
@Test
public void check_output_metric_key_is_function_complexity_distribution() {
    assertThat(BASIC_DISTRIBUTION_FORMULA.getOutputMetricKeys()).containsOnly(FUNCTION_COMPLEXITY_DISTRIBUTION_KEY);
}
/**
 * Requests a registration token from the GoCD server for this agent's UUID.
 * Returns the response body on HTTP 200; otherwise throws with the status line
 * and the body (HTML error pages are elided to keep the message readable).
 */
public String getToken() throws IOException {
    LOGGER.debug("[Agent Registration] Using URL {} to get a token.", tokenURL);
    HttpRequestBase getTokenRequest = (HttpRequestBase) RequestBuilder.get(tokenURL)
            .addParameter("uuid", agentRegistry.uuid())
            .build();
    try (CloseableHttpResponse response = httpClient.execute(getTokenRequest)) {
        if (response.getStatusLine().getStatusCode() == SC_OK) {
            LOGGER.info("The server has generated token for the agent.");
            return responseBody(response);
        } else {
            LOGGER.error("[Agent Registration] Got status {} from GoCD", response.getStatusLine());
            // Don't dump raw HTML error pages into the exception message.
            String error = Optional.ofNullable(ContentType.get(response.getEntity()))
                    .filter(ct -> ContentType.TEXT_HTML.getMimeType().equals(ct.getMimeType()))
                    .map(ignore -> "<non-machine HTML response>")
                    .orElseGet(() -> responseBody(response));
            throw new RuntimeException(String.format("Agent registration could not acquire token due to %s: %s", response.getStatusLine(), error));
        }
    } finally {
        getTokenRequest.releaseConnection();
    }
}
// A 200 response body becomes the token, and the request must carry the agent UUID.
@Test
void shouldGetTokenFromServer() throws Exception {
    final ArgumentCaptor<HttpRequestBase> argumentCaptor = ArgumentCaptor.forClass(HttpRequestBase.class);
    final CloseableHttpResponse httpResponse = mock(CloseableHttpResponse.class);
    when(agentRegistry.uuid()).thenReturn("agent-uuid");
    when(httpClient.execute(any(HttpRequestBase.class))).thenReturn(httpResponse);
    when(httpResponse.getEntity()).thenReturn(new StringEntity("token-from-server"));
    when(httpResponse.getStatusLine()).thenReturn(new BasicStatusLine(new ProtocolVersion("https", 1, 2), SC_OK, null));
    final String token = tokenRequester.getToken();
    verify(httpClient).execute(argumentCaptor.capture());
    final HttpRequestBase requestBase = argumentCaptor.getValue();
    final List<NameValuePair> nameValuePairs = URLEncodedUtils.parse(requestBase.getURI(), StandardCharsets.UTF_8);
    assertThat(token).isEqualTo("token-from-server");
    assertThat(findParam(nameValuePairs, "uuid").getValue()).isEqualTo("agent-uuid");
}
// Lazily iterates shuffle entries in [startPosition, endPosition); reiterable for multiple passes.
@Override
public Reiterator<ShuffleEntry> read(
    @Nullable ShufflePosition startPosition, @Nullable ShufflePosition endPosition) {
    return new ShuffleReadIterator(startPosition, endPosition);
}
// Empty batches are skipped; iteration chains batch reads until a null continuation position.
@Test
public void readerShouldMergeMultipleBatchResultsIncludingEmptyShards() throws Exception {
    List<ShuffleEntry> e1s = new ArrayList<>();
    List<ShuffleEntry> e2s = new ArrayList<>();
    ShuffleEntry e3 = newShuffleEntry(KEY, SKEY, VALUE);
    List<ShuffleEntry> e3s = Collections.singletonList(e3);
    when(batchReader.read(START_POSITION, END_POSITION))
        .thenReturn(new ShuffleBatchReader.Batch(e1s, NEXT_START_POSITION));
    when(batchReader.read(NEXT_START_POSITION, END_POSITION))
        .thenReturn(new ShuffleBatchReader.Batch(e2s, SECOND_NEXT_START_POSITION));
    when(batchReader.read(SECOND_NEXT_START_POSITION, END_POSITION))
        .thenReturn(new ShuffleBatchReader.Batch(e3s, null));
    List<ShuffleEntry> results = newArrayList(reader.read(START_POSITION, END_POSITION));
    assertThat(results, contains(e3));
    verify(batchReader).read(START_POSITION, END_POSITION);
    verify(batchReader).read(NEXT_START_POSITION, END_POSITION);
    verify(batchReader).read(SECOND_NEXT_START_POSITION, END_POSITION);
    verifyNoMoreInteractions(batchReader);
}
// Delegates binary-stream retrieval to the underlying merged result.
@Override
public InputStream getInputStream(final int columnIndex, final String type) throws SQLException {
    return mergedResult.getInputStream(columnIndex, type);
}
// EncryptMergedResult must pass getInputStream straight through to the wrapped merged result.
@Test
void assertGetInputStream() throws SQLException {
    InputStream inputStream = mock(InputStream.class);
    when(mergedResult.getInputStream(1, "asc")).thenReturn(inputStream);
    assertThat(new EncryptMergedResult(database, encryptRule, selectStatementContext, mergedResult).getInputStream(1, "asc"), is(inputStream));
}
// Registers a gauge under the given name; the underlying register rejects duplicate names.
public <T> Gauge<T> registerGauge(String name, Gauge<T> metric) throws IllegalArgumentException {
    return register(name, metric);
}
// A lambda gauge keeps its inferred value type (Long here).
@Test
public void infersGaugeType() {
    Gauge<Long> gauge = registry.registerGauge("gauge", () -> 10_000_000_000L);
    assertThat(gauge.getValue()).isEqualTo(10_000_000_000L);
}
/**
 * Persists value at key, creating each missing ancestor directory row first;
 * an existing key is updated in place. SQL failures are logged, not rethrown.
 */
@Override
public void persist(final String key, final String value) {
    try {
        if (isExisted(key)) {
            update(key, value);
            return;
        }
        String tempPrefix = "";
        String parent = SEPARATOR;
        String[] paths = Arrays.stream(key.split(SEPARATOR)).filter(each -> !Strings.isNullOrEmpty(each)).toArray(String[]::new);
        // Create key level directory recursively.
        for (int i = 0; i < paths.length - 1; i++) {
            String tempKey = tempPrefix + SEPARATOR + paths[i];
            if (!isExisted(tempKey)) {
                // Intermediate nodes carry an empty value; parent tracks the previous level.
                insert(tempKey, "", parent);
            }
            tempPrefix = tempKey;
            parent = tempKey;
        }
        insert(key, value, parent);
    } catch (final SQLException ex) {
        log.error("Persist {} data to key: {} failed", getType(), key, ex);
    }
}
// Persisting a deep key must create each ancestor directory row (empty value)
// and finally the leaf row carrying the content.
@Test
void assertPersistForDirectory() throws SQLException {
    final String key = "/parent/child/test1";
    final String value = "test1_content";
    when(mockJdbcConnection.prepareStatement(repositorySQL.getSelectByKeySQL())).thenReturn(mockPreparedStatement);
    when(mockJdbcConnection.prepareStatement(repositorySQL.getInsertSQL())).thenReturn(mockPreparedStatementForPersist);
    when(mockPreparedStatement.executeQuery()).thenReturn(mockResultSet);
    repository.persist(key, value);
    int depthOfDirectory = (int) key.chars().filter(each -> each == '/').count();
    int beginIndex = 0;
    String parentDirectory = "/";
    for (int i = 0; i < depthOfDirectory; i++) {
        int separatorIndex = key.indexOf('/', beginIndex);
        int nextSeparatorIndex = key.indexOf('/', separatorIndex + 1);
        if (nextSeparatorIndex == -1) {
            nextSeparatorIndex = key.length();
        }
        String directoryPath = key.substring(0, nextSeparatorIndex);
        // Verifying if get operation is called for every directory level
        verify(mockPreparedStatement).setString(1, directoryPath);
        // Verifying that during insert operation, setString at index 2 is called for every directory level
        verify(mockPreparedStatementForPersist).setString(2, directoryPath);
        // Verifying that during insert operation, setString at index 4 is called for every parent directory
        verify(mockPreparedStatementForPersist).setString(4, parentDirectory);
        beginIndex = nextSeparatorIndex;
        parentDirectory = directoryPath;
    }
    // Verifying that during insert operation, setString at index 3 is called with "" for all the parent directories
    verify(mockPreparedStatementForPersist, times(depthOfDirectory - 1)).setString(3, "");
    // Verifying that during insert operation, setString at index 3 is called with the leaf node once
    verify(mockPreparedStatementForPersist).setString(3, "test1_content");
    // Verifying that during insert operation, setString at index 1 is called with a UUID
    verify(mockPreparedStatementForPersist, times(depthOfDirectory)).setString(eq(1), anyString());
    // Verifying that executeOperation in insert is called for all the directory levels
    verify(mockPreparedStatementForPersist, times(depthOfDirectory)).executeUpdate();
}
/** Membership test: a null address or an uninitialized list can never match. */
@Override
public boolean isIn(String ipAddress) {
    return ipAddress != null && addressList != null && addressList.includes(ipAddress);
}
// A null address never matches, even when the list file contains entries.
@Test
public void testNullIP() throws IOException {
    String[] ips = {"10.119.103.112", "10.221.102.0/23"};
    createFileWithEntries ("ips.txt", ips);
    IPList ipList = new FileBasedIPList("ips.txt");
    assertFalse ("Null Ip is in the list", ipList.isIn(null));
}
/** True iff at least one flow rule is registered for the (non-null) resource. */
public boolean hasConfig(String resource) {
    return resource != null && !getRules(resource).isEmpty();
}
// hasConfig is true only for resources with at least one registered rule.
@Test
public void testHasConfig() {
    // Setup
    final Map<String, List<FlowRule>> rulesMap = generateFlowRules(true);
    // Run the test and verify the results
    ruleManager.updateRules(rulesMap);
    assertTrue(ruleManager.hasConfig("rule1"));
    assertFalse(ruleManager.hasConfig("rule3"));
}
// Tars clients carry no extra rule-handler configuration, so the handler payload is empty.
@Override
protected String ruleHandler() {
    return "";
}
// The Tars register service exposes an empty rule handler.
@Test
public void testRuleHandler() {
    assertEquals(StringUtils.EMPTY, shenyuClientRegisterTarsService.ruleHandler());
}
/** Returns the cached log text, or null if the weak reference is unset or was collected. */
public AnnotatedLargeText obtainLog() {
    // Snapshot the field once so a concurrent reassignment cannot NPE between check and deref.
    WeakReference<AnnotatedLargeText> ref = log;
    return ref == null ? null : ref.get();
}
// The log must stream to completion and begin with the annotated line.
@Test
public void annotatedText() throws Exception {
    MyTaskAction action = new MyTaskAction();
    action.start();
    AnnotatedLargeText annotatedText = action.obtainLog();
    // Poll until the producer finishes writing.
    while (!annotatedText.isComplete()) {
        Thread.sleep(10);
    }
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    final long length = annotatedText.writeLogTo(0, os);
    // Windows based systems will be 220, linux base 219
    assertTrue("length should be longer or even 219", length >= 219);
    assertTrue(os.toString(StandardCharsets.UTF_8).startsWith("a linkCompleted"));
}
public static String getUnresolvedSchemaName(final Schema schema) { if (!isUnresolvedSchema(schema)) { throw new IllegalArgumentException("Not a unresolved schema: " + schema); } return schema.getProp(UR_SCHEMA_ATTR); }
// A schema lacking the unresolved-marker property must be rejected.
@Test(expected = IllegalArgumentException.class)
public void testIsUnresolvedSchemaError2() {
    // No "UnresolvedSchema" property
    Schema s = SchemaBuilder.record("R").prop("org.apache.avro.idl.unresolved.name", "x").fields().endRecord();
    SchemaResolver.getUnresolvedSchemaName(s);
}
// Returns the cached OS memory page size in bytes.
public static int pageSize() {
    return PAGE_SIZE;
}
// OS.pageSize must agree with the value Unsafe reports.
@Test
public void test_pageSize() {
    assertEquals(UnsafeLocator.UNSAFE.pageSize(), OS.pageSize());
}
// Deprecated single-stage authenticate; this implementation only supports the async flow.
@Deprecated
@Override
public AuthData authenticate(AuthData authData) throws AuthenticationException {
    // This method is not expected to be called and is subject to removal.
    throw new AuthenticationException("Not supported");
}
// The deprecated authenticate overload must always signal "Not supported".
@SuppressWarnings("deprecation")
@Test
void authenticateShouldThrowNotImplementedException() {
    AuthenticationStateOpenID state = new AuthenticationStateOpenID(null, null, null);
    try {
        state.authenticate(null);
        fail("Expected AuthenticationException to be thrown");
    } catch (AuthenticationException e) {
        assertEquals(e.getMessage(), "Not supported");
    }
}
// Recording an exception message always flips the status to FAILED_WITH_EXCEPTION.
public void setExceptionMessage(String exceptionMessage) {
    this.exceptionMessage = exceptionMessage;
    this.status = FactMappingValueStatus.FAILED_WITH_EXCEPTION;
}
// Setting an exception message flips status and leaves the other error fields untouched.
@Test
public void setExceptionMessage() {
    String exceptionValue = "Exception";
    value.setExceptionMessage(exceptionValue);
    assertThat(value.getStatus()).isEqualTo(FactMappingValueStatus.FAILED_WITH_EXCEPTION);
    assertThat(value.getExceptionMessage()).isEqualTo(exceptionValue);
    assertThat(value.getErrorValue()).isNull();
    assertThat(value.getCollectionPathToValue()).isNull();
}
// Convenience overload using the consumer's default API timeout.
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
    return offsetsForTimes(timestampsToSearch, Duration.ofMillis(defaultApiTimeoutMs));
}
// A null timestamp map is rejected up front with NPE.
@Test
public void testOffsetsForTimesOnNullPartitions() {
    consumer = newConsumer();
    assertThrows(NullPointerException.class, () -> consumer.offsetsForTimes(null, Duration.ofMillis(1)));
}
/**
 * Grants whole-collection read when the collection is readable by everyone
 * (ALL_ALLOWED) or the subject holds the wildcard read permission for it.
 * Unknown collections (no read permission registered) yield false.
 */
public boolean hasReadPermissionForWholeCollection(final Subject subject, final String collection) {
    // filter(..).isPresent() is equivalent to map(..).orElse(false) for a boolean predicate.
    return readPermissionForCollection(collection)
            .filter(rp -> rp.equals(DbEntity.ALL_ALLOWED) || subject.isPermitted(rp + ":*"))
            .isPresent();
}
// Permission registered in the catalog but not granted to the subject -> no whole-collection read.
@Test
void hasReadPermissionForWholeCollectionReturnsFalseWhenSubjectMissesPermission() {
    doReturn(Optional.of(
        new DbEntityCatalogEntry("streams", "title", StreamImpl.class, "streams:read"))
    ).when(catalog)
        .getByCollectionName("streams");
    doReturn(false).when(subject).isPermitted("streams:read:*");
    final boolean hasReadPermissions = toTest.hasReadPermissionForWholeCollection(subject, "streams");
    assertFalse(hasReadPermissions);
}
/**
 * Reads exactly {@code length} bytes into {@code bytes} starting at {@code offset},
 * throwing EOFException when the stream ends before the request is satisfied.
 */
public static void readFully(InputStream stream, byte[] bytes, int offset, int length)
    throws IOException {
    int read = readRemaining(stream, bytes, offset, length);
    int missing = length - read;
    if (missing > 0) {
        throw new EOFException(
            "Reached the end of stream with " + missing + " bytes left to read");
    }
}
// readFully with offset/length must fill exactly [2,7) from a stream that returns short reads.
@Test
public void testReadFullySmallReadsWithStartAndLength() throws IOException {
    byte[] buffer = new byte[10];
    MockInputStream stream = new MockInputStream(2, 2, 3);
    IOUtil.readFully(stream, buffer, 2, 5);
    assertThat(Arrays.copyOfRange(buffer, 2, 7))
        .as("Byte array contents should match")
        .isEqualTo(Arrays.copyOfRange(MockInputStream.TEST_ARRAY, 0, 5));
    assertThat(stream.getPos()).as("Stream position should reflect bytes read").isEqualTo(5);
}
/**
 * Records this node's health into the replicated map, keyed by the local
 * Hazelcast member UUID and stamped with the cluster time.
 */
@Override
public void writeMine(NodeHealth nodeHealth) {
    requireNonNull(nodeHealth, "nodeHealth can't be null");
    Map<UUID, TimestampedNodeHealth> sqHealthState = readReplicatedMap();
    if (LOG.isTraceEnabled()) {
        // Copy before logging: the replicated map may mutate concurrently while being formatted.
        LOG.trace("Reading {} and adding {}", new HashMap<>(sqHealthState), nodeHealth);
    }
    sqHealthState.put(hzMember.getUuid(), new TimestampedNodeHealth(nodeHealth, hzMember.getClusterTime()));
}
// Null health payloads are rejected with a descriptive NPE.
@Test
public void write_fails_with_NPE_if_arg_is_null() {
    assertThatThrownBy(() -> underTest.writeMine(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("nodeHealth can't be null");
}
/**
 * Merges a source table schema with derived columns, watermarks and primary key
 * (e.g. from a LIKE clause), honoring the per-feature merging strategies.
 * Append order matters: columns first, then watermarks, then the primary key.
 */
public Schema mergeTables(
        Map<FeatureOption, MergingStrategy> mergingStrategies,
        Schema sourceSchema,
        List<SqlNode> derivedColumns,
        List<SqlWatermark> derivedWatermarkSpecs,
        SqlTableConstraint derivedPrimaryKey) {
    SchemaBuilder schemaBuilder =
        new SchemaBuilder(
            mergingStrategies,
            sourceSchema,
            (FlinkTypeFactory) validator.getTypeFactory(),
            dataTypeFactory,
            validator,
            escapeExpression);
    schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
    schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
    schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey);
    return schemaBuilder.build();
}
// Derived regular and metadata columns must append after the source schema's columns.
@Test
void mergeMetadataColumns() {
    Schema sourceSchema =
        Schema.newBuilder()
            .column("one", DataTypes.INT())
            .columnByMetadata("two", DataTypes.INT(), false)
            .columnByExpression("c", "ABS(two)")
            .build();
    List<SqlNode> derivedColumns =
        Arrays.asList(
            regularColumn("three", DataTypes.INT()),
            metadataColumn("four", DataTypes.INT(), true));
    Schema mergedSchema =
        util.mergeTables(
            getDefaultMergingStrategies(),
            sourceSchema,
            derivedColumns,
            Collections.emptyList(),
            null);
    Schema expectedSchema =
        Schema.newBuilder()
            .column("one", DataTypes.INT())
            .columnByMetadata("two", DataTypes.INT(), false)
            .columnByExpression("c", "ABS(two)")
            .column("three", DataTypes.INT())
            .columnByMetadata("four", DataTypes.INT(), true)
            .build();
    assertThat(mergedSchema).isEqualTo(expectedSchema);
}
// Routes client events into distro sync; a no-op in standalone deployments.
@Override
public void onEvent(Event event) {
    if (EnvUtil.getStandaloneMode()) {
        return;
    }
    if (event instanceof ClientEvent.ClientVerifyFailedEvent) {
        // Verify failure: resend this client's data only to the server that failed verification.
        syncToVerifyFailedServer((ClientEvent.ClientVerifyFailedEvent) event);
    } else {
        syncToAllServer((ClientEvent) event);
    }
}
// A client disconnect triggers a DELETE sync to all servers, never a targeted sync.
@Test
void testOnClientDisconnectEventSuccess() {
    distroClientDataProcessor.onEvent(new ClientEvent.ClientDisconnectEvent(client, true));
    verify(distroProtocol, never()).syncToTarget(any(), any(), anyString(), anyLong());
    verify(distroProtocol).sync(any(), eq(DataOperation.DELETE));
}
// Returns the catalog name this instance was configured with.
@Override
public String name() {
    return name;
}
// Re-applying the same (or unrelated/no) ownership properties must leave owner and type unchanged.
@Test
public void testSetNamespaceOwnershipNoop() throws TException, IOException {
    setNamespaceOwnershipAndVerify(
        "set_ownership_noop_1",
        ImmutableMap.of(HiveCatalog.HMS_DB_OWNER, "some_individual_owner"),
        ImmutableMap.of(
            HiveCatalog.HMS_DB_OWNER, "some_individual_owner",
            HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.USER.name()),
        "some_individual_owner",
        PrincipalType.USER,
        "some_individual_owner",
        PrincipalType.USER);
    setNamespaceOwnershipAndVerify(
        "set_ownership_noop_2",
        ImmutableMap.of(
            HiveCatalog.HMS_DB_OWNER, "some_group_owner",
            HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()),
        ImmutableMap.of(
            HiveCatalog.HMS_DB_OWNER, "some_group_owner",
            HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()),
        "some_group_owner",
        PrincipalType.GROUP,
        "some_group_owner",
        PrincipalType.GROUP);
    // No properties at all: owner defaults to the current user, before and after.
    setNamespaceOwnershipAndVerify(
        "set_ownership_noop_3",
        ImmutableMap.of(),
        ImmutableMap.of(),
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER,
        UserGroupInformation.getCurrentUser().getShortUserName(),
        PrincipalType.USER);
    // Unrelated properties don't disturb existing group ownership.
    setNamespaceOwnershipAndVerify(
        "set_ownership_noop_4",
        ImmutableMap.of(
            HiveCatalog.HMS_DB_OWNER, "some_group_owner",
            HiveCatalog.HMS_DB_OWNER_TYPE, PrincipalType.GROUP.name()),
        ImmutableMap.of("unrelated_prop_1", "value_1", "unrelated_prop_2", "value_2"),
        "some_group_owner",
        PrincipalType.GROUP,
        "some_group_owner",
        PrincipalType.GROUP);
}
/**
 * Runs all provider search tasks for the request — optionally in parallel on the
 * common ForkJoinPool — and returns the session's accumulated results for the type filter.
 */
@Override
public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) {
    SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter));
    if (request.inParallel()) {
        // Submit every task first, then join, so providers actually run concurrently.
        ForkJoinPool commonPool = ForkJoinPool.commonPool();
        getProviderTasks(request, session).stream().map(commonPool::submit).forEach(ForkJoinTask::join);
    } else {
        getProviderTasks(request, session).forEach(Runnable::run);
    }
    return session.getResults();
}
// Searching by node id must return exactly the labeled node with its provider-rendered display.
@Test
public void testUniqueNode() {
    GraphGenerator generator = GraphGenerator.build().generateTinyGraph();
    generator.getGraph().getNode(GraphGenerator.FIRST_NODE).setLabel(GraphGenerator.FIRST_NODE);
    SearchRequest request = buildRequest(GraphGenerator.FIRST_NODE, generator);
    Collection<SearchResult<Node>> results = controller.search(request, Node.class);
    Assert.assertEquals(1, results.size());
    SearchResult<Node> result = results.iterator().next();
    Assert.assertEquals(GraphGenerator.FIRST_NODE, result.getResult().getId());
    Assert.assertEquals(NodeIdSearchProvider.toHtmlDisplay(result.getResult()), result.getHtmlDisplay());
}
// Switches the assertion to exact double equality, viewing the array as an iterable.
public DoubleArrayAsIterable usingExactEquality() {
    return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
// Exact equality must still match NaN elements (unlike primitive ==).
@Test
public void usingExactEquality_contains_successWithNaN() {
    assertThat(array(1.1, NaN, 3.3)).usingExactEquality().contains(NaN);
}
// Convenience overload delegating to the name-based annotation check.
@Override
@PublicAPI(usage = ACCESS)
public boolean isAnnotatedWith(Class<? extends Annotation> annotationType) {
    return isAnnotatedWith(annotationType.getName());
}
// The resolved call target distinguishes present vs absent annotations.
@Test
public void isAnnotatedWith_type_on_resolved_target() {
    JavaCall<?> call = simulateCall().from(Origin.class, "call").to(Target.class, "called");
    assertThat(call.getTarget().isAnnotatedWith(QueriedAnnotation.class))
        .as("target is annotated with @" + QueriedAnnotation.class.getSimpleName())
        .isTrue();
    assertThat(call.getTarget().isAnnotatedWith(Deprecated.class))
        .as("target is annotated with @" + Deprecated.class.getSimpleName())
        .isFalse();
}
/**
 * Cancels a delegation token: decodes its identifier, re-syncs the local cache
 * with ZooKeeper so the removal is visible cluster-wide, then delegates cancellation.
 * (ByteArrayInputStream-backed streams need no explicit close.)
 */
@Override
public TokenIdent cancelToken(Token<TokenIdent> token, String canceller) throws IOException {
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    TokenIdent id = createIdentifier();
    id.readFields(in);
    syncLocalCacheWithZk(id);
    return super.cancelToken(token, canceller);
}
// A cancelled token must fail verification afterwards; retried to guard against ZK flakiness.
@SuppressWarnings("unchecked")
@Test
public void testCancelTokenSingleManager() throws Exception {
    for (int i = 0; i < TEST_RETRIES; i++) {
        DelegationTokenManager tm1 = null;
        String connectString = zkServer.getConnectString();
        Configuration conf = getSecretConf(connectString);
        tm1 = new DelegationTokenManager(conf, new Text("foo"));
        tm1.init();
        Token<DelegationTokenIdentifier> token =
            (Token<DelegationTokenIdentifier>) tm1.createToken(UserGroupInformation.getCurrentUser(), "foo");
        Assert.assertNotNull(token);
        tm1.cancelToken(token, "foo");
        try {
            verifyTokenFail(tm1, token);
            fail("Expected InvalidToken");
        } catch (SecretManager.InvalidToken it) {
            it.printStackTrace();
        }
        verifyDestroy(tm1, conf);
    }
}
/**
 * Wires the HTTP pipeline for a new connection.
 * Inbound: decode -> aggregate full messages (1 MiB cap); outbound: encode; then the app handler.
 */
@Override
protected void initChannel(SocketChannel socketChannel) throws Exception {
    ChannelPipeline pipeline = socketChannel.pipeline();
    // One addLast per handler keeps registration order explicit.
    pipeline.addLast(new HttpRequestDecoder());
    pipeline.addLast(new HttpObjectAggregator(1024 * 1024));
    pipeline.addLast(new HttpResponseEncoder());
    pipeline.addLast(new HttpServerHandler());
}
// initChannel must install decoder, aggregator, encoder and app handler — in that order.
@Test
public void testInitChannel() throws Exception {
    // Mock Objects
    HttpServerInitializer httpServerInitializer = mock(HttpServerInitializer.class);
    SocketChannel socketChannel = mock(SocketChannel.class);
    ChannelPipeline channelPipeline = mock(ChannelPipeline.class);
    // Mock SocketChannel#pipeline() method
    when(socketChannel.pipeline()).thenReturn(channelPipeline);
    // HttpServerInitializer#initChannel(SocketChannel) call real method
    doCallRealMethod().when(httpServerInitializer).initChannel(socketChannel);
    // Start test for HttpServerInitializer#initChannel(SocketChannel)
    httpServerInitializer.initChannel(socketChannel);
    // Verify 4 times calling ChannelPipeline#addLast() method
    verify(channelPipeline, times(4)).addLast(any(ChannelHandler.class));
    // Verify the order of calling ChannelPipeline#addLast() method
    InOrder inOrder = inOrder(channelPipeline);
    inOrder.verify(channelPipeline).addLast(any(HttpRequestDecoder.class));
    inOrder.verify(channelPipeline).addLast(any(HttpObjectAggregator.class));
    inOrder.verify(channelPipeline).addLast(any(HttpResponseEncoder.class));
    inOrder.verify(channelPipeline).addLast(any(HttpServerHandler.class));
}
/**
 * Structural equality for struct types: same arity, and positionally matching
 * field types plus case-insensitive field names. Pseudo types decide the match
 * themselves; non-struct types never match.
 */
@Override
public boolean matchesType(Type t) {
    if (t.isPseudoType()) {
        // Delegate: pseudo types (e.g. AnyMap) define their own matching rules.
        return t.matchesType(this);
    }
    if (!t.isStructType()) {
        return false;
    }
    StructType other = (StructType) t;
    int fieldCount = fields.size();
    if (fieldCount != other.fields.size()) {
        return false;
    }
    for (int i = 0; i < fieldCount; ++i) {
        StructField lhs = fields.get(i);
        StructField rhs = other.fields.get(i);
        if (!lhs.getType().matchesType(rhs.getType())
                || !StringUtils.equalsIgnoreCase(lhs.getName(), rhs.getName())) {
            return false;
        }
    }
    return true;
}
// Exercises StructType#matchesType against pseudo types, maps, differing field
// counts, names, types, an exact match, and a reordered-subfield mismatch.
@Test
public void testStructMatchType() throws Exception {
    // "struct<struct_test:int,c1:struct<c1:int,cc1:string>>"
    StructType c1 = new StructType(Lists.newArrayList(
        new StructField("c1", ScalarType.createType(PrimitiveType.INT)),
        new StructField("cc1", ScalarType.createDefaultCatalogString())
    ));
    StructType root = new StructType(Lists.newArrayList(
        new StructField("struct_test", ScalarType.createType(PrimitiveType.INT)),
        new StructField("c1", c1)
    ));
    // PseudoType MapType
    Type t = new AnyMapType();
    Assert.assertFalse(root.matchesType(t));
    // MapType
    Type keyType = ScalarType.createType(PrimitiveType.INT);
    Type valueType = ScalarType.createCharType(10);
    Type mapType = new MapType(keyType, valueType);
    Assert.assertFalse(root.matchesType(mapType));
    // Different fields length
    StructType c = new StructType(Lists.newArrayList(
        new StructField("c1", ScalarType.createType(PrimitiveType.INT))));
    Assert.assertFalse(root.matchesType(c));
    // Types will match with different field names
    StructType diffName = new StructType(Lists.newArrayList(
        new StructField("st", ScalarType.createType(PrimitiveType.INT)),
        new StructField("cc", c1)
    ));
    Assert.assertFalse(root.matchesType(diffName));
    // Different field type
    StructType diffType = new StructType(Lists.newArrayList(
        new StructField("struct_test", ScalarType.createType(PrimitiveType.INT)),
        new StructField("c1", ScalarType.createType(PrimitiveType.INT))
    ));
    Assert.assertFalse(root.matchesType(diffType));
    // matched
    StructType mc1 = new StructType(Lists.newArrayList(
        new StructField("c1", ScalarType.createType(PrimitiveType.INT)),
        new StructField("cc1", ScalarType.createDefaultCatalogString())
    ));
    StructType matched = new StructType(Lists.newArrayList(
        new StructField("struct_test", ScalarType.createType(PrimitiveType.INT)),
        new StructField("c1", mc1)
    ));
    Assert.assertTrue(root.matchesType(matched));
    // Won't match with different subfield order
    StructType mc2 = new StructType(Lists.newArrayList(
        new StructField("cc1", ScalarType.createDefaultCatalogString()),
        new StructField("c1", ScalarType.createType(PrimitiveType.INT))
    ));
    StructType matchedDiffOrder = new StructType(Lists.newArrayList(
        new StructField("c1", mc2),
        new StructField("struct_test", ScalarType.createType(PrimitiveType.INT))
    ));
    Assert.assertFalse(root.matchesType(matchedDiffOrder));
}
/**
 * Prunes and de-duplicates upload roots: items whose local file is a child of an
 * already-included directory are dropped, and items whose remote path collides
 * with an earlier root are renamed with a "-N" suffix.
 */
@Override
public List<TransferItem> normalize(final List<TransferItem> roots) {
    final List<TransferItem> normalized = new ArrayList<>();
    for(TransferItem upload : roots) {
        boolean duplicate = false;
        for(Iterator<TransferItem> iter = normalized.iterator(); iter.hasNext(); ) {
            TransferItem n = iter.next();
            if(upload.local.isChild(n.local)) {
                // The selected file is a child of a directory already included
                duplicate = true;
                break;
            }
            if(n.local.isChild(upload.local)) {
                // The earlier item is covered by this directory; drop it.
                iter.remove();
            }
            if(upload.remote.equals(n.remote)) {
                // The selected file has the same name; if uploaded as a root element
                // it would overwrite the earlier
                final Path parent = upload.remote.getParent();
                final String filename = upload.remote.getName();
                String proposal;
                int no = 0;
                int index = filename.lastIndexOf('.');
                Path remote;
                do {
                    if(index != -1 && index != 0) {
                        // Insert the counter before the file extension.
                        proposal = String.format("%s-%d%s", filename.substring(0, index), ++no, filename.substring(index));
                    }
                    else {
                        proposal = String.format("%s-%d", filename, ++no);
                    }
                    remote = new Path(parent, proposal, upload.remote.getType());
                }
                // NOTE(review): the loop condition (upload.exists()) is commented out, so only
                // one rename candidate is ever tried — confirm whether this is intentional.
                while(false);//(upload.exists());
                if(log.isInfoEnabled()) {
                    log.info(String.format("Changed name from %s to %s", filename, remote.getName()));
                }
                upload.remote = remote;
            }
        }
        // Prunes the list of selected files. Files which are a child of an already included directory
        // are removed from the returned list.
        if(!duplicate) {
            normalized.add(new TransferItem(upload.remote, upload.local));
        }
    }
    return normalized;
}
// A file whose local path is a child of an already-selected directory must be
// pruned, leaving only the directory root.
@Test
public void testNormalize() {
    UploadRootPathsNormalizer n = new UploadRootPathsNormalizer();
    final List<TransferItem> list = new ArrayList<>();
    list.add(new TransferItem(new Path("/a", EnumSet.of(Path.Type.directory)),
        new NullLocal(System.getProperty("java.io.tmpdir"), "a") {
            @Override
            public boolean isDirectory() {
                return true;
            }

            @Override
            public boolean isFile() {
                return false;
            }
        }));
    list.add(new TransferItem(new Path("/a", EnumSet.of(Path.Type.file)),
        new NullLocal(System.getProperty("java.io.tmpdir"), "a" + File.separator + "b")));
    final List<TransferItem> normalized = n.normalize(list);
    assertEquals(1, normalized.size());
    final TransferItem i = normalized.iterator().next();
    assertEquals(new Path("/a", EnumSet.of(Path.Type.directory)), i.remote);
}
/**
 * Fails fast with an {@link IllegalStateException} carrying {@code message}
 * when {@code isValid} is false; otherwise does nothing.
 */
public static void checkState(boolean isValid, String message) throws IllegalStateException {
    // Guard-clause form: valid state falls straight through.
    if (isValid) {
        return;
    }
    throw new IllegalStateException(message);
}
// checkState must be silent for a valid state and throw IllegalStateException
// with the formatted message otherwise.
@Test
public void testCheckState() {
    try {
        Preconditions.checkState(true, "Test message: %s %s", 12, null);
    } catch (IllegalStateException e) {
        Assert.fail("Should not throw exception when isValid is true");
    }
    try {
        Preconditions.checkState(false, "Test message: %s %s", 12, null);
        Assert.fail("Should throw exception when isValid is false");
    } catch (IllegalStateException e) {
        Assert.assertEquals("Should format message", "Test message: 12 null", e.getMessage());
    }
}
/**
 * Periodic maintenance tick. Returns a success ratio: 1.0 when nothing needs
 * doing (or the zone is out of scope), 0.0 when the node repository is not
 * working. Guard order matters: cheap zone checks first, stability last.
 */
@Override
protected double maintain() {
    if (!nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0; // Not relevant in zones with static capacity
    if (nodeRepository().zone().environment().isTest()) return 1.0; // Short-lived deployments
    if (!nodeRepository().nodes().isWorking()) return 0.0;

    NodeList allNodes = nodeRepository().nodes().list();
    // Only act when no nodes are moving; upgrading during churn is unsafe.
    if (!NodeMover.zoneIsStable(allNodes)) return 1.0;

    return upgradeHostFlavor(allNodes);
}
// End-to-end test of HostFlavorUpgrader: provisions hosts on an old flavor,
// marks the flavor upgradable, and verifies retry behaviour on provisioning
// failures plus the single-replacement invariant.
@Test
public void maintain() {
    String flavor0 = "host";
    String flavor1 = "host2";
    NodeFlavors flavors = FlavorConfigBuilder.createDummies(flavor0, flavor1);
    MockHostProvisioner hostProvisioner = new MockHostProvisioner(flavors.getFlavors());
    ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning()
                                                               .flavors(flavors.getFlavors())
                                                               .hostProvisioner(hostProvisioner)
                                                               .build();
    ApplicationId app = ProvisioningTester.applicationId();
    NodeResources resources = new NodeResources(4, 8, 100, 1,
                                                NodeResources.DiskSpeed.fast,
                                                NodeResources.StorageType.remote);
    ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("c1")).vespaVersion("1").build();
    Capacity capacity = Capacity.from(new ClusterResources(2, 1, resources));
    List<MockDeployer.ApplicationContext> applications = List.of(new MockDeployer.ApplicationContext(app, spec, capacity));
    MockDeployer deployer = new MockDeployer(tester.provisioner(), tester.clock(), applications);
    HostFlavorUpgrader upgrader = new HostFlavorUpgrader(tester.nodeRepository(), Duration.ofDays(1), new TestMetric(), deployer, hostProvisioner);

    // Provision hosts and deploy application
    tester.makeReadyNodes(2, flavor0, NodeType.host);
    tester.activateTenantHosts();
    tester.deploy(app, spec, capacity);
    Node host = tester.nodeRepository().nodes().list().hosts().first().get();
    assertEquals(flavor0, host.flavor().name());

    // Nothing to upgrade initially
    assertEquals(1, upgrader.maintain());
    assertEquals(NodeList.of(), tester.nodeRepository().nodes().list()
                                      .matching(h -> h.status().wantToUpgradeFlavor()));

    // Mark flavor as upgradable, but fail all provisioning requests
    hostProvisioner.addUpgradableFlavor(flavor0)
                   .with(Behaviour.failProvisionRequest);
    assertEquals(1, upgrader.maintain());
    assertEquals(NodeList.of(), tester.nodeRepository().nodes().list()
                                      .matching(node -> node.status().wantToUpgradeFlavor() || node.status().wantToRetire()),
                 "No hosts marked for upgrade or retirement");

    // First provision request fails, but we only try once for the same flavor
    hostProvisioner.with(Behaviour.failProvisionRequest, 1);
    assertEquals(1, upgrader.maintain());
    NodeList nodes = tester.nodeRepository().nodes().list();
    assertEquals(0, nodes.matching(node -> node.status().wantToUpgradeFlavor()).size());

    // Second succeeds and a replacement host starts provisioning
    assertEquals(1, upgrader.maintain());
    nodes = tester.nodeRepository().nodes().list();
    NodeList upgradingFlavor = nodes.matching(node -> node.status().wantToUpgradeFlavor());
    assertEquals(1, upgradingFlavor.size());
    assertEquals(1, nodes.state(Node.State.provisioned).size());

    // No more upgrades are started while host is retiring
    assertEquals(1, upgrader.maintain());
    assertEquals(upgradingFlavor, tester.nodeRepository().nodes().list()
                                        .matching(node -> node.status().wantToUpgradeFlavor()));
}
/**
 * Builds a gauge per (buffer pool, attribute) pair from the java.nio BufferPool
 * MBeans. Pools whose MBean is unavailable (e.g. pre-Java-7 JVMs) are skipped.
 *
 * @return an unmodifiable map of metric name to JMX-backed gauge
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> gauges = new HashMap<>();
    for (String pool : POOLS) {
        try {
            // The ObjectName and the existence probe depend only on the pool,
            // so resolve them once per pool rather than once per attribute
            // (the original repeated both inside the attribute loop).
            final ObjectName on = new ObjectName("java.nio:type=BufferPool,name=" + pool);
            mBeanServer.getMBeanInfo(on); // throws if the MBean is absent
            for (int i = 0; i < ATTRIBUTES.length; i++) {
                gauges.put(name(pool, NAMES[i]),
                           new JmxAttributeGauge(mBeanServer, on, ATTRIBUTES[i]));
            }
        } catch (JMException ignored) {
            LOGGER.debug("Unable to load buffer pool MBeans, possibly running on Java 6");
        }
    }
    return Collections.unmodifiableMap(gauges);
}
// The metric set must expose exactly the six direct/mapped pool gauges.
@Test
public void includesGaugesForDirectAndMappedPools() {
    assertThat(buffers.getMetrics().keySet())
        .containsOnly("direct.count", "mapped.used", "mapped.capacity",
                      "direct.capacity", "mapped.count", "direct.used");
}
// Asserts actual.compareTo(expected) == 0 — equivalence under the Comparable
// ordering, which need not agree with equals(). Fails the subject otherwise.
@SuppressWarnings("unchecked")
public void isEquivalentAccordingToCompareTo(@Nullable T expected) {
    if (checkNotNull((Comparable<Object>) actual).compareTo(checkNotNull(expected)) != 0) {
        failWithActual("expected value that sorts equal to", expected);
    }
}
// Same-length strings sort equal under length comparison; different lengths fail
// with the expected message key/value.
@Test
public void isEquivalentAccordingToCompareTo() {
    assertThat(new StringComparedByLength("abc"))
        .isEquivalentAccordingToCompareTo(new StringComparedByLength("xyz"));
    expectFailureWhenTestingThat(new StringComparedByLength("abc"))
        .isEquivalentAccordingToCompareTo(new StringComparedByLength("abcd"));
    assertFailureValue("expected value that sorts equal to", "abcd");
}
// Returns the shared singleton instance; the coder is stateless so one
// instance serves all callers.
public static MetadataCoder of() {
    return INSTANCE;
}
// Round-trips Metadata (with default last-modified millis) through MetadataCoder
// and checks decode(encode(x)) == x.
@Test
public void testEncodeDecodeWithDefaultLastModifiedMills() throws Exception {
    Path filePath = tmpFolder.newFile("somefile").toPath();
    Metadata metadata =
        Metadata.builder()
            .setResourceId(
                FileSystems.matchNewResource(filePath.toString(), false /* isDirectory */))
            .setIsReadSeekEfficient(true)
            .setSizeBytes(1024)
            .build();
    CoderProperties.coderDecodeEncodeEqual(MetadataCoder.of(), metadata);
}
/**
 * Releases flow-control credit for {@code bytes} on {@code streamId}.
 * Returns false without consuming when the stream is the HTTP/1 upgrade
 * stream, which is exempt from stream-level flow control.
 */
final boolean consumeBytes(int streamId, int bytes) throws Http2Exception {
    Http2Stream stream = connection().stream(streamId);
    // Upgraded requests are ineligible for stream control. We add the null check
    // in case the stream has been deregistered.
    if (stream != null && streamId == Http2CodecUtil.HTTP_UPGRADE_STREAM_ID) {
        Boolean upgraded = stream.getProperty(upgradeKey);
        if (Boolean.TRUE.equals(upgraded)) {
            return false;
        }
    }
    // A null stream is tolerated here; the flow controller handles it.
    return connection().local().flowController().consumeBytes(stream, bytes);
}
// consumeBytes on a never-seen stream id (emulating a stream deregistered by RST)
// must not throw and must report nothing consumed.
@Test
public void flowControlShouldBeResilientToMissingStreams() throws Http2Exception {
    Http2Connection conn = new DefaultHttp2Connection(true);
    Http2ConnectionEncoder enc = new DefaultHttp2ConnectionEncoder(conn, new DefaultHttp2FrameWriter());
    Http2ConnectionDecoder dec = new DefaultHttp2ConnectionDecoder(conn, enc, new DefaultHttp2FrameReader());
    Http2FrameCodec codec = new Http2FrameCodec(enc, dec, new Http2Settings(), false, true);
    EmbeddedChannel em = new EmbeddedChannel(codec);

    // We call #consumeBytes on a stream id which has not been seen yet to emulate the case
    // where a stream is deregistered which in reality can happen in response to a RST.
    assertFalse(codec.consumeBytes(1, 1));
    assertTrue(em.finishAndReleaseAll());
}
/**
 * Moves a node. Plain renames and same-encryption moves delegate to the server
 * move; moves that cross an encryption boundary (into or out of an encrypted
 * room) must be implemented as copy-then-delete because the server cannot
 * re-encrypt in place.
 */
@Override
public Path move(final Path source, final Path target, final TransferStatus status,
                 final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    if(containerService.isContainer(source)) {
        if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
            // Rename only
            return proxy.move(source, target, status, callback, connectionCallback);
        }
    }
    // XOR: exactly one side is encrypted, so a server-side move is not possible.
    if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source)
            ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
        // Moving into or from an encrypted room
        final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
        if(log.isDebugEnabled()) {
            log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
        }
        final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
        // Delete source file after copy is complete
        final Delete delete = new SDSDeleteFeature(session, nodeid);
        if(delete.isSupported(source)) {
            log.warn(String.format("Delete source %s copied to %s", source, target));
            delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
        }
        return c;
    }
    else {
        return proxy.move(source, target, status, callback, connectionCallback);
    }
}
// Moving a file over an existing sibling must replace the target and remove the
// source; cleans up the test room afterwards.
@Test
public void testMoveToDifferentParentAndRename() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final String filename = new AlphanumericRandomStringService().random();
    final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, filename, EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path target = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    new SDSMoveFeature(session, nodeid).move(test, target, new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertFalse(new SDSFindFeature(session, nodeid).find(new Path(room, filename, EnumSet.of(Path.Type.file))));
    assertTrue(new SDSFindFeature(session, nodeid).find(target));
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Creates a verification mode accepting hit counts in the range [min, max].
 * Preconditions: {@code 0 <= min < max}.
 *
 * @throws IllegalArgumentException when the bounds are invalid
 */
public static VerificationMode between(final int min, final int max) {
    if (min < 0) {
        throw new IllegalArgumentException("Min should be greater than or equal to 0");
    }
    if (max <= min) {
        throw new IllegalArgumentException("Max should be greater than min");
    }
    return new BetweenVerification(min, max);
}
// Three hits on /foo must satisfy a between(1, 3) verification.
@Test
public void should_verify_expected_request_for_between() throws Exception {
    final HttpServer server = httpServer(port(), hit);
    server.get(by(uri("/foo"))).response("bar");
    running(server, () -> {
        assertThat(helper.get(remoteUrl("/foo")), is("bar"));
        assertThat(helper.get(remoteUrl("/foo")), is("bar"));
        assertThat(helper.get(remoteUrl("/foo")), is("bar"));
    });
    hit.verify(by(uri("/foo")), between(1, 3));
}
/**
 * True when every data page of the column is dictionary-encoded. Prefers the
 * precise EncodingStats (parquet-mr >= 1.9.0); falls back to inspecting the
 * v1 encoding set for older files.
 */
@VisibleForTesting
@SuppressWarnings("deprecation")
public static boolean isOnlyDictionaryEncodingPages(ColumnChunkMetaData columnMetaData) {
    // Files written with newer versions of Parquet libraries (e.g. parquet-mr 1.9.0) will have EncodingStats available
    // Otherwise, fallback to v1 logic
    EncodingStats stats = columnMetaData.getEncodingStats();
    if (stats != null) {
        return stats.hasDictionaryPages() && !stats.hasNonDictionaryEncodedPages();
    }

    Set<Encoding> encodings = columnMetaData.getEncodings();
    if (encodings.contains(PLAIN_DICTIONARY)) {
        // PLAIN_DICTIONARY was present, which means at least one page was
        // dictionary-encoded and 1.0 encodings are used
        // The only other allowed encodings are RLE and BIT_PACKED which are used for repetition or definition levels
        return Sets.difference(encodings, ImmutableSet.of(PLAIN_DICTIONARY, RLE, BIT_PACKED)).isEmpty();
    }

    return false;
}
// v2 metadata: dictionary-only encodings pass; PLAIN or a mixed set (fallback
// after too many unique values) must fail.
@Test
@SuppressWarnings("deprecation")
public void testDictionaryEncodingV2() {
    assertTrue(isOnlyDictionaryEncodingPages(createColumnMetaDataV2(RLE_DICTIONARY)));
    assertTrue(isOnlyDictionaryEncodingPages(createColumnMetaDataV2(PLAIN_DICTIONARY)));
    assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV2(PLAIN)));
    // Simulate fallback to plain encoding e.g. too many unique entries
    assertFalse(isOnlyDictionaryEncodingPages(createColumnMetaDataV2(RLE_DICTIONARY, PLAIN)));
}
/**
 * Looks up (and caches) the declared constructor of {@code clazz} matching
 * {@code paramsTypes}. Returns empty for a null class or when no such
 * constructor exists; misses are cached too, so the lookup cost is paid once.
 */
public static Optional<Constructor<?>> findConstructor(Class<?> clazz, Class<?>[] paramsTypes) {
    if (clazz == null) {
        return Optional.empty();
    }
    // Add to constructor cache
    return CONSTRUCTOR_CACHE.computeIfAbsent(buildMethodKey(clazz, "<init>", paramsTypes), key -> {
        try {
            return Optional.of(setObjectAccessible(clazz.getDeclaredConstructor(paramsTypes)));
        } catch (NoSuchMethodException e) {
            LOGGER.warning(String.format(Locale.ENGLISH, "Can not find constructor for class [%s] with params [%s]",
                    clazz.getName(), Arrays.toString(paramsTypes)));
            return Optional.empty();
        }
    });
}
// Finds the no-arg and (int,int) constructors; the boxed (Integer,Integer)
// signature does not exist and must yield empty.
@Test
public void findConstructor() {
    final Optional<Constructor<?>> constructor = ReflectUtils.findConstructor(TestReflect.class, null);
    Assert.assertTrue(constructor.isPresent());
    final Optional<Constructor<?>> paramsCons = ReflectUtils.findConstructor(TestReflect.class,
            new Class[] {int.class, int.class});
    Assert.assertTrue(paramsCons.isPresent());
    final Optional<Constructor<?>> noFoundCons = ReflectUtils.findConstructor(TestReflect.class,
            new Class[] {Integer.class, Integer.class});
    Assert.assertFalse(noFoundCons.isPresent());
}
// Read-only view of the exceptions collected so far; callers cannot mutate
// the internal list through it.
public List<Exception> errors() {
    return Collections.unmodifiableList(this.errors);
}
// After a failing retry, the thrown exception must appear in errors().
@Test
void errors() {
    final var e = new BusinessException("unhandled");
    final var retry = new Retry<String>(
        () -> {
            throw e;
        },
        2,
        0
    );
    try {
        retry.perform();
    } catch (BusinessException ex) {
        //ignore
    }
    assertThat(retry.errors(), hasItem(e));
}
/**
 * Builds a WebClient-backed Eureka HTTP client for the given endpoint. Clones
 * the shared builder so per-endpoint configuration never leaks back into it.
 */
@Override
public EurekaHttpClient newClient(EurekaEndpoint endpoint) {
    // we want a copy to modify. Don't change the original
    WebClient.Builder builder = this.builderSupplier.get().clone();
    setUrl(builder, endpoint.getServiceUrl());
    setCodecs(builder);
    // Surface 4xx responses through a dedicated filter.
    builder.filter(http4XxErrorExchangeFilterFunction());
    return new WebClientEurekaHttpClient(builder.build());
}
// Percent-encoded credentials in the service URL must be decoded into a Basic
// auth header, and stripped from the request URL itself.
@Test
void testUserInfoWithEncodedCharacters() {
    String encodedBasicAuth = HttpHeaders.encodeBasicAuth("test", "MyPassword@", null);
    String expectedAuthHeader = "Basic " + encodedBasicAuth;
    String expectedUrl = "http://localhost:8761";
    WebClientEurekaHttpClient client = (WebClientEurekaHttpClient) transportClientFatory
        .newClient(new DefaultEndpoint("http://test:MyPassword%40@localhost:8761"));
    client.getWebClient().get().retrieve().bodyToMono(Void.class).block(Duration.ofSeconds(10));
    ClientRequest request = verifyAndGetRequest();
    assertThat(request.headers().getFirst(HttpHeaders.AUTHORIZATION)).isEqualTo(expectedAuthHeader);
    assertThat(request.url().toString()).isEqualTo(expectedUrl);
}
// When no rule matches, short-circuit with the standard "no rule" response
// instead of continuing the plugin chain.
@Override
protected Mono<Void> handleRuleIfNull(final String pluginName, final ServerWebExchange exchange, final ShenyuPluginChain chain) {
    return WebFluxResultUtils.noRuleResult(pluginName, exchange);
}
// The null-rule handler must produce a non-null (error) result publisher.
@Test
public void handleRuleIfNullTest() {
    assertNotNull(dividePlugin.handleRuleIfNull(PluginEnum.DIVIDE.getName(), exchange, chain));
}
/**
 * Returns an unresolved copy of the supplied address with its port replaced.
 * Falls back to localhost for a null supplier or a non-Inet address; rejects
 * domain sockets, which have no port.
 *
 * @throws IllegalArgumentException when the supplier yields a DomainSocketAddress
 */
public static SocketAddress updatePort(@Nullable Supplier<? extends SocketAddress> address, int port) {
    if (address == null) {
        return createUnresolved(NetUtil.LOCALHOST.getHostAddress(), port);
    }
    // Invoke the supplier exactly once: the original called address.get() three
    // times, which is wasteful and unsafe if the supplier is not idempotent.
    SocketAddress socketAddress = address.get();
    if (socketAddress instanceof DomainSocketAddress) {
        // Fixed message typo: "post number" -> "port number".
        throw new IllegalArgumentException("Cannot update DomainSocketAddress with port number [" + port + "].");
    }
    if (!(socketAddress instanceof InetSocketAddress)) {
        return createUnresolved(NetUtil.LOCALHOST.getHostAddress(), port);
    }
    InetSocketAddress inet = (InetSocketAddress) socketAddress;
    InetAddress addr = inet.getAddress();
    // Prefer the literal IP when resolved; otherwise keep the host name.
    String host = addr == null ? inet.getHostName() : addr.getHostAddress();
    return createUnresolved(host, port);
}
// A negative port must be rejected with the JDK's "port out of range" message.
@Test
void updatePortBadValues() {
    assertThatExceptionOfType(IllegalArgumentException.class)
        .isThrownBy(() -> AddressUtils.updatePort(null, -1))
        .withMessage("port out of range:-1");
}
/**
 * Returns runs whose timestamp falls in [start, end). The limit() predicate
 * relies on runs being iterated newest-first: iteration stops at the first run
 * older than start, then the filter drops runs at or after end.
 */
public RunList<R> byTimestamp(final long start, final long end) {
    return limit(new CountingPredicate<>() {
        @Override
        public boolean apply(int index, R r) {
            return start <= r.getTimeInMillis();
        }
    }).filter((Predicate<R>) r -> r.getTimeInMillis() < end);
}
// A window covering all fixture timestamps must return both runs.
@Test
public void byTimestampAllRuns() {
    setUpByTimestampRuns();
    RunList<Run> tested = rlist.byTimestamp(0, 400);
    assertEquals(2, tested.toArray().length);
}
// Snapshots the accumulated bytes into an immutable Data value.
@NonNull
public Data toData() {
    return new Data(buffer.toByteArray());
}
// Writes bytes 0..6 and reads back the little-endian uint16 at offset 0
// (bytes 0x00,0x01 -> 0x100).
@Test
public void toData() {
    final DataStream stream = new DataStream();
    stream.write(new byte[] { 0, 1, 2, 3, 4, 5, 6});
    final Data data = stream.toData();
    assertEquals(0x100, Objects.requireNonNull(data.getIntValue(Data.FORMAT_UINT16_LE, 0)).intValue());
}
/**
 * Entry point for Avro schema generation from Pegasus sources. Configures the
 * generator from the CLI-style arguments (optional-default mode, typeref
 * property excludes, namespace override) and writes schemas under
 * {@code targetDirectoryPath}, nested under AVRO_PREFIX when overriding.
 */
public static void run(String resolverPath, String optionalDefault, String typeRefPropertiesExcludeList,
                       boolean overrideNamespace, String targetDirectoryPath, String[] sources) throws IOException {
    final AvroSchemaGenerator generator = new AvroSchemaGenerator(new Config(resolverPath));
    if (optionalDefault != null) {
        final OptionalDefaultMode optionalDefaultMode = OptionalDefaultMode.valueOf(optionalDefault.toUpperCase());
        generator.getDataToAvroSchemaTranslationOptions().setOptionalDefaultMode(optionalDefaultMode);
    }
    generator.getDataToAvroSchemaTranslationOptions().setOverrideNamespace(overrideNamespace);
    if (null != typeRefPropertiesExcludeList) {
        // Comma-separated list; trim entries before collecting.
        generator.getDataToAvroSchemaTranslationOptions().setTyperefPropertiesExcludeSet(
            Arrays.stream(typeRefPropertiesExcludeList.split(","))
                .map(String::trim)
                .collect(Collectors.toSet()));
    }
    if (overrideNamespace) {
        targetDirectoryPath += "/" + AVRO_PREFIX;
    }
    generator.generate(targetDirectoryPath, sources);
}
// Generates Avro schemas addressed by fully-qualified name with the schema
// sources packaged into jars on the resolver path.
@Test(dataProvider = "toAvroSchemaData")
public void testFullNameAsArgsWithJarInPath(Map<String, String> testSchemas, Map<String, String> expectedAvroSchemas,
                                            List<String> paths, boolean override) throws IOException {
    Map<File, Map.Entry<String,String>> files = TestUtil.createSchemaFiles(_testDir, testSchemas, _debug);
    // jar files in path, create jar files
    Collection<String> testPaths = createJarsFromRelativePaths(_testDir, testSchemas, paths, _debug);
    // test source is a fully qualified name
    File targetDir = setup(testPaths, override);
    for (Map.Entry<File, Map.Entry<String, String>> entry : files.entrySet()) {
        String schemaText = entry.getValue().getValue();
        String schemaName = schemaFullName(schemaText);
        if (_debug) out.println("test name " + schemaName);
        String args[] = { targetDir.getCanonicalPath(), schemaName };
        run(args, entry, targetDir, expectedAvroSchemas);
    }
}
/**
 * Derives {@code contextPath} from {@code path}: the prefix up to (but not
 * including) the second "/" — e.g. "/ctx" from "/ctx/real" — or the whole
 * path when it has a single segment. No-op for a blank path.
 */
public void updateContextPath() {
    // isNotBlank replaces the misused varargs isNoneBlank (identical for one
    // argument); the second-slash index is computed once instead of twice.
    if (StringUtils.isNotBlank(this.path)) {
        final int secondSlash = StringUtils.indexOf(path, "/", 1);
        this.contextPath = secondSlash > -1 ? this.path.substring(0, secondSlash) : path;
    }
}
// After setting PATH, updateContextPath must extract CONTEXT_PATH from it.
@Test
public void testUpdateContextPath() {
    MetaData metaData = new MetaData("id", "appName", "contextPath", "path",
        "rpcType", "serviceName", "methodName", "parameterTypes", "rpcExt", true);
    metaData.setPath(PATH);
    metaData.updateContextPath();
    assertEquals(metaData.getContextPath(), CONTEXT_PATH);
}
/**
 * Bulk lookup: maps every requested key to its value from the underlying map.
 * Keys that are absent map to null, mirroring single-key get() semantics.
 */
@Override
public Map<K, V> getAll(Set<K> keys) {
    final Map<K, V> result = createHashMap(keys.size());
    keys.forEach(key -> result.put(key, map.get(key)));
    return result;
}
// getAll over two present keys must return exactly their entries.
@Test
public void testGetAll() {
    map.put(23, "value-23");
    map.put(42, "value-42");

    Map<Integer, String> expectedResult = new HashMap<>();
    expectedResult.put(23, "value-23");
    expectedResult.put(42, "value-42");
    Map<Integer, String> result = adapter.getAll(expectedResult.keySet());
    assertEquals(expectedResult, result);
}
/**
 * Linearly interpolates between two positions. Rejects spans wider than 90
 * degrees of latitude or 180 degrees of longitude, where naive component-wise
 * interpolation is meaningless (e.g. across the antimeridian).
 *
 * @throws IllegalArgumentException when the span is too wide to interpolate safely
 */
public static LatLong interpolateLatLong(LatLong p1, LatLong p2, double fraction) {
    // Latitude span is validated first, matching the original check order.
    double latSpan = max(p1.latitude(), p2.latitude()) - min(p1.latitude(), p2.latitude());
    checkArgument(latSpan <= 90.0, "Interpolation is unsafe at this distance (latitude)");

    double longSpan = max(p1.longitude(), p2.longitude()) - min(p1.longitude(), p2.longitude());
    checkArgument(longSpan <= 180.0, "Interpolation is unsafe at this distance (longitude)");

    return new LatLong(
        interpolate(p1.latitude(), p2.latitude(), fraction),
        interpolate(p1.longitude(), p2.longitude(), fraction)
    );
}
// Interpolating across too-wide latitude or longitude spans must throw with
// the matching "unsafe" message.
@Test
public void testUnsafeInterpolateLatLongFails() {
    LatLong p1 = new LatLong(89.0, 0.0);
    LatLong p2 = new LatLong(-89.0, 0.0);
    try {
        interpolateLatLong(p1, p2, 0.5);
        fail("This call should fail until the implementation is improved");
    } catch (IllegalArgumentException iae) {
        assertTrue(iae.getMessage().contains("Interpolation is unsafe at this distance (latitude)"));
    }
    LatLong p3 = new LatLong(0.0, -178.0);
    LatLong p4 = new LatLong(0.0, 178.0);
    try {
        interpolateLatLong(p3, p4, 0.5);
        fail("This call should fail until the implementation is improved");
    } catch (IllegalArgumentException iae) {
        assertTrue(iae.getMessage().contains("Interpolation is unsafe at this distance (longitude)"));
    }
}
/**
 * Pulls up to {@code batchSize} messages from the subscription and converts
 * them to IncomingMessage, resolving the event timestamp from either the
 * publish time or a user-specified attribute, and the record id from an
 * attribute or the Pubsub message id.
 */
@Override
public List<IncomingMessage> pull(
    long requestTimeMsSinceEpoch,
    SubscriptionPath subscription,
    int batchSize,
    boolean returnImmediately)
    throws IOException {
    PullRequest request =
        PullRequest.newBuilder()
            .setSubscription(subscription.getPath())
            .setReturnImmediately(returnImmediately)
            .setMaxMessages(batchSize)
            .build();
    PullResponse response = subscriberStub().pull(request);
    if (response.getReceivedMessagesCount() == 0) {
        return ImmutableList.of();
    }
    List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessagesCount());
    for (ReceivedMessage message : response.getReceivedMessagesList()) {
        PubsubMessage pubsubMessage = message.getMessage();
        @Nullable Map<String, String> attributes = pubsubMessage.getAttributes();

        // Timestamp.
        long timestampMsSinceEpoch;
        if (Strings.isNullOrEmpty(timestampAttribute)) {
            Timestamp timestampProto = pubsubMessage.getPublishTime();
            checkArgument(timestampProto != null, "Pubsub message is missing timestamp proto");
            timestampMsSinceEpoch =
                timestampProto.getSeconds() * 1000 + timestampProto.getNanos() / 1000L / 1000L;
        } else {
            timestampMsSinceEpoch = extractTimestampAttribute(timestampAttribute, attributes);
        }

        // Ack id.
        String ackId = message.getAckId();
        checkState(!Strings.isNullOrEmpty(ackId));

        // Record id, if any.
        @Nullable String recordId = null;
        if (idAttribute != null && attributes != null) {
            recordId = attributes.get(idAttribute);
        }
        if (Strings.isNullOrEmpty(recordId)) {
            // Fall back to the Pubsub provided message id.
            recordId = pubsubMessage.getMessageId();
        }

        incomingMessages.add(
            IncomingMessage.of(
                pubsubMessage, timestampMsSinceEpoch, requestTimeMsSinceEpoch, ackId, recordId));
    }
    return incomingMessages;
}
// Pulls one message through an in-process gRPC subscriber stub and verifies the
// translated IncomingMessage fields plus the exact request sent to the server.
@Test
public void pullOneMessage() throws IOException {
    initializeClient(null, null);
    String expectedSubscription = SUBSCRIPTION.getPath();
    final PullRequest expectedRequest =
        PullRequest.newBuilder()
            .setSubscription(expectedSubscription)
            .setReturnImmediately(true)
            .setMaxMessages(10)
            .build();
    Timestamp timestamp =
        Timestamp.newBuilder()
            .setSeconds(PUB_TIME_MS / 1000)
            .setNanos((int) (PUB_TIME_MS % 1000) * 1000 * 1000)
            .build();
    PubsubMessage expectedPubsubMessage =
        PubsubMessage.newBuilder()
            .setMessageId(MESSAGE_ID)
            .setData(ByteString.copyFrom(DATA.getBytes(StandardCharsets.UTF_8)))
            .setPublishTime(timestamp)
            .build();
    ReceivedMessage expectedReceivedMessage =
        ReceivedMessage.newBuilder().setMessage(expectedPubsubMessage).setAckId(ACK_ID).build();
    final PullResponse response =
        PullResponse.newBuilder()
            .addAllReceivedMessages(ImmutableList.of(expectedReceivedMessage))
            .build();

    final List<PullRequest> requestsReceived = new ArrayList<>();
    SubscriberImplBase subscriberImplBase =
        new SubscriberImplBase() {
            @Override
            public void pull(PullRequest request, StreamObserver<PullResponse> responseObserver) {
                requestsReceived.add(request);
                responseObserver.onNext(response);
                responseObserver.onCompleted();
            }
        };
    Server server =
        InProcessServerBuilder.forName(channelName).addService(subscriberImplBase).build().start();
    try {
        List<IncomingMessage> actualMessages = client.pull(REQ_TIME_MS, SUBSCRIPTION, 10, true);
        assertEquals(1, actualMessages.size());
        IncomingMessage actualMessage = actualMessages.get(0);
        assertEquals(ACK_ID, actualMessage.ackId());
        assertEquals(DATA, actualMessage.message().getData().toStringUtf8());
        assertEquals(MESSAGE_ID, actualMessage.recordId());
        assertEquals(REQ_TIME_MS, actualMessage.requestTimeMsSinceEpoch());
        assertEquals(PUB_TIME_MS, actualMessage.timestampMsSinceEpoch());
        assertEquals(expectedRequest, Iterables.getOnlyElement(requestsReceived));
    } finally {
        server.shutdownNow();
    }
}
// Serializes any object to JSON, wrapping Jackson failures in the
// Nacos-specific unchecked NacosSerializationException.
public static String toJson(Object obj) {
    try {
        return mapper.writeValueAsString(obj);
    } catch (JsonProcessingException e) {
        throw new NacosSerializationException(obj.getClass(), e);
    }
}
// Serializing a field-less Object must raise NacosSerializationException.
@Test
void testToJson2() {
    assertThrows(NacosSerializationException.class, () -> {
        // object without field will throw exceptions
        JacksonUtils.toJson(new Object());
    });
}
// Convenience wrapper: the MAC address of the interface behind the local host
// address.
public static String getLocalMacAddress() {
    return getMacAddress(getLocalhost());
}
// Environment-dependent (hence @Disabled): the local MAC address must be
// non-null and match the MAC pattern.
@Test
@Disabled
public void getLocalMacAddressTest() {
    final String macAddress = NetUtil.getLocalMacAddress();
    assertNotNull(macAddress);

    // 验证MAC地址正确 (verify the MAC address is well-formed)
    final boolean match = ReUtil.isMatch(PatternPool.MAC_ADDRESS, macAddress);
    assertTrue(match);
}
// Delegates to the immutable underlying map so the resulting
// UnsupportedOperationException (and its message) come from one place,
// which keeps the wrapper trivially testable.
@Override
public V put(K key, V value) {
    // will throw UnsupportedOperationException; delegate anyway for testability
    return underlying().put(key, value);
}
// put() must delegate to the underlying persistent map and propagate its
// UnsupportedOperationException.
@Test
public void testDelegationOfUnsupportedFunctionPut() {
    new PCollectionsHashMapWrapperDelegationChecker<>()
        .defineMockConfigurationForUnsupportedFunction(mock -> mock.put(eq(this), eq(this)))
        .defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.put(this, this))
        .doUnsupportedFunctionDelegationCheck();
}
// Resolves the candidate parameter (comma-separated user-group ids) to the
// union of all member user ids of those groups.
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
    Set<Long> groupIds = StrUtils.splitToLongSet(param);
    List<BpmUserGroupDO> groups = userGroupService.getUserGroupList(groupIds);
    return convertSetByFlatMap(groups, BpmUserGroupDO::getUserIds, Collection::stream);
}
// "1,2" must resolve to the union of both groups' user ids.
@Test
public void testCalculateUsers() {
    // 准备参数 (prepare the parameter)
    String param = "1,2";
    // mock 方法 (mock the service call)
    BpmUserGroupDO userGroup1 = randomPojo(BpmUserGroupDO.class, o -> o.setUserIds(asSet(11L, 12L)));
    BpmUserGroupDO userGroup2 = randomPojo(BpmUserGroupDO.class, o -> o.setUserIds(asSet(21L, 22L)));
    when(userGroupService.getUserGroupList(eq(asSet(1L, 2L)))).thenReturn(Arrays.asList(userGroup1, userGroup2));

    // 调用 (invoke)
    Set<Long> results = strategy.calculateUsers(null, param);
    // 断言 (assert)
    assertEquals(asSet(11L, 12L, 21L, 22L), results);
}
// Package-private accessor for tests to inspect the configured bootstrap.
Bootstrap getBootstrap() {
    return bootstrap;
}
// With epoll transport, the configured TCP keepalive tuning must be translated
// into the corresponding EpollChannelOption entries on the client bootstrap.
@Test
void testSetKeepaliveOptionWithEpoll() throws Exception {
    // Only meaningful on platforms where native epoll is available.
    assumeThat(Epoll.isAvailable()).isTrue();

    final Configuration config = new Configuration();
    config.set(NettyShuffleEnvironmentOptions.TRANSPORT_TYPE, "epoll");
    config.set(NettyShuffleEnvironmentOptions.CLIENT_TCP_KEEP_IDLE_SECONDS, 300);
    config.set(NettyShuffleEnvironmentOptions.CLIENT_TCP_KEEP_INTERVAL_SECONDS, 10);
    config.set(NettyShuffleEnvironmentOptions.CLIENT_TCP_KEEP_COUNT, 8);
    try (NetUtils.Port clientPort = NetUtils.getAvailablePort()) {
        final NettyClient client = createNettyClient(config, clientPort);
        Map<ChannelOption<?>, Object> options = client.getBootstrap().config().options();
        assertThat(options)
            .containsEntry(EpollChannelOption.TCP_KEEPIDLE, 300)
            .containsEntry(EpollChannelOption.TCP_KEEPINTVL, 10)
            .containsEntry(EpollChannelOption.TCP_KEEPCNT, 8);
    }
}
/**
 * Migrates every legacy dashboard to a view + search pair. Idempotent: skips
 * entirely when a previous run recorded completion. Records the dashboard- and
 * widget-id mappings so later migrations can translate references.
 */
@Override
public void upgrade() {
    if (hasBeenRunSuccessfully()) {
        LOG.debug("Migration already completed.");
        return;
    }

    final Set<String> dashboardIdToViewId = new HashSet<>();
    final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add;

    final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>();
    final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll;

    // Sorted by id for a deterministic migration order.
    final Map<View, Search> newViews = this.dashboardsService.streamAll()
        .sorted(Comparator.comparing(Dashboard::id))
        .map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

    writeViews(newViews);

    final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping);
    writeMigrationCompleted(migrationCompleted);
}
@Test
@MongoDBFixtures("dashboard_with_no_widget_positions.json")
public void migratesADashboardWithNoWidgetPositions() {
    // Run the migration against a fixture dashboard whose widgets have no stored positions.
    this.migration.upgrade();

    final MigrationCompleted migrationCompleted = captureMigrationCompleted();
    // Exactly the fixture dashboard is recorded as migrated, with all 16 widgets mapped.
    assertThat(migrationCompleted.migratedDashboardIds()).containsExactly("5ddf8ed5b2d44b2e04472992");
    assertThat(migrationCompleted.widgetMigrationIds()).hasSize(16);

    // One view and one search are persisted for the single dashboard.
    verify(viewService, times(1)).save(any());
    verify(searchService, times(1)).save(any());
}
/**
 * Builds a multi-value default lookup value from its string representation.
 *
 * <p>Only {@code OBJECT} (a JSON map) and {@code NULL} are valid multi-value types;
 * any other type is rejected with an {@link IllegalArgumentException}.
 */
public static LookupDefaultMultiValue create(String valueString, LookupDefaultValue.Type valueType) {
    requireNonNull(valueString, "valueString cannot be null");
    requireNonNull(valueType, "valueType cannot be null");

    final Map<Object, Object> parsedValue;
    switch (valueType) {
        case OBJECT:
            parsedValue = parseObjectValue(valueString, valueType);
            break;
        case NULL:
            parsedValue = null;
            break;
        default:
            throw new IllegalArgumentException("Could not convert <" + valueString + "> to multi value type <" + valueType + ">");
    }

    return builder()
            .valueString(valueString)
            .valueType(valueType)
            .value(parsedValue)
            .build();
}

/** Parses a JSON object string into a map, wrapping parse failures as IllegalArgumentException. */
private static Map<Object, Object> parseObjectValue(String valueString, LookupDefaultValue.Type valueType) {
    try {
        return OBJECT_MAPPER.readValue(valueString, TypeReferences.MAP_OBJECT_OBJECT);
    } catch (IllegalArgumentException e) {
        // Already the right exception type — propagate unchanged.
        throw e;
    } catch (Exception e) {
        throw new IllegalArgumentException("Could not parse JSON "+ valueType.toString().toLowerCase(Locale.ENGLISH) + " value <" + valueString + ">", e);
    }
}
@Test
public void createSingleString() throws Exception {
    // STRING is not a valid multi-value type, so creation must be rejected.
    expectedException.expect(IllegalArgumentException.class);
    LookupDefaultMultiValue.create("foo", LookupDefaultMultiValue.Type.STRING);
}
/**
 * Updates an existing Neutron port.
 *
 * <p>On a standby HA instance the request is forwarded to the active instance.
 * For ports hosted on a DPDK (netdev) node, the response carries a JSON payload
 * with the vhostuser VIF type and, when available, the socket directory.
 *
 * @param id    port identifier from the URL path
 * @param input JSON representation of the port
 * @return 200 OK, with a JSON body only in the netdev case
 * @throws IOException if the request body cannot be read
 */
@PUT
@Path("{id}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response updatePort(@PathParam("id") String id, InputStream input) throws IOException {
    log.trace(String.format(MESSAGE, "UPDATE " + id));

    String inputStr = IOUtils.toString(input, REST_UTF8);

    // Standby instance: forward the mutation to the active instance.
    if (!haService.isActive() && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
        return syncPut(haService, PORTS, id, inputStr);
    }

    final NeutronPort port = (NeutronPort) jsonToModelEntity(inputStr, NeutronPort.class);
    adminService.updatePort(port);

    OpenstackNode node = nodeService.node(port.getHostId());
    if (node == null) {
        return status(Response.Status.OK).build();
    } else if (node.datapathType().equals(DpdkConfig.DatapathType.NETDEV)) {
        log.debug("UpdatePort for port {} called in netdev device {} " +
                "so sends vif type as a payload of the response", port.getId(), node.hostname());
        // Fix: build the JSON payload only on this branch — previously an ObjectMapper and
        // ObjectNode were allocated on every request even though only this path uses them.
        ObjectNode jsonNode = new ObjectMapper().createObjectNode();
        jsonNode.put(VIF_TYPE, VHOSTUSER);
        if (node.socketDir() != null) {
            jsonNode.put(SOCKET_DIR, node.socketDir());
        }
        return status(Response.Status.OK).entity(jsonNode.toString()).build();
    } else {
        return status(Response.Status.OK).build();
    }
}
@Test
public void testUpdatePortWithUpdatingOperation() {
    // Simulate the active HA instance so the request is handled locally, not forwarded.
    expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
    replay(mockOpenstackHaService);
    // Expect exactly one updatePort call on the admin service.
    mockOpenstackNetworkAdminService.updatePort(anyObject());
    replay(mockOpenstackNetworkAdminService);

    final WebTarget wt = target();
    InputStream jsonStream = OpenstackNetworkWebResourceTest.class
            .getResourceAsStream("openstack-port.json");

    Response response = wt.path(PATH + "/65c0ee9f-d634-4522-8954-51021b570b0d")
            .request(MediaType.APPLICATION_JSON_TYPE)
            .put(Entity.json(jsonStream));
    final int status = response.getStatus();

    // A successful update returns HTTP 200.
    assertThat(status, is(200));
    verify(mockOpenstackNetworkAdminService);
}
@Override
public boolean equals(Object other) {
    // Two tenants are equal iff the other object is a Tenant with the same name.
    return other instanceof Tenant && name.equals(((Tenant) other).name);
}
@Test
public void equals() {
    // t1/t2 are expected equal (presumably built with the same name — fixtures not visible here).
    assertEquals(t1, t2);
    // All other pairings must be unequal.
    assertNotEquals(t1, t3);
    assertNotEquals(t1, t4);
    assertNotEquals(t3, t4);
}
/**
 * Builds the storage prefix for a namespace's files.
 *
 * <p>Dots in the namespace become path separators before being injected into
 * {@code PREFIX_FORMAT_NAMESPACE_FILE}.
 */
public static String namespaceFilePrefix(String namespace) {
    final String namespacePath = namespace.replace(".", "/");
    return String.format(PREFIX_FORMAT_NAMESPACE_FILE, namespacePath);
}
@Test
void shouldGetNamespaceFilePrefix() {
    // Dots in the namespace become path separators, with a "_files" suffix appended.
    assertThat(StorageContext.namespaceFilePrefix("io.namespace"), is("/io/namespace/_files"));
}
/**
 * Extracts the value bytes from a single ASN.1 TLV structure: the tag is skipped,
 * the (possibly extended) length is decoded, and exactly that many bytes are returned.
 */
public static byte[] getValue(byte[] raw) {
    try (final Asn1InputStream asn1 = new Asn1InputStream(raw)) {
        asn1.readTag();
        final int valueLength = asn1.readLength();
        return asn1.read(valueLength);
    }
}
@Test
public void getValueShouldSkipTagAndExtendedLength() {
    // 0x81 payload bytes force the long-form (extended) length encoding.
    final byte[] data = new byte[0x81];
    new SecureRandom().nextBytes(data);
    // Header: tag 0x10, then 0x81 (long-form marker: one length byte follows), then length 0x81.
    final byte[] obj = Arrays.concatenate(new byte[] { 0x10, (byte) 0x81, (byte) 0x81 }, data);
    assertArrayEquals(data, Asn1Utils.getValue(obj));
}
@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    uid = conf.getInt(UID, 0);
    user = conf.get(USER);
    if (null == user) {
        try {
            // Fall back to the current Hadoop user when no user is configured.
            user = UserGroupInformation.getCurrentUser().getShortUserName();
        } catch (IOException e) {
            // Best effort: use a fixed default rather than failing configuration.
            user = "hadoop";
        }
    }
    gid = conf.getInt(GID, 1);
    group = conf.get(GROUP);
    if (null == group) {
        // Default the group name to the resolved user name.
        group = user;
    }
    // Rebuild the id mappings from scratch using the resolved user/group.
    resetUGInfo();
    addUser(user, uid);
    addGroup(group, gid);
}
@Test(expected=IllegalStateException.class)
public void testDuplicateIds() {
    // Configure the same numeric id for both UID and GID; building the ugi map must fail.
    Configuration conf = new Configuration(false);
    conf.setInt(SingleUGIResolver.UID, 4344);
    conf.setInt(SingleUGIResolver.GID, 4344);
    conf.set(SingleUGIResolver.USER, TESTUSER);
    conf.set(SingleUGIResolver.GROUP, TESTGROUP);
    ugi.setConf(conf);
    ugi.ugiMap();
}
/**
 * Computes unambiguous short aliases for every scanned plugin.
 *
 * <p>Each plugin contributes two candidate aliases (its simple name and its pruned name);
 * an alias is kept only when it maps to exactly one plugin class.
 */
public static Map<String, String> computeAliases(PluginScanResult scanResult) {
    // Group every plugin class name under each of its candidate aliases.
    final Map<String, Set<String>> candidates = new HashMap<>();
    scanResult.forEach(pluginDesc -> {
        candidates.computeIfAbsent(simpleName(pluginDesc), unused -> new HashSet<>()).add(pluginDesc.className());
        candidates.computeIfAbsent(prunedName(pluginDesc), unused -> new HashSet<>()).add(pluginDesc.className());
    });

    // Keep only aliases that resolve to a single plugin class; log and drop the ambiguous ones.
    final Map<String, String> aliases = new HashMap<>();
    candidates.forEach((alias, classNames) -> {
        if (classNames.size() == 1) {
            aliases.put(alias, classNames.iterator().next());
        } else {
            log.debug("Ignoring ambiguous alias '{}' since it refers to multiple distinct plugins {}", alias, classNames);
        }
    });
    return aliases;
}
@SuppressWarnings("unchecked")
@Test
public void testCollidingSimpleAlias() {
    // One converter and one transformation whose pruned/simple names can collide.
    SortedSet<PluginDesc<Converter>> converters = new TreeSet<>();
    converters.add(new PluginDesc<>(CollidingConverter.class, null, PluginType.CONVERTER, CollidingConverter.class.getClassLoader()));
    SortedSet<PluginDesc<Transformation<?>>> transformations = new TreeSet<>();
    // Double cast needed to satisfy the generic bound on Transformation<?>.
    transformations.add(new PluginDesc<>((Class<? extends Transformation<?>>) (Class<?>) Colliding.class, null, PluginType.TRANSFORMATION, Colliding.class.getClassLoader()));
    PluginScanResult result = new PluginScanResult(
            Collections.emptySortedSet(),
            Collections.emptySortedSet(),
            converters,
            Collections.emptySortedSet(),
            transformations,
            Collections.emptySortedSet(),
            Collections.emptySortedSet(),
            Collections.emptySortedSet(),
            Collections.emptySortedSet()
    );
    Map<String, String> actualAliases = PluginUtils.computeAliases(result);
    Map<String, String> expectedAliases = new HashMap<>();
    // Only the unambiguous alias survives; the colliding one is dropped.
    expectedAliases.put("CollidingConverter", CollidingConverter.class.getName());
    assertEquals(expectedAliases, actualAliases);
}
/** Returns all scope variables of this group without excluding any. */
@Override
public Map<String, String> getAllVariables() {
    // Delegates with default arguments: 0 and an empty exclusion set
    // (exact semantics of the first argument live in internalGetAllVariables).
    return internalGetAllVariables(0, Collections.emptySet());
}
@Test
void testGetAllVariables() throws Exception {
    MetricRegistryImpl registry = new MetricRegistryImpl(
            MetricRegistryTestUtils.defaultMetricRegistryConfiguration());
    // Minimal anonymous group: no scope components, no parent.
    AbstractMetricGroup group = new AbstractMetricGroup<AbstractMetricGroup<?>>(registry, new String[0], null) {
        @Override
        protected QueryScopeInfo createQueryServiceMetricInfo(CharacterFilter filter) {
            return null;
        }

        @Override
        protected String getGroupName(CharacterFilter filter) {
            return "";
        }
    };
    // A group without scope components exposes no variables.
    assertThat(group.getAllVariables()).isEmpty();

    registry.closeAsync().get();
}
/**
 * Converts a Beam {@code Schema} into an equivalent Calcite row type,
 * preserving field order and field names.
 *
 * @param schema          the Beam schema to convert
 * @param dataTypeFactory factory used to construct the Calcite types
 * @return the Calcite row type mirroring the schema
 */
public static RelDataType toCalciteRowType(Schema schema, RelDataTypeFactory dataTypeFactory) {
    RelDataTypeFactory.Builder builder = new RelDataTypeFactory.Builder(dataTypeFactory);
    // A plain indexed loop replaces the previous IntStream.forEach: driving a mutable
    // builder through stream side effects is an anti-pattern and harder to read.
    for (int idx = 0; idx < schema.getFieldCount(); idx++) {
        builder.add(schema.getField(idx).getName(), toRelDataType(dataTypeFactory, schema, idx));
    }
    return builder.build();
}
@Test
public void testToCalciteRowType() {
    // Schema covering every primitive Beam field type exercised by the converter.
    final Schema schema =
        Schema.builder()
            .addField("f1", Schema.FieldType.BYTE)
            .addField("f2", Schema.FieldType.INT16)
            .addField("f3", Schema.FieldType.INT32)
            .addField("f4", Schema.FieldType.INT64)
            .addField("f5", Schema.FieldType.FLOAT)
            .addField("f6", Schema.FieldType.DOUBLE)
            .addField("f7", Schema.FieldType.DECIMAL)
            .addField("f8", Schema.FieldType.BOOLEAN)
            .addField("f9", Schema.FieldType.BYTES)
            .addField("f10", Schema.FieldType.STRING)
            .build();

    final Map<String, RelDataType> fields = calciteRowTypeFields(schema);

    assertEquals(10, fields.size());
    // Beam fields are non-nullable here, so the Calcite types must be too.
    fields.values().forEach(x -> assertFalse(x.isNullable()));
    // Each Beam type maps to its corresponding Calcite SQL type.
    assertEquals(SqlTypeName.TINYINT, fields.get("f1").getSqlTypeName());
    assertEquals(SqlTypeName.SMALLINT, fields.get("f2").getSqlTypeName());
    assertEquals(SqlTypeName.INTEGER, fields.get("f3").getSqlTypeName());
    assertEquals(SqlTypeName.BIGINT, fields.get("f4").getSqlTypeName());
    assertEquals(SqlTypeName.FLOAT, fields.get("f5").getSqlTypeName());
    assertEquals(SqlTypeName.DOUBLE, fields.get("f6").getSqlTypeName());
    assertEquals(SqlTypeName.DECIMAL, fields.get("f7").getSqlTypeName());
    assertEquals(SqlTypeName.BOOLEAN, fields.get("f8").getSqlTypeName());
    assertEquals(SqlTypeName.VARBINARY, fields.get("f9").getSqlTypeName());
    assertEquals(SqlTypeName.VARCHAR, fields.get("f10").getSqlTypeName());
}
/**
 * Returns information about the clients connected to the given cluster node
 * by issuing a CLIENT LIST command and converting its raw line-per-client output.
 */
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
    final RedisClient client = getEntry(node);
    final RFuture<List<String>> future =
            executorService.readAsync(client, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    final List<String> rawLines = syncFuture(future);
    return CONVERTER.convert(rawLines.toArray(new String[rawLines.size()]));
}
@Test
public void testGetClientList() {
    RedisClusterNode master = getFirstMaster();
    List<RedisClientInfo> list = connection.getClientList(master);
    // The test cluster is expected to have more than 10 connected clients.
    assertThat(list.size()).isGreaterThan(10);
}
/**
 * Provides the set of files changed according to the SCM, wrapped in {@code ScmChangedFiles}.
 *
 * <p>A {@code null} result from the loader is passed through unchanged; path validation
 * only runs when changed files were actually loaded.
 */
@Bean("ScmChangedFiles")
public ScmChangedFiles provide(ScmConfiguration scmConfiguration, BranchConfiguration branchConfiguration, DefaultInputProject project) {
    final Path projectBaseDir = project.getBaseDir();
    final Set<ChangedFile> changedFiles =
            loadChangedFilesIfNeeded(scmConfiguration, branchConfiguration, projectBaseDir);
    if (changedFiles != null) {
        validatePaths(getAbsoluteFilePaths(changedFiles));
    }
    return new ScmChangedFiles(changedFiles);
}
@Test
public void testGitScmProvider(){
    // Pull-request analysis with a git provider configured.
    GitScmProvider gitScmProvider = mock(GitScmProvider.class);
    when(scmConfiguration.provider()).thenReturn(gitScmProvider);
    when(branchConfiguration.isPullRequest()).thenReturn(true);
    when(branchConfiguration.targetBranchName()).thenReturn("target");

    ScmChangedFiles scmChangedFiles = provider.provide(scmConfiguration, branchConfiguration, project);

    // The mocked provider reports no changed files, so the result is empty.
    assertThat(scmChangedFiles.get()).isEmpty();
    verify(scmConfiguration).provider();
}
/**
 * Builds a factory that, given a {@code KsqlConfig}, instantiates and configures a UDF instance,
 * optionally wrapping it with a metrics recorder.
 */
private Function<KsqlConfig, Kudf> getUdfFactory(
    final Method method,
    final UdfDescription udfDescriptionAnnotation,
    final String functionName,
    final FunctionInvoker invoker,
    final String sensorName
) {
    return ksqlConfig -> {
        // Instantiate a fresh UDF object each time the factory is invoked.
        final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
            method.getDeclaringClass(), udfDescriptionAnnotation.name());
        if (actualUdf instanceof Configurable) {
            // Run user-supplied configure() inside the restricted UDF security context;
            // pop is in finally so the context is always restored, even on failure.
            ExtensionSecurityManager.INSTANCE.pushInUdf();
            try {
                ((Configurable) actualUdf)
                    .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
            } finally {
                ExtensionSecurityManager.INSTANCE.popOutUdf();
            }
        }
        final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
        // Wrap with a metric-recording decorator when metrics are enabled; otherwise return plain.
        return metrics.<Kudf>map(m -> new UdfMetricProducer(
            m.getSensor(sensorName),
            theUdf,
            Time.SYSTEM
        )).orElse(theUdf);
    };
}
@Test
public void shouldLoadFunctionWithSchemaProvider() {
    // Given: the registered "returndecimal" UDF, whose return type comes from a schema provider.
    final UdfFactory returnDecimal = FUNC_REG.getUdfFactory(FunctionName.of("returndecimal"));

    // When: resolving the function for a DECIMAL(2,1) argument.
    final SqlDecimal decimal = SqlTypes.decimal(2, 1);
    final List<SqlArgument> args = Collections.singletonList(SqlArgument.of(decimal));
    final KsqlScalarFunction function = returnDecimal.getFunction(args);

    // Then: the provider-derived return type matches the argument's decimal type.
    assertThat(function.getReturnType(args), equalTo(decimal));
}
/** Delegates to the two-argument overload with the boolean flag set to {@code false}. */
@Override
public SuspensionReasons verifyGroupGoingDownIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException {
    return verifyGroupGoingDownIsFine(clusterApi, false);
}
@Test
public void verifyGroupGoingDownIsFine_noServicesOutsideGroupIsDownIsFine() throws HostStateChangeDeniedException {
    // NOTE(review): argument semantics of the helper (true, empty, 13, true) are defined
    // elsewhere in this test class — confirm against the helper's signature.
    verifyGroupGoingDownIsFine(true, Optional.empty(), 13, true);
}
/** Returns the raw last-modified value stored in this header. */
public long getLastModified() {
    return last_modified;
}
@Test
public void getLastModified() {
    // The parsed ITSF header must expose the fixture's expected last-modified value.
    assertEquals(TestParameters.VP_LAST_MODIFIED, chmItsfHeader.getLastModified());
}
@Override
public void loadConfiguration(NacosLoggingProperties loggingProperties) {
    String location = loggingProperties.getLocation();
    configurator.setLoggingProperties(loggingProperties);
    LoggerContext loggerContext = loadConfigurationOnStart(location);
    // Attach the reload listener only once; skip if one is already registered.
    if (hasNoListener(loggerContext)) {
        addListener(loggerContext, location);
    }
}
@Test
void testLoadConfigurationReload() {
    LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
    // Simulate a scan-triggered reload task being present before loading the configuration.
    loggerContext.putObject(CoreConstants.RECONFIGURE_ON_CHANGE_TASK, new ReconfigureOnChangeTask());
    logbackNacosLoggingAdapter.loadConfiguration(loggingProperties);
    loggerContext.reset();
    // The adapter-registered listener must observe the context reset.
    verify(loggerContextListener).onReset(loggerContext);
    for (Logger each : loggerContext.getLoggerList()) {
        if (!"com.alibaba.nacos.client.naming".equals(each.getName())) {
            continue;
        }
        // The Nacos naming logger keeps its async appender after the reset.
        assertNotNull(each.getAppender("ASYNC-NAMING"));
    }
}
/**
 * Parses a partition filter string into an {@code ExpressionTree}.
 *
 * @param filter the partition filter expression
 * @throws MetaException if the filter cannot be parsed
 */
public static ExpressionTree parseFilterTree(String filter) throws MetaException {
    return PartFilterParser.parseFilter(filter);
}
@Test
public void testParseFilterWithInvalidDateWithoutTypeNorQuoted() {
    // An unquoted, untyped date-like literal must be rejected with a parse error.
    MetaException exception = assertThrows(MetaException.class,
            () -> PartFilterExprUtil.parseFilterTree("(j = 2023-06-32)"));
    assertTrue(exception.getMessage().contains("Error parsing partition filter"));
}
@Override @DSTransactional // 多数据源,使用 @DSTransactional 保证本地事务,以及数据源的切换 public void updateTenantPackage(TenantPackageSaveReqVO updateReqVO) { // 校验存在 TenantPackageDO tenantPackage = validateTenantPackageExists(updateReqVO.getId()); // 更新 TenantPackageDO updateObj = BeanUtils.toBean(updateReqVO, TenantPackageDO.class); tenantPackageMapper.updateById(updateObj); // 如果菜单发生变化,则修改每个租户的菜单 if (!CollUtil.isEqualList(tenantPackage.getMenuIds(), updateReqVO.getMenuIds())) { List<TenantDO> tenants = tenantService.getTenantListByPackageId(tenantPackage.getId()); tenants.forEach(tenant -> tenantService.updateTenantRoleMenu(tenant.getId(), updateReqVO.getMenuIds())); } }
@Test
public void testUpdateTenantPackage_notExists() {
    // Prepare a request with a random (hence non-existent) package id.
    TenantPackageSaveReqVO reqVO = randomPojo(TenantPackageSaveReqVO.class);

    // Updating a missing package must raise TENANT_PACKAGE_NOT_EXISTS.
    assertServiceException(() -> tenantPackageService.updateTenantPackage(reqVO), TENANT_PACKAGE_NOT_EXISTS);
}