focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Convenience overload: builds an HLL sketch from {@code input} using the default
// log2m (CommonConstants.Helix.DEFAULT_HYPERLOGLOG_LOG2M). A null input is allowed
// (nullableParameters = true) and produces an empty sketch.
@ScalarFunction(nullableParameters = true) public static byte[] toHLL(@Nullable Object input) { return toHLL(input, CommonConstants.Helix.DEFAULT_HYPERLOGLOG_LOG2M); }
// Verifies single-value HLL creation: each input estimates to 1 (default and explicit
// log2m), while a null input yields an empty sketch estimating 0.
@Test public void hllCreation() { for (Object i : _inputs) { Assert.assertEquals(hllEstimate(SketchFunctions.toHLL(i)), 1); Assert.assertEquals(hllEstimate(SketchFunctions.toHLL(i, 8)), 1); } Assert.assertEquals(hllEstimate(SketchFunctions.toHLL(null)), 0); Assert.assertEquals(hllEstimate(SketchFunctions.toHLL(null, 8)), 0); }
/**
 * Ensures this version is at least {@code requiredMajor.requiredMinor}.
 *
 * @param requiredMajor minimum acceptable major version
 * @param requiredMinor minimum acceptable minor version
 * @throws UnsupportedOperationException if the configured version is lower
 */
public void requireAtLeast(final int requiredMajor, final int requiredMinor) {
    final Version minimum = new Version(requiredMajor, requiredMinor);
    // Guard clause: nothing to do when the configured version is high enough.
    if (this.compareTo(minimum) >= 0) {
        return;
    }
    throw new UnsupportedOperationException(
        "This operation requires API version at least " + requiredMajor + "."
            + requiredMinor + ", currently configured for " + major + "." + minor);
}
// Verifies requireAtLeast rejects a required major version above the configured V35_0.
@Test public void shouldObserveApiLimitsOnMajorVersions() { assertThrows(UnsupportedOperationException.class, () -> V35_0.requireAtLeast(36, 0)); }
/**
 * Resolves the protobuf response class for {@code service#methodName},
 * lazily populating the response-class cache on first lookup.
 */
public Class getResClass(String service, String methodName) {
    String key = service + "#" + methodName;
    Class reqClass = responseClassCache.get(key);
    if (reqClass == null) {
        // Read the method parameter and return types from the service interface.
        String interfaceClass = ConfigUniqueNameGenerator.getInterfaceName(service);
        Class clazz = ClassUtils.forName(interfaceClass, true);
        loadProtoClassToCache(key, clazz, methodName);
    }
    return responseClassCache.get(key);
}
// Verifies response-class resolution maps ProtoService#echoStr to EchoStrRes.
@Test public void getResClass() { Class res = protobufHelper.getResClass( "com.alipay.sofa.rpc.codec.protobuf.ProtoService", "echoStr"); Assert.assertTrue(res == EchoStrRes.class); }
// Cancels all six snapshot state futures (managed/raw keyed, managed/raw operator,
// input channel, result subpartition) and discards any state they already produced.
// Closer guarantees every registered discard runs even if an earlier one throws,
// rethrowing the first failure wrapped with the offending future's label.
// Returns the two totals accumulated from discardStateFuture — presumably
// (state size, checkpointed size); confirm against discardStateFuture's contract.
public Tuple2<Long, Long> cancel() throws Exception { List<Tuple2<Future<? extends StateObject>, String>> pairs = new ArrayList<>(); pairs.add(new Tuple2<>(getKeyedStateManagedFuture(), "managed keyed")); pairs.add(new Tuple2<>(getKeyedStateRawFuture(), "managed operator")); pairs.add(new Tuple2<>(getOperatorStateManagedFuture(), "raw keyed")); pairs.add(new Tuple2<>(getOperatorStateRawFuture(), "raw operator")); pairs.add(new Tuple2<>(getInputChannelStateFuture(), "input channel")); pairs.add(new Tuple2<>(getResultSubpartitionStateFuture(), "result subpartition")); final long[] sizeTuple = new long[2]; try (Closer closer = Closer.create()) { for (Tuple2<Future<? extends StateObject>, String> pair : pairs) { closer.register( () -> { try { Tuple2<Long, Long> tuple = discardStateFuture(pair.f0); sizeTuple[0] += tuple.f0; sizeTuple[1] += tuple.f1; } catch (Exception e) { throw new RuntimeException( String.format( "Could not properly cancel %s state future", pair.f1), e); } }); } } return Tuple2.of(sizeTuple[0], sizeTuple[1]); }
// Verifies cancel(): a no-op on an empty OperatorSnapshotFutures, and on a fully
// populated one it cancels every future (cancel(true)) and discards every handle.
@Test void testCancelAndCleanup() throws Exception { OperatorSnapshotFutures operatorSnapshotResult = new OperatorSnapshotFutures(); operatorSnapshotResult.cancel(); KeyedStateHandle keyedManagedStateHandle = mock(KeyedStateHandle.class); SnapshotResult<KeyedStateHandle> keyedStateManagedResult = SnapshotResult.of(keyedManagedStateHandle); RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateManagedFuture = spy(DoneFuture.of(keyedStateManagedResult)); KeyedStateHandle keyedRawStateHandle = mock(KeyedStateHandle.class); SnapshotResult<KeyedStateHandle> keyedStateRawResult = SnapshotResult.of(keyedRawStateHandle); RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateRawFuture = spy(DoneFuture.of(keyedStateRawResult)); OperatorStateHandle operatorManagedStateHandle = mock(OperatorStreamStateHandle.class); SnapshotResult<OperatorStateHandle> operatorStateManagedResult = SnapshotResult.of(operatorManagedStateHandle); RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateManagedFuture = spy(DoneFuture.of(operatorStateManagedResult)); OperatorStateHandle operatorRawStateHandle = mock(OperatorStreamStateHandle.class); SnapshotResult<OperatorStateHandle> operatorStateRawResult = SnapshotResult.of(operatorRawStateHandle); RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateRawFuture = spy(DoneFuture.of(operatorStateRawResult)); InputChannelStateHandle inputChannelRawStateHandle = mock(InputChannelStateHandle.class); SnapshotResult<StateObjectCollection<InputChannelStateHandle>> inputChannelStateRawResult = SnapshotResult.of(StateObjectCollection.singleton(inputChannelRawStateHandle)); Future<SnapshotResult<StateObjectCollection<InputChannelStateHandle>>> inputChannelStateRawFuture = spy(DoneFuture.of(inputChannelStateRawResult)); ResultSubpartitionStateHandle resultSubpartitionRawStateHandle = mock(ResultSubpartitionStateHandle.class); SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>> resultSubpartitionStateRawResult = 
SnapshotResult.of( StateObjectCollection.singleton(resultSubpartitionRawStateHandle)); Future<SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>>> resultSubpartitionStateRawFuture = spy(DoneFuture.of(resultSubpartitionStateRawResult)); operatorSnapshotResult = new OperatorSnapshotFutures( keyedStateManagedFuture, keyedStateRawFuture, operatorStateManagedFuture, operatorStateRawFuture, inputChannelStateRawFuture, resultSubpartitionStateRawFuture); operatorSnapshotResult.cancel(); verify(keyedStateManagedFuture).cancel(true); verify(keyedStateRawFuture).cancel(true); verify(operatorStateManagedFuture).cancel(true); verify(operatorStateRawFuture).cancel(true); verify(inputChannelStateRawFuture).cancel(true); verify(resultSubpartitionStateRawFuture).cancel(true); verify(keyedManagedStateHandle).discardState(); verify(keyedRawStateHandle).discardState(); verify(operatorManagedStateHandle).discardState(); verify(operatorRawStateHandle).discardState(); verify(inputChannelRawStateHandle).discardState(); verify(resultSubpartitionRawStateHandle).discardState(); }
/**
 * Tells whether {@code url} consists only of characters that need no URL
 * encoding, i.e. percent-encoding the string leaves it unchanged.
 *
 * <p>Fix: uses UTF-8 explicitly instead of the JVM's platform default
 * charset, so the result no longer varies between environments.
 *
 * @param url the candidate string; {@code null} is treated as invalid
 * @return {@code true} if encoding changes nothing, {@code false} otherwise
 */
public static boolean isValidUrl(final String url) {
    if (url == null) {
        return false;
    }
    try {
        // URLEncoder leaves unreserved characters untouched; any difference
        // means the input contained characters that require escaping.
        String encodedURL = URLEncoder.encode(url, StandardCharsets.UTF_8.name());
        return url.equals(encodedURL);
    } catch (UnsupportedEncodingException e) {
        // UTF-8 support is mandated by the Java platform; defensive only.
        return false;
    }
}
// Verifies a plain token is valid while a string containing a space (which requires
// encoding) is not.
@Test public void should_know_valid_url_character() { assertThat(URLs.isValidUrl("base"), is(true)); assertThat(URLs.isValidUrl("base path"), is(false)); }
// Subscribes the post owner to NEW_COMMENT_ON_POST notifications, scoped by an
// expression matching the owner's name, and blocks until the subscription completes.
// NOTE(review): a null owner produces the literal expression
// props.postOwner == 'null' (see the companion test) — confirm this is intended.
void subscribeNewCommentNotification(Post post) { var subscriber = new Subscription.Subscriber(); subscriber.setName(post.getSpec().getOwner()); var interestReason = new Subscription.InterestReason(); interestReason.setReasonType(NotificationReasonConst.NEW_COMMENT_ON_POST); interestReason.setExpression( "props.postOwner == '%s'".formatted(post.getSpec().getOwner())); notificationCenter.subscribe(subscriber, interestReason).block(); }
// Verifies the subscriber is the post owner and the interest reason targets
// NEW_COMMENT_ON_POST; the fixture post has a null owner, hence the literal 'null'.
@Test void subscribeNewCommentNotificationTest() { Post post = TestPost.postV1(); postReconciler.subscribeNewCommentNotification(post); verify(notificationCenter).subscribe( assertArg(subscriber -> assertThat(subscriber.getName()) .isEqualTo(post.getSpec().getOwner())), assertArg(argReason -> { var interestReason = new Subscription.InterestReason(); interestReason.setReasonType(NotificationReasonConst.NEW_COMMENT_ON_POST); interestReason.setExpression("props.postOwner == 'null'"); assertThat(argReason).isEqualTo(interestReason); })); }
// Static factory: builds the AutoValue-backed UMethodInvocation, defensively
// copying both the type-argument and argument lists into immutable lists.
public static UMethodInvocation create( List<? extends UExpression> typeArguments, UExpression methodSelect, List<UExpression> arguments) { return new AutoValue_UMethodInvocation( ImmutableList.copyOf(typeArguments), methodSelect, ImmutableList.copyOf(arguments)); }
// Verifies a UMethodInvocation template (mocked receiver, literal args 1 and "bar")
// unifies against the source expression foo(1, "bar").
@Test public void match() { UExpression fooIdent = mock(UExpression.class); when(fooIdent.unify(ident("foo"), isA(Unifier.class))).thenReturn(Choice.of(unifier)); ULiteral oneLit = ULiteral.intLit(1); ULiteral barLit = ULiteral.stringLit("bar"); UMethodInvocation invocation = UMethodInvocation.create( ImmutableList.of(), fooIdent, ImmutableList.<UExpression>of(oneLit, barLit)); assertUnifies("foo(1, \"bar\")", invocation); }
/**
 * Parses DistCp command-line arguments into a {@link DistCpOptions}.
 *
 * @param args command-line arguments as passed to DistCp
 * @return the fully populated options object
 * @throws IllegalArgumentException if the arguments cannot be parsed or a
 *         numeric option carries an invalid value
 */
public static DistCpOptions parse(String[] args)
    throws IllegalArgumentException {

  CommandLineParser parser = new CustomParser();

  CommandLine command;
  try {
    command = parser.parse(cliOptions, args, true);
  } catch (ParseException e) {
    throw new IllegalArgumentException("Unable to parse arguments. "
        + Arrays.toString(args), e);
  }

  DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);

  // Boolean switches map 1:1 onto builder flags. Note -blocking is inverted:
  // its presence turns blocking OFF.
  builder
      .withAtomicCommit(
          command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
      .withSyncFolder(
          command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
      .withDeleteMissing(
          command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
      .withIgnoreFailures(
          command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
      .withOverwrite(
          command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
      .withAppend(
          command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
      .withSkipCRC(
          command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
      .withBlocking(
          !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
      .withVerboseLog(
          command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
      .withDirectWrite(
          command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
      .withUseIterator(
          command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
      .withUpdateRoot(
          command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));

  // Snapshot-diff based sync: exactly two snapshot names are required.
  if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.DIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseDiff(snapshots[0], snapshots[1]);
  }
  if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.RDIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseRdiff(snapshots[0], snapshots[1]);
  }

  if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
    builder.withFiltersFile(
        getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
    builder.withLogPath(
        new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
    final String workPath = getVal(command,
        DistCpOptionSwitch.WORK_PATH.getSwitch());
    if (workPath != null && !workPath.isEmpty()) {
      builder.withAtomicWorkPath(new Path(workPath));
    }
  }

  if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
    builder.withTrackMissing(
        new Path(getVal(command,
            DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
    try {
      final float mapBandwidth = Float.parseFloat(
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
      builder.withMapBandwidth(mapBandwidth);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Bandwidth specified is invalid: "
          + getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
    }
  }

  if (command.hasOption(
      DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
    try {
      final int numThreads = Integer.parseInt(getVal(command,
          DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
      builder.withNumListstatusThreads(numThreads);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(
          "Number of liststatus threads is invalid: " + getVal(command,
              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
    try {
      final int maps = Integer.parseInt(
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
      builder.maxMaps(maps);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Number of maps is invalid: "
          + getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
    builder.withCopyStrategy(
        getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
    builder.preserve(
        getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
  }

  // Deprecated limits are acknowledged with a warning and otherwise ignored.
  if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated"
        + " option. Ignoring.");
  }
  if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated"
        + " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
    // BUGFIX: trim() used to be applied to the switch NAME (a no-op) rather
    // than the user-supplied value; trim the value instead. Null stays null
    // so Integer.parseInt still raises NumberFormatException as before.
    final String chunkSizeStr = getVal(command,
        DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch());
    try {
      int csize = Integer.parseInt(
          chunkSizeStr == null ? null : chunkSizeStr.trim());
      // Negative values are clamped to 0 (chunking disabled).
      csize = csize > 0 ? csize : 0;
      LOG.info("Set distcp blocksPerChunk to " + csize);
      builder.withBlocksPerChunk(csize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("blocksPerChunk is invalid: "
          + chunkSizeStr, e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
    // Same misplaced-trim fix as blocksPerChunk above.
    final String copyBufferSizeStr = getVal(command,
        DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch());
    try {
      int copyBufferSize = Integer.parseInt(
          copyBufferSizeStr == null ? null : copyBufferSizeStr.trim());
      builder.withCopyBufferSize(copyBufferSize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("copyBufferSize is invalid: "
          + copyBufferSizeStr, e);
    }
  }

  return builder.build();
}
// Verifies parse() rejects "-m" with a missing numeric value as IllegalArgumentException.
@Test public void testInvalidArgs() { try { OptionsParser.parse(new String[] { "-m", "-f", "hdfs://localhost:8020/source"}); Assert.fail("Missing map value"); } catch (IllegalArgumentException ignore) {} }
// Creates a project (generating a unique key when none is supplied), binds it to the
// DevOps platform's ALM settings, grants the current user scan permission, and — when
// provisioning is managed — syncs the project's permissions with the platform.
// Visibility comes from the remote repository's public/private flag.
@Override public ComponentCreationData createProjectAndBindToDevOpsPlatform(DbSession dbSession, CreationMethod creationMethod, Boolean monorepo, @Nullable String projectKey, @Nullable String projectName) { String key = Optional.ofNullable(projectKey).orElse(generateUniqueProjectKey()); boolean isManaged = devOpsPlatformSettings.isProvisioningEnabled(); Boolean shouldProjectBePrivate = shouldProjectBePrivate(devOpsProjectCreationContext.isPublic()); ComponentCreationData componentCreationData = projectCreator.createProject(dbSession, key, getProjectName(projectName), devOpsProjectCreationContext.defaultBranchName(), creationMethod, shouldProjectBePrivate, isManaged); ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow(); createProjectAlmSettingDto(dbSession, projectDto, devOpsProjectCreationContext.almSettingDto(), monorepo); addScanPermissionToCurrentUser(dbSession, projectDto); BranchDto mainBranchDto = Optional.ofNullable(componentCreationData.mainBranchDto()).orElseThrow(); if (isManaged) { syncProjectPermissionsWithDevOpsPlatform(projectDto, mainBranchDto); } return componentCreationData; }
// Verifies that with provisioning enabled but visibility sync disabled, the created
// project defaults to private.
@Test
void createProjectAndBindToDevOpsPlatformFromScanner_whenVisibilitySynchronizationDisabled_successfullyCreatesProjectAndMakesProjectPrivate() {
    // given
    mockGeneratedProjectKey();
    ComponentCreationData componentCreationData = mockProjectCreation("generated_orga2/repo1");
    ProjectAlmSettingDao projectAlmSettingDao = mock();
    when(dbClient.projectAlmSettingDao()).thenReturn(projectAlmSettingDao);
    when(devOpsPlatformSettings.isProvisioningEnabled()).thenReturn(true);
    when(devOpsPlatformSettings.isProjectVisibilitySynchronizationActivated()).thenReturn(false);
    // when
    ComponentCreationData actualComponentCreationData = defaultDevOpsProjectCreator.createProjectAndBindToDevOpsPlatform(dbClient.openSession(true), SCANNER_API_DEVOPS_AUTO_CONFIG, false, null, null);
    // then
    assertThat(actualComponentCreationData).isEqualTo(componentCreationData);
    ComponentCreationParameters componentCreationParameters = componentCreationParametersCaptor.getValue();
    assertThat(componentCreationParameters.newComponent().isPrivate()).isTrue();
}
// Wraps the given array in an unpooled heap buffer without copying; changes to the
// array are visible through the buffer. An empty array maps to the shared EMPTY_BUFFER.
public static ByteBuf wrappedBuffer(byte[] array) { if (array.length == 0) { return EMPTY_BUFFER; } return new UnpooledHeapByteBuf(ALLOC, array, array.length); }
// Verifies unsigned byte-wise comparison: 0xFF content compares greater than 0x00
// content, for both four-byte and single-byte buffers.
@Test public void testCompare2() { ByteBuf expected = wrappedBuffer(new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}); ByteBuf actual = wrappedBuffer(new byte[]{(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00}); assertTrue(ByteBufUtil.compare(expected, actual) > 0); expected.release(); actual.release(); expected = wrappedBuffer(new byte[]{(byte) 0xFF}); actual = wrappedBuffer(new byte[]{(byte) 0x00}); assertTrue(ByteBufUtil.compare(expected, actual) > 0); expected.release(); actual.release(); }
/**
 * Reassembles this URL as protocol://host[:port][/path][query][#fragment].
 * Path and fragment are re-encoded; default ports are omitted.
 */
@Override
public String toString() {
    StringBuilder b = new StringBuilder();
    if (StringUtils.isNotBlank(protocol)) {
        b.append(protocol);
        b.append("://");
    }
    if (StringUtils.isNotBlank(host)) {
        b.append(host);
    }
    // Only emit an explicit port when it differs from the protocol default.
    if (!isPortDefault() && port != -1) {
        b.append(':');
        b.append(port);
    }
    if (StringUtils.isNotBlank(path)) {
        // If no scheme/host/port, leave the path as is
        if (b.length() > 0 && !path.startsWith("/")) {
            b.append('/');
        }
        b.append(encodePath(path));
    }
    // NOTE(review): assumes queryString.toString() includes its leading '?' — confirm.
    if (queryString != null && !queryString.isEmpty()) {
        b.append(queryString.toString());
    }
    if (fragment != null) {
        b.append("#");
        b.append(encodePath(fragment));
    }
    return b.toString();
}
// Verifies illegal characters (double quotes) in the path are percent-encoded by toString().
@Test public void testInvalidURL() { s = "http://www.example.com/\"path\""; t = "http://www.example.com/%22path%22"; assertEquals(t, new HttpURL(s).toString()); }
/**
 * Builds the alias -> plugin-class map for a scan result. Each plugin
 * contributes its simple name and its pruned name as candidate aliases;
 * any alias claimed by more than one distinct class is dropped (logged
 * at debug level) to avoid ambiguity.
 */
public static Map<String, String> computeAliases(PluginScanResult scanResult) {
    Map<String, Set<String>> candidates = new HashMap<>();
    scanResult.forEach(pluginDesc -> {
        String className = pluginDesc.className();
        candidates.computeIfAbsent(simpleName(pluginDesc), k -> new HashSet<>()).add(className);
        candidates.computeIfAbsent(prunedName(pluginDesc), k -> new HashSet<>()).add(className);
    });
    Map<String, String> aliases = new HashMap<>();
    candidates.forEach((alias, classNames) -> {
        if (classNames.size() == 1) {
            aliases.put(alias, classNames.iterator().next());
        } else {
            log.debug("Ignoring ambiguous alias '{}' since it refers to multiple distinct plugins {}", alias, classNames);
        }
    });
    return aliases;
}
// Verifies that when a converter and a header converter share the same pruned alias,
// that ambiguous alias is dropped and only the distinct simple-name aliases survive.
@Test public void testCollidingPrunedAlias() { SortedSet<PluginDesc<Converter>> converters = new TreeSet<>(); converters.add(new PluginDesc<>(CollidingConverter.class, null, PluginType.CONVERTER, CollidingConverter.class.getClassLoader())); SortedSet<PluginDesc<HeaderConverter>> headerConverters = new TreeSet<>(); headerConverters.add(new PluginDesc<>(CollidingHeaderConverter.class, null, PluginType.HEADER_CONVERTER, CollidingHeaderConverter.class.getClassLoader())); PluginScanResult result = new PluginScanResult( Collections.emptySortedSet(), Collections.emptySortedSet(), converters, headerConverters, Collections.emptySortedSet(), Collections.emptySortedSet(), Collections.emptySortedSet(), Collections.emptySortedSet(), Collections.emptySortedSet() ); Map<String, String> actualAliases = PluginUtils.computeAliases(result); Map<String, String> expectedAliases = new HashMap<>(); expectedAliases.put("CollidingConverter", CollidingConverter.class.getName()); expectedAliases.put("CollidingHeaderConverter", CollidingHeaderConverter.class.getName()); assertEquals(expectedAliases, actualAliases); }
// Deletes the cluster's JobManager Deployment by its derived name; cascading(true)
// lets Kubernetes also remove all resources owned by the deployment.
@Override public void stopAndCleanupCluster(String clusterId) { this.internalClient .apps() .deployments() .withName(KubernetesUtils.getDeploymentName(clusterId)) .cascading(true) .delete(); }
// Verifies that after creating JobManager components and a TaskManager pod,
// stopAndCleanupCluster removes the JobManager deployment.
@Test void testStopAndCleanupCluster() throws Exception { this.flinkKubeClient.createJobManagerComponent(this.kubernetesJobManagerSpecification); final KubernetesPod kubernetesPod = buildKubernetesPod(TASKMANAGER_POD_NAME); this.flinkKubeClient.createTaskManagerPod(kubernetesPod).get(); assertThat( this.kubeClient .apps() .deployments() .inNamespace(NAMESPACE) .list() .getItems() .size()) .isEqualTo(1); assertThat(this.kubeClient.configMaps().inNamespace(NAMESPACE).list().getItems().size()) .isEqualTo(1); assertThat(this.kubeClient.services().inNamespace(NAMESPACE).list().getItems()).hasSize(2); assertThat(this.kubeClient.pods().inNamespace(NAMESPACE).list().getItems()).hasSize(1); this.flinkKubeClient.stopAndCleanupCluster(CLUSTER_ID); assertThat(this.kubeClient.apps().deployments().inNamespace(NAMESPACE).list().getItems()) .isEmpty(); }
// Template entry point: validates the config file's presence, loads it, and delegates
// to the testable overload with a real ksqlClient factory, the migrations directory,
// and the system clock. Returns 1 (failure) on any missing/invalid configuration.
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command( config, MigrationsUtil::getKsqlClient, getMigrationsDir(getConfigFile(), config), Clock.systemDefaultZone() ); }
// Verifies an unparseable migration statement yields exit code 1 and records the
// version as ERROR with the parser's message.
@Test
public void shouldThrowErrorOnParsingFailure() throws Exception {
    // Given:
    command = PARSER.parse("-n");
    createMigrationFile(1, NAME, migrationsDir, "SHOW TABLES;");
    when(versionQueryResult.get()).thenReturn(ImmutableList.of());
    // When:
    final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed( Instant.ofEpochMilli(1000), ZoneId.systemDefault()));
    // Then:
    assertThat(result, is(1));
    final InOrder inOrder = inOrder(ksqlClient);
    verifyMigratedVersion( inOrder, 1, "<none>", MigrationState.ERROR, Optional.of("Failed to parse sql: SHOW TABLES;. Error: 'SHOW' statements are not supported."), () -> {});
}
/**
 * Deletes job logs created more than {@code exceedDay} days ago, in batches of
 * {@code deleteLimit} rows, and returns the total number of rows removed.
 */
@Override
@SuppressWarnings("DuplicatedCode")
public Integer cleanJobLog(Integer exceedDay, Integer deleteLimit) {
    int count = 0;
    LocalDateTime expireDate = LocalDateTime.now().minusDays(exceedDay);
    // Delete in a loop until no more rows match the condition.
    for (int i = 0; i < Short.MAX_VALUE; i++) {
        int deleteCount = jobLogMapper.deleteByCreateTimeLt(expireDate, deleteLimit);
        count += deleteCount;
        // Fewer rows than the batch limit were deleted, so we have reached the end.
        if (deleteCount < deleteLimit) {
            break;
        }
    }
    return count;
}
// Verifies cleanup with a 2-day threshold: the 3-day-old log is deleted, the
// 1-day-old log survives.
@Test
public void testCleanJobLog() {
    // mock data
    JobLogDO log01 = randomPojo(JobLogDO.class, o -> o.setCreateTime(addTime(Duration.ofDays(-3)))) .setExecuteIndex(1);
    jobLogMapper.insert(log01);
    JobLogDO log02 = randomPojo(JobLogDO.class, o -> o.setCreateTime(addTime(Duration.ofDays(-1)))) .setExecuteIndex(1);
    jobLogMapper.insert(log02);
    // prepare parameters
    Integer exceedDay = 2;
    Integer deleteLimit = 1;
    // invoke
    Integer count = jobLogService.cleanJobLog(exceedDay, deleteLimit);
    // assert
    assertEquals(1, count);
    List<JobLogDO> logs = jobLogMapper.selectList();
    assertEquals(1, logs.size());
    assertEquals(log02, logs.get(0));
}
// Sends a WIPE_WRITE_PERM_OF_BROKER request to the given namesrv synchronously and
// returns the number of topics whose write permission was wiped. Any non-SUCCESS
// response code is surfaced as MQClientException carrying the remote code/remark.
public int wipeWritePermOfBroker(final String namesrvAddr, String brokerName, final long timeoutMillis) throws RemotingCommandException, RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQClientException { WipeWritePermOfBrokerRequestHeader requestHeader = new WipeWritePermOfBrokerRequestHeader(); requestHeader.setBrokerName(brokerName); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.WIPE_WRITE_PERM_OF_BROKER, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(namesrvAddr, request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { WipeWritePermOfBrokerResponseHeader responseHeader = (WipeWritePermOfBrokerResponseHeader) response.decodeCommandCustomHeader(WipeWritePermOfBrokerResponseHeader.class); return responseHeader.getWipeTopicCount(); } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }
// Verifies the wipe-write-perm call returns the topic count (1) from the mocked response header.
@Test public void assertWipeWritePermOfBroker() throws RemotingException, InterruptedException, MQClientException { mockInvokeSync(); WipeWritePermOfBrokerResponseHeader responseHeader = mock(WipeWritePermOfBrokerResponseHeader.class); when(responseHeader.getWipeTopicCount()).thenReturn(1); setResponseHeader(responseHeader); assertEquals(1, mqClientAPI.wipeWritePermOfBroker(defaultNsAddr, brokerName, defaultTimeout)); }
/**
 * Fetches the event's message backlog and truncates it to the configured
 * backlog size. A size of zero (or less) means "no per-notification limit",
 * and a null backlog is passed through untouched.
 */
@VisibleForTesting
List<MessageSummary> getMessageBacklog(EventNotificationContext ctx, SlackEventNotificationConfig config) {
    final List<MessageSummary> backlog = notificationCallbackService.getBacklogForEvent(ctx);
    final long limit = config.backlogSize();
    if (backlog == null || limit <= 0) {
        return backlog;
    }
    return backlog.stream().limit(limit).collect(Collectors.toList());
}
// Verifies that a per-notification backlog size of 0 applies no truncation,
// leaving the full (50-message) backlog intact.
@Test
public void testBacklogMessageLimitWhenBacklogSizeIsZero() {
    SlackEventNotificationConfig slackConfig = SlackEventNotificationConfig.builder() .backlogSize(0) .build();
    // global setting is at N and the message override is 0 then the backlog size = 50
    List<MessageSummary> messageSummaries = slackEventNotification.getMessageBacklog(eventNotificationContext, slackConfig);
    assertThat(messageSummaries.size()).isEqualTo(50);
}
// Resolves the project's WAR artifact path: build directory + final name, unless the
// maven-war-plugin's "default-war" execution overrides <warName> in its configuration.
@VisibleForTesting Path getWarArtifact() { Build build = project.getBuild(); String warName = build.getFinalName(); Plugin warPlugin = project.getPlugin("org.apache.maven.plugins:maven-war-plugin"); if (warPlugin != null) { for (PluginExecution execution : warPlugin.getExecutions()) { if ("default-war".equals(execution.getId())) { Xpp3Dom configuration = (Xpp3Dom) execution.getConfiguration(); warName = getChildValue(configuration, "warName").orElse(warName); } } } return Paths.get(build.getDirectory(), warName + ".war"); }
// Verifies the default WAR path is <buildDir>/<finalName>.war when no plugin override exists.
@Test public void testGetWarArtifact() { when(mockBuild.getDirectory()).thenReturn(Paths.get("/foo/bar").toString()); when(mockBuild.getFinalName()).thenReturn("helloworld-1"); assertThat(mavenProjectProperties.getWarArtifact()) .isEqualTo(Paths.get("/foo/bar/helloworld-1.war")); }
// Static factory for a Write transform with an all-default (unset) builder.
public static Write write() { return new AutoValue_PulsarIO_Write.Builder().build(); }
// Writes 100 messages through the Pulsar sink and checks they all arrive in order.
// Fixes: the original swallowed every exception (so the test could never fail on
// error), polled the topic twice via a second receiveMessages() call, and used
// assertTrue(String.equals(...)) instead of assertEquals.
@Test
public void testWriteFromTopic() throws Exception {
    PulsarIO.Write writer =
        PulsarIO.write().withClientUrl(pulsarContainer.getPulsarBrokerUrl()).withTopic(TOPIC);
    int numberOfMessages = 100;
    List<byte[]> messages = new ArrayList<>();
    for (int i = 0; i < numberOfMessages; i++) {
        messages.add(("PULSAR_WRITER_TEST_" + i).getBytes(StandardCharsets.UTF_8));
    }
    testPipeline.apply(Create.of(messages)).apply(writer);
    testPipeline.run();
    List<Message<byte[]>> receiveMsgs = receiveMessages();
    // Assert on the list we already fetched instead of consuming the topic again.
    assertEquals(numberOfMessages, receiveMsgs.size());
    for (int i = 0; i < numberOfMessages; i++) {
        assertEquals(
            "PULSAR_WRITER_TEST_" + i,
            new String(receiveMsgs.get(i).getValue(), StandardCharsets.UTF_8));
    }
}
/**
 * Decodes a binary-protocol TIME value into a {@link Timestamp}.
 *
 * <p>Layout (assumed standard MySQL binary TIME encoding — confirm against
 * the protocol spec): 1-byte payload length, then when present a 1-byte
 * negative flag, 4-byte days, HH:MM:SS, and optionally 4-byte microseconds.
 *
 * @throws SQLFeatureNotSupportedException on an unexpected payload length
 */
@Override
public Object read(final MySQLPacketPayload payload, final boolean unsigned) throws SQLException {
    int length = payload.readInt1();
    payload.readInt1();
    payload.readInt4();
    switch (length) {
        case 0:
            // Zero length denotes a zero time value.
            return new Timestamp(0L);
        case 8:
            return getTimestamp(payload);
        case 12:
            Timestamp result = getTimestamp(payload);
            result.setNanos(payload.readInt4());
            return result;
        default:
            // BUGFIX: this is the TIME decoder — the message previously
            // (incorrectly) reported MYSQL_TYPE_DATE.
            throw new SQLFeatureNotSupportedException(String.format("Wrong length `%d` of MYSQL_TYPE_TIME", length));
    }
}
// Verifies an 8-byte TIME payload decodes to 10:59:00 (readInt1 yields length=8,
// then sign=0, then hour=10, minute=59, second=0; readInt4 days defaults to 0).
@Test void assertReadWithEightBytes() throws SQLException { when(payload.readInt1()).thenReturn(8, 0, 10, 59, 0); Calendar actual = Calendar.getInstance(); actual.setTimeInMillis(((Timestamp) new MySQLTimeBinaryProtocolValue().read(payload, false)).getTime()); assertThat(actual.get(Calendar.HOUR_OF_DAY), is(10)); assertThat(actual.get(Calendar.MINUTE), is(59)); assertThat(actual.get(Calendar.SECOND), is(0)); }
// Returns the raw event-type code carried by this event.
@Override public int getEventType() { return eventType; }
// Verifies the fixture's event type (23) is returned unchanged.
@Test public void testGetEventType() { assertEquals(23, localCacheWideEventData.getEventType()); }
// Returns the local host identity: the canonical FQDN when useFqdn is enabled,
// otherwise the literal IP string of localAddr.
public static String getLocalHostAddress() { if (useFqdn) { return localAddr.getCanonicalHostName(); } return InetAddresses.toAddrString(localAddr); }
// Verifies getLocalHostAddress returns the FQDN ("sandbox") when useFqdn is set and the
// IP literal ("127.0.0.10") otherwise, injecting the static fields reflectively.
@Test public void enableFQDNTest() throws UnknownHostException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException { mockNet(); Field field = FrontendOptions.class.getDeclaredField("localAddr"); field.setAccessible(true); field.set(null, addr); Field field1 = FrontendOptions.class.getDeclaredField("useFqdn"); field1.setAccessible(true); field1.set(null, true); Assert.assertTrue(FrontendOptions.getLocalHostAddress().equals("sandbox")); field1.set(null, false); Assert.assertTrue(FrontendOptions.getLocalHostAddress().equals("127.0.0.10")); }
/**
 * Runs CLUSTER INFO synchronously and repackages the returned key/value
 * pairs into a {@link ClusterInfo}.
 */
@Override
public ClusterInfo clusterGetClusterInfo() {
    RFuture<Map<String, String>> future =
        executorService.readAsync((String) null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
    Properties props = new Properties();
    // setProperty(String, String) matches the map's BiConsumer shape directly.
    syncFuture(future).forEach(props::setProperty);
    return new ClusterInfo(props);
}
// Verifies CLUSTER INFO parsing on a healthy test cluster: zero failed slots and all
// MAX_SLOT slots both ok and assigned.
@Test public void testClusterGetClusterInfo() { ClusterInfo info = connection.clusterGetClusterInfo(); assertThat(info.getSlotsFail()).isEqualTo(0); assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT); assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT); }
// Maps an ODPS/MaxCompute TypeInfo to the engine's Type, carrying over precision,
// scale, and length where applicable and recursing into map/array/struct element
// types. Any unrecognized ODPS type falls back to VARCHAR.
public static Type convertType(TypeInfo typeInfo) { switch (typeInfo.getOdpsType()) { case BIGINT: return Type.BIGINT; case INT: return Type.INT; case SMALLINT: return Type.SMALLINT; case TINYINT: return Type.TINYINT; case FLOAT: return Type.FLOAT; case DECIMAL: DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo; return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale()); case DOUBLE: return Type.DOUBLE; case CHAR: CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo; return ScalarType.createCharType(charTypeInfo.getLength()); case VARCHAR: VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo; return ScalarType.createVarcharType(varcharTypeInfo.getLength()); case STRING: case JSON: return ScalarType.createDefaultCatalogString(); case BINARY: return Type.VARBINARY; case BOOLEAN: return Type.BOOLEAN; case DATE: return Type.DATE; case TIMESTAMP: case DATETIME: return Type.DATETIME; case MAP: MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo; return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()), convertType(mapTypeInfo.getValueTypeInfo())); case ARRAY: ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo; return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo())); case STRUCT: StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo; List<Type> fieldTypeList = structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType) .collect(Collectors.toList()); return new StructType(fieldTypeList); default: return Type.VARCHAR; } }
// Verifies ODPS INT maps to the engine's Type.INT.
@Test public void testConvertTypeCaseInt() { TypeInfo typeInfo = TypeInfoFactory.INT; Type result = EntityConvertUtils.convertType(typeInfo); assertEquals(Type.INT, result); }
/**
 * Deserializes a map from the string form produced by toString():
 * "<numEntries>:" followed by "<keyLen>:<key><valLen>:<value>" per entry,
 * where a length of -1 denotes a null key/value and 0 an empty string.
 */
public StringableMap(String s) {
    super();
    if (s == null || s.isEmpty()) {
        return;
    }
    String[] parts = s.split(":", 2);
    // read that many chars
    int numElements = Integer.parseInt(parts[0]);
    s = parts[1];
    for (int i = 0; i < numElements; i++) {
        // Get the key String.
        parts = s.split(":", 2);
        int len = Integer.parseInt(parts[0]);
        String key = ""; // Default is now an empty string.
        if (len > 0) key = parts[1].substring(0, len);
        // Please check the toString() method of this class.
        // null has -1 as length and empty String has 0.
        else if (len < 0) {
            key = null;
            len = 0; // Set 0 to 'len' for null-valued key
            // since only len exists for null-valued key from the given String "s".
        }
        // Get the value String for the key
        parts = parts[1].substring(len).split(":", 2);
        len = Integer.parseInt(parts[0]);
        String value = "";
        if (len > 0) value = parts[1].substring(0, len);
        else if (len < 0) {
            value = null;
            len = 0; // Set 0 to 'len' since only len exists.
        }
        // Put the entry into the HashMap<String, String>.
        put(key, value);
        // Move to the next substring to process.
        s = parts[1].substring(len);
    }
}
// Round-trip test: an empty map serializes to "0:" and back, and a map containing a
// normal pair, a null value, and a null key survives toString()/parse intact.
@Test
public void stringableMap() throws Exception {
    // Empty map case
    StringableMap m = new StringableMap(new HashMap<String, String>());
    String s = m.toString();
    Assert.assertEquals("0:", s);
    m = new StringableMap(s);
    Assert.assertEquals(0, m.size());
    Map<String, String> base = new HashMap<String, String>();
    base.put("mary", "poppins");
    base.put("bert", null);
    base.put(null, "banks");
    m = new StringableMap(base);
    s = m.toString();
    m = new StringableMap(s);
    Assert.assertEquals(3, m.size());
    Map<String, Boolean> saw = new HashMap<String, Boolean>(3);
    saw.put("mary", false);
    saw.put("bert", false);
    saw.put(null, false);
    for (Map.Entry<String, String> e : m.entrySet()) {
        saw.put(e.getKey(), true);
        if ("mary".equals(e.getKey())) Assert.assertEquals("poppins", e.getValue());
        else if ("bert".equals(e.getKey())) Assert.assertNull(e.getValue());
        else if (null == e.getKey()) Assert.assertEquals("banks", e.getValue());
        else Assert.fail("Unexpected value " + e.getKey());
    }
    Assert.assertEquals(3, saw.size());
    Assert.assertTrue(saw.get("mary"));
    Assert.assertTrue(saw.get("bert"));
    Assert.assertTrue(saw.get(null));
}
/**
 * Returns the effective "http_publish_uri": the default HTTP URI when unset,
 * the default (preserving the configured path) when the configured host is a
 * wildcard address, otherwise the configured URI normalized with the default
 * Graylog port.
 */
public URI getHttpPublishUri() { if (httpPublishUri == null) { final URI defaultHttpUri = getDefaultHttpUri(); LOG.debug("No \"http_publish_uri\" set. Using default <{}>.", defaultHttpUri); return defaultHttpUri; } else { final InetAddress inetAddress = toInetAddress(httpPublishUri.getHost()); if (Tools.isWildcardInetAddress(inetAddress)) { final URI defaultHttpUri = getDefaultHttpUri(httpPublishUri.getPath()); LOG.warn("\"{}\" is not a valid setting for \"http_publish_uri\". Using default <{}>.", httpPublishUri, defaultHttpUri); return defaultHttpUri; } else { return Tools.normalizeURI(httpPublishUri, httpPublishUri.getScheme(), GRAYLOG_DEFAULT_PORT, httpPublishUri.getPath()); } } }
// A custom port in the configured http_publish_uri must be preserved after processing.
@Test public void testHttpPublishUriWithCustomPort() throws RepositoryException, ValidationException { jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_publish_uri", "http://example.com:12900/"))).addConfigurationBean(configuration).process(); assertThat(configuration.getHttpPublishUri()).hasPort(12900); }
/** Reads the value at {@code columnIndex} from the merged result set and converts it to {@link BigDecimal}. */
@Override public BigDecimal getBigDecimal(final int columnIndex) throws SQLException { return (BigDecimal) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, BigDecimal.class), BigDecimal.class); }
// getBigDecimal by column label (with scale) must resolve to the value of the underlying indexed column.
@Test void assertGetBigDecimalAndScaleWithColumnLabel() throws SQLException { when(mergeResultSet.getValue(1, BigDecimal.class)).thenReturn(new BigDecimal("1")); assertThat(shardingSphereResultSet.getBigDecimal("label", 10), is(new BigDecimal("1"))); }
/**
 * Asserts that the actual string contains a match for {@code regex}; a null subject
 * fails with a dedicated "expected a string that contains a match for" message.
 */
@GwtIncompatible("java.util.regex.Pattern") public void containsMatch(@Nullable Pattern regex) { checkNotNull(regex); if (actual == null) { failWithActual("expected a string that contains a match for", regex); } else if (!regex.matcher(actual).find()) { failWithActual("expected to contain a match for", regex); } }
// containsMatch on a null subject must report the null-specific failure message.
@Test public void stringContainsMatchStringFailNull() { expectFailureWhenTestingThat(null).containsMatch(".*b.*"); assertFailureValue("expected a string that contains a match for", ".*b.*"); }
@Override
public String toString() {
  // Renders exactly as Guava's MoreObjects.toStringHelper(ByteKeyRange.class)
  // did: "ByteKeyRange{startKey=<start>, endKey=<end>}" (String.valueOf semantics
  // for the two keys are matched by string concatenation).
  return "ByteKeyRange{startKey=" + startKey + ", endKey=" + endKey + '}';
}
// Pins the exact toString() rendering of the UP_TO_10 range (empty start key, end key [0a]).
@Test public void testToString() { assertEquals("ByteKeyRange{startKey=[], endKey=[0a]}", UP_TO_10.toString()); }
/**
 * CLI entry point: suppresses the macOS Dock icon for this headless-style AWT process,
 * delegates to the picocli ExtractText command, and exits with its return code.
 */
public static void main( String[] args ) { // suppress the Dock icon on OS X System.setProperty("apple.awt.UIElement", "true"); int exitCode = new CommandLine(new ExtractText()).execute(args); System.exit(exitCode); }
// Runs the export:text subcommand twice in one invocation and verifies both PDFs'
// text is emitted to the console without per-file headers.
@Test void testPDFBoxRepeatableSubcommand() throws Exception { PDFBox.main(new String[] { "export:text", "-i", testfile1, "-console", // "export:text", "-i", testfile2, "-console" }); String result = out.toString("UTF-8"); assertTrue(result.contains("PDF1")); assertTrue(result.contains("PDF2")); assertFalse(result.contains("PDF file: " + filename1)); assertTrue(result.contains("Hello")); assertTrue(result.contains("World.")); assertFalse(result.contains("PDF file: " + filename2)); }
/**
 * Returns an acquired, cached metastore client keyed by (conf, thread id).
 * CACHE_TEARDOWN_LOCK serializes creation/acquire against the cache removal
 * listener so a client cannot be closed between getOrCreate() and acquire();
 * a client found closed is invalidated and recreated under the same lock.
 */
public IMetaStoreClient get(final HiveConf hiveConf) throws MetaException, IOException, LoginException { final HiveClientCacheKey cacheKey = HiveClientCacheKey.fromHiveConf(hiveConf, getThreadId()); ICacheableMetaStoreClient cacheableHiveMetaStoreClient = null; // the hmsc is not shared across threads. So the only way it could get closed while we are doing healthcheck // is if removalListener closes it. The synchronization takes care that removalListener won't do it synchronized (CACHE_TEARDOWN_LOCK) { cacheableHiveMetaStoreClient = getOrCreate(cacheKey); cacheableHiveMetaStoreClient.acquire(); } if (!cacheableHiveMetaStoreClient.isOpen()) { synchronized (CACHE_TEARDOWN_LOCK) { hiveCache.invalidate(cacheKey); cacheableHiveMetaStoreClient.close(); cacheableHiveMetaStoreClient = getOrCreate(cacheKey); cacheableHiveMetaStoreClient.acquire(); } } return cacheableHiveMetaStoreClient; }
// After the 1-second cache timeout elapses, a second get() must yield a different client,
// and closing the first (expired) client must actually tear it down.
@Test public void testCacheExpiry() throws IOException, MetaException, LoginException, InterruptedException { HiveClientCache cache = new HiveClientCache(1); HiveClientCache.ICacheableMetaStoreClient client = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); assertNotNull(client); Thread.sleep(2500); HiveClientCache.ICacheableMetaStoreClient client2 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); client.close(); assertTrue(client.isClosed()); // close() after *expiry time* and *a cache access* should have tore down the client assertNotNull(client2); assertNotSame(client, client2); }
/** Sets the TPC configuration (must be non-null); returns this config for chaining. */
@Beta @Nonnull public ClientConfig setTpcConfig(@Nonnull ClientTpcConfig tpcConfig) { this.tpcConfig = isNotNull(tpcConfig, "tpcConfig"); return this; }
// TPC config: default disabled, setters round-trip, and setTpcConfig(null) is rejected.
@Test public void testTpcConfig() { ClientConfig config = new ClientConfig(); ClientTpcConfig tpcConfig = new ClientTpcConfig(); assertFalse(tpcConfig.isEnabled()); tpcConfig.setEnabled(true); tpcConfig.setConnectionCount(10); config.setTpcConfig(tpcConfig); assertTrue(tpcConfig.isEnabled()); assertEquals(10, tpcConfig.getConnectionCount()); assertThrows(IllegalArgumentException.class, () -> config.setTpcConfig(null)); }
/**
 * Advances the merged result honoring row-count pagination: returns false immediately
 * when all rows are skipped; when an actual row count is known, stops after that many
 * rows; otherwise simply delegates to the merged result.
 */
@Override public boolean next() throws SQLException { if (skipAll) { return false; } if (!paginationContext.getActualRowCount().isPresent()) { return getMergedResult().next(); } return ++rowNumber <= paginationContext.getActualRowCount().get() && getMergedResult().next(); }
// With a LIMIT offset of Long.MAX_VALUE (skip-all pagination), the merged result's next() must return false.
@Test void assertNextForSkipAll() throws SQLException { ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); MySQLSelectStatement selectStatement = new MySQLSelectStatement(); selectStatement.setProjections(new ProjectionsSegment(0, 0)); selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralLimitValueSegment(0, 0, Long.MAX_VALUE), null)); SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList()); when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME); ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL")); MergedResult actual = resultMerger.merge(Arrays.asList(mockQueryResult(), mockQueryResult(), mockQueryResult(), mockQueryResult()), selectStatementContext, database, mock(ConnectionContext.class)); assertFalse(actual.next()); }
/**
 * Executes START TRANSACTION: requires client-side transaction support, rejects nested
 * transactions, begins a transaction with the requested (or default) isolation/read-only
 * mode, records it on the state machine, and immediately marks it inactive because the
 * current session does not yet carry the new transaction id.
 */
@Override public ListenableFuture<?> execute(StartTransaction statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters) { Session session = stateMachine.getSession(); if (!session.isClientTransactionSupport()) { throw new PrestoException(StandardErrorCode.INCOMPATIBLE_CLIENT, "Client does not support transactions"); } if (session.getTransactionId().isPresent()) { throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, "Nested transactions not supported"); } Optional<IsolationLevel> isolationLevel = extractIsolationLevel(statement); Optional<Boolean> readOnly = extractReadOnly(statement); TransactionId transactionId = transactionManager.beginTransaction( isolationLevel.orElse(TransactionManager.DEFAULT_ISOLATION), readOnly.orElse(TransactionManager.DEFAULT_READ_ONLY), false); stateMachine.setStartedTransactionId(transactionId); // Since the current session does not contain this new transaction ID, we need to manually mark it as inactive // when this statement completes. transactionManager.trySetInactive(transactionId); return immediateFuture(null); }
// Two access modes in one START TRANSACTION must raise INVALID_TRANSACTION_MODE and leave no transaction behind.
@Test public void testStartTransactionTooManyAccessModes() { Session session = sessionBuilder() .setClientTransactionSupport() .build(); TransactionManager transactionManager = createTestTransactionManager(); QueryStateMachine stateMachine = createQueryStateMachine("START TRANSACTION", session, true, transactionManager, executor, metadata); assertFalse(stateMachine.getSession().getTransactionId().isPresent()); try { getFutureValue(new StartTransactionTask().execute( new StartTransaction(ImmutableList.of(new TransactionAccessMode(true), new TransactionAccessMode(true))), transactionManager, metadata, new AllowAllAccessControl(), stateMachine, emptyList())); fail(); } catch (SemanticException e) { assertEquals(e.getCode(), INVALID_TRANSACTION_MODE); } assertTrue(transactionManager.getAllTransactionInfos().isEmpty()); assertFalse(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId()); assertFalse(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().isPresent()); }
/**
 * Key-auth filter: responds with a configuration error when the cached rule handle is
 * missing its key name or key, continues the plugin chain when the request carries the
 * expected key, and responds with ERROR_KEY otherwise.
 */
@Override protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) { KeyAuthRuleHandle keyAuthRuleHandle = KeyAuthPluginDataHandler.CACHED_HANDLE.get() .obtainHandle(CacheKeyUtils.INST.getKey(rule)); if (Objects.isNull(keyAuthRuleHandle) || StringUtils.isBlank(keyAuthRuleHandle.getKeyName()) || StringUtils.isBlank(keyAuthRuleHandle.getKey())) { Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.KEY_NAME_AND_KEY_MUST_BE_CONFIGURED); return WebFluxResultUtils.result(exchange, error); } if (checkKey(exchange, keyAuthRuleHandle.getKeyName(), keyAuthRuleHandle.getKey())) { return chain.execute(exchange); } Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.ERROR_KEY); return WebFluxResultUtils.result(exchange, error); }
// An empty rule handle (no key name/key configured) must complete with an error result
// rather than invoking the downstream chain.
@Test public void testNotConfigured() { ruleData.setHandle("{}"); keyAuthPluginDataHandler.handlerRule(ruleData); exchange = MockServerWebExchange.from(MockServerHttpRequest .get("localhost") .build()); Mono<Void> mono = keyAuthPlugin.doExecute(exchange, chain, selectorData, ruleData); StepVerifier.create(mono).expectSubscription().verifyComplete(); }
/**
 * Buffers the incoming item, then drains: once based on the latest input's time
 * (standard policy) and once based on the time high-water mark (guards against
 * unbounded growth when input times move backwards). Also tracks the peak buffer size.
 */
@Override public void accept(T t) { updateTimeHighWaterMark(t.time()); shortTermStorage.add(t); drainDueToLatestInput(t); //standard drain policy drainDueToTimeHighWaterMark(); //prevent blow-up when data goes backwards in time sizeHighWaterMark = Math.max(sizeHighWaterMark, shortTermStorage.size()); }
// Points within the 10-second tolerance window are retained even when they arrive out of
// order; only points older than (high-water mark - tolerance) are evicted, and a point
// older than everything seen triggers the time-order-verifying consumer's assertion.
@Test public void testStalePointsAreNotAutoEvicted() { TimeOrderVerifyingConsumer downstreamConsumer = new TimeOrderVerifyingConsumer(); ApproximateTimeSorter<TimePojo> sorter = new ApproximateTimeSorter<>( Duration.ofSeconds(10), downstreamConsumer ); sorter.accept(new TimePojo(EPOCH)); sorter.accept(new TimePojo(EPOCH.plusSeconds(4))); sorter.accept(new TimePojo(EPOCH.plusSeconds(8))); assertEquals(0, downstreamConsumer.timePojos.size()); //Adding this point should evict the 1st point because it is more than 10 seconds older than the most recent input sorter.accept(new TimePojo(EPOCH.plusSeconds(12))); //evict Epoch + 0 assertEquals(1, downstreamConsumer.timePojos.size()); sorter.accept(new TimePojo(EPOCH.plusSeconds(16))); //evict Epoch + 4 sorter.accept(new TimePojo(EPOCH.plusSeconds(20))); //evict Epoch + 8 assertEquals(3, downstreamConsumer.timePojos.size()); //none of these "semi-stale" points are auto evicted because are in the "wait for more data" window sorter.accept(new TimePojo(EPOCH.plusSeconds(14))); sorter.accept(new TimePojo(EPOCH.plusSeconds(13))); sorter.accept(new TimePojo(EPOCH.plusSeconds(12))); sorter.accept(new TimePojo(EPOCH.plusSeconds(11))); sorter.accept(new TimePojo(EPOCH.plusSeconds(10))); sorter.accept(new TimePojo(EPOCH.plusSeconds(9))); sorter.accept(new TimePojo(EPOCH.plusSeconds(8))); sorter.accept(new TimePojo(EPOCH.plusSeconds(7))); sorter.accept(new TimePojo(EPOCH.plusSeconds(6))); sorter.accept(new TimePojo(EPOCH.plusSeconds(5))); sorter.accept(new TimePojo(EPOCH.plusSeconds(4))); sorter.accept(new TimePojo(EPOCH.plusSeconds(3))); sorter.accept(new TimePojo(EPOCH.plusSeconds(2))); sorter.accept(new TimePojo(EPOCH.plusSeconds(1))); sorter.accept(new TimePojo(EPOCH)); assertEquals(3, downstreamConsumer.timePojos.size()); try { sorter.accept(new TimePojo(EPOCH.minusSeconds(1))); //if no exception is thrown above then we should fail fail("We expect this point the get evicted -- and trigger the TimeOrderVerifyingConsumer"); } catch 
(AssertionError ae) { //suppress the expected exception } }
/**
 * Removes a non-primary device from the account, performing the removal under the
 * account lock (keyed by phone number) with a bounded retry count. Throws
 * IllegalArgumentException when asked to remove the primary device.
 */
public CompletableFuture<Account> removeDevice(final Account account, final byte deviceId) { if (deviceId == Device.PRIMARY_ID) { throw new IllegalArgumentException("Cannot remove primary device"); } return accountLockManager.withLockAsync(List.of(account.getNumber()), () -> removeDevice(account.getIdentifier(IdentityType.ACI), deviceId, MAX_UPDATE_ATTEMPTS), accountLockExecutor); }
// Removing the primary device must fail fast without clearing messages, keys, or presence.
@Test void testRemovePrimaryDevice() { final Device primaryDevice = new Device(); primaryDevice.setId(Device.PRIMARY_ID); final Account account = AccountsHelper.generateTestAccount("+14152222222", List.of(primaryDevice)); when(keysManager.deleteSingleUsePreKeys(any(), anyByte())).thenReturn(CompletableFuture.completedFuture(null)); when(messagesManager.clear(any(), anyByte())).thenReturn(CompletableFuture.completedFuture(null)); assertThrows(IllegalArgumentException.class, () -> accountsManager.removeDevice(account, Device.PRIMARY_ID)); assertDoesNotThrow(account::getPrimaryDevice); verify(messagesManager, never()).clear(any(), anyByte()); verify(keysManager, never()).deleteSingleUsePreKeys(any(), anyByte()); verify(clientPresenceManager, never()).disconnectPresence(any(), anyByte()); }
/**
 * Fetches windowed rows for a key from the timestamped window store of the given
 * partition. The store fetch range is narrowed using both the window-start and
 * window-end bounds, then each returned window is filtered against both bounds
 * before being materialized into a WindowedRow. Any failure is wrapped in a
 * MaterializationException.
 */
@Override public KsMaterializedQueryResult<WindowedRow> get( final GenericKey key, final int partition, final Range<Instant> windowStartBounds, final Range<Instant> windowEndBounds, final Optional<Position> position ) { try { final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore .store(QueryableStoreTypes.timestampedWindowStore(), partition); final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds); final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds); try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = cacheBypassFetcher.fetch(store, key, lower, upper)) { final Builder<WindowedRow> builder = ImmutableList.builder(); while (it.hasNext()) { final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next(); final Instant windowStart = Instant.ofEpochMilli(next.key); if (!windowStartBounds.contains(windowStart)) { continue; } final Instant windowEnd = windowStart.plus(windowSize); if (!windowEndBounds.contains(windowEnd)) { continue; } final TimeWindow window = new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli()); final WindowedRow row = WindowedRow.of( stateStore.schema(), new Windowed<>(key, window), next.value.value(), next.value.timestamp() ); builder.add(row); } return KsMaterializedQueryResult.rowIterator(builder.build().iterator()); } } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
// When the window-start lower bound is the highest lower bound, it must be used as the store fetch lower bound.
@Test public void shouldFetchWithStartLowerBoundIfHighest() { // Given: final Range<Instant> startBounds = Range.closed( NOW.plusSeconds(5), NOW.plusSeconds(10) ); final Range<Instant> endBounds = Range.closed( NOW, NOW.plusSeconds(15).plus(WINDOW_SIZE) ); // When: table.get(A_KEY, PARTITION, startBounds, endBounds); // Then: verify(cacheBypassFetcher).fetch(eq(tableStore), any(), eq(startBounds.lowerEndpoint()), any()); }
/** Returns every registered vuln-detector plugin matched against the given reconnaissance report. */
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors( ReconnaissanceReport reconnaissanceReport) { return tsunamiPlugins.entrySet().stream() .filter(entry -> isVulnDetector(entry.getKey())) .map(entry -> matchAllVulnDetectors(entry.getKey(), entry.getValue(), reconnaissanceReport)) .flatMap(Streams::stream) .collect(toImmutableList()); }
// A detector whose service-name filter matches no scanned service must produce no matching results.
@Test public void getVulnDetectors_whenServiceNameFilterHasNoMatchingService_returnsEmpty() { NetworkService httpsService = NetworkService.newBuilder() .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443)) .setTransportProtocol(TransportProtocol.TCP) .setServiceName("https") .build(); ReconnaissanceReport fakeReconnaissanceReport = ReconnaissanceReport.newBuilder() .setTargetInfo(TargetInfo.getDefaultInstance()) .addNetworkServices(httpsService) .build(); PluginManager pluginManager = Guice.createInjector( new FakePortScannerBootstrapModule(), new FakeServiceFingerprinterBootstrapModule(), FakeServiceNameFilteringDetector.getModule()) .getInstance(PluginManager.class); ImmutableList<PluginMatchingResult<VulnDetector>> vulnDetectors = pluginManager.getVulnDetectors(fakeReconnaissanceReport); assertThat(vulnDetectors).isEmpty(); }
/** Fails if the subject is null (implemented as inequality with null). */
public void isNotNull() { standardIsNotEqualTo(null); }
// Sanity check: a freshly constructed object passes isNotNull().
@Test public void isNotNull() { Object o = new Object(); assertThat(o).isNotNull(); }
/** Sends a GET request to {@code urlString} and returns the response body decoded with {@code customCharset}. */
public static String get(String urlString, Charset customCharset) { return HttpRequest.get(urlString).charset(customCharset).execute().body(); }
// Disabled manual test against oschina.net: scrapes titles from a listing page and
// requests the next page to check cookie reuse across calls.
@Test @Disabled public void oschinaTest() { // 请求列表页 String listContent = HttpUtil.get("https://www.oschina.net/action/ajax/get_more_news_list?newsType=&p=2"); // 使用正则获取所有标题 final List<String> titles = ReUtil.findAll("<span class=\"text-ellipsis\">(.*?)</span>", listContent, 1); for (final String title : titles) { // 打印标题 Console.log(title); } // 请求下一页,检查Cookie是否复用 listContent = HttpUtil.get("https://www.oschina.net/action/ajax/get_more_news_list?newsType=&p=3"); Console.log(listContent); }
/** Validates a reservation delete request and resolves the plan for its reservation id (audited as a delete request). */
public Plan validateReservationDeleteRequest( ReservationSystem reservationSystem, ReservationDeleteRequest request) throws YarnException { return validateReservation(reservationSystem, request.getReservationId(), AuditConstants.DELETE_RESERVATION_REQUEST); }
// Deleting an unknown reservation must raise a YarnException that names the reservation id, and no plan is returned.
@Test public void testDeleteReservationDoesnotExist() { ReservationDeleteRequest request = new ReservationDeleteRequestPBImpl(); ReservationId rId = ReservationSystemTestUtil.getNewReservationId(); request.setReservationId(rId); when(rSystem.getQueueForReservation(rId)).thenReturn(null); Plan plan = null; try { plan = rrValidator.validateReservationDeleteRequest(rSystem, request); Assert.fail(); } catch (YarnException e) { Assert.assertNull(plan); String message = e.getMessage(); Assert .assertTrue(message.equals(MessageFormat .format( "The specified reservation with ID: {0} is unknown. Please try again with a valid reservation.", rId))); LOG.info(message); } }
/**
 * Returns the logical table names. NOTE(review): this exposes the internal collection
 * directly — callers must not mutate it; confirm whether an unmodifiable view is wanted.
 */
@Override public Collection<String> getEnhancedTableNames() { return logicalTableMapper; }
// The enhanced table names must come back as the single configured logical table.
@Test void assertGetEnhancedTableMapper() { assertThat(new LinkedList<>(ruleAttribute.getEnhancedTableNames()), is(Collections.singletonList("foo_tbl"))); }
/** Default backing-file length used by the mmap allocator; exposed for tests only. */
@VisibleForTesting public static long getDefaultFileLength() { return DEFAULT_FILE_LENGTH; }
// Exercises allocations around the default file-length boundary: two allocations fit in
// one backing file, a third opens a second file, and bytes written to each buffer survive.
@Test public void testCornerConditions() throws Exception { final String segmentName = "someSegment"; PinotDataBufferMemoryManager memoryManager = new MmapMemoryManager(_tmpDir, segmentName); final long s1 = MmapMemoryManager.getDefaultFileLength() - 1; final long s2 = 1; final long s3 = 100 * 1024 * 1024; final String colName = "col"; final byte v1 = 56; final byte v2 = 11; final byte v3 = 32; // Allocate a buffer 1 less than the default file length, and write the last byte of the buffer. PinotDataBuffer b1 = memoryManager.allocate(s1, colName); ByteBuffer bb1 = b1.toDirectByteBuffer(0, (int) s1); bb1.put((int) s1 - 1, v1); // Allocate another buffer that is 1 byte in size, should be in the same file. // Write a value in the byte. PinotDataBuffer b2 = memoryManager.allocate(s2, colName); ByteBuffer bb2 = b2.toDirectByteBuffer(0, (int) s2); bb2.put((int) s2 - 1, v2); // Verify that there is only one file. File dir = new File(_tmpDir); Assert.assertEquals(dir.listFiles().length, 1); // Now allocate another buffer that will open a second file, write a value in the first byte of the buffer. PinotDataBuffer b3 = memoryManager.allocate(s3, colName); ByteBuffer bb3 = b3.toDirectByteBuffer(0, (int) s3); bb3.put(0, v3); // Ensure that there are 2 files. Assert.assertEquals(dir.listFiles().length, 2); // Make sure that the values written are preserved. Assert.assertEquals(bb1.get((int) s1 - 1), v1); Assert.assertEquals(bb2.get((int) s2 - 1), v2); Assert.assertEquals(bb3.get(0), v3); memoryManager.close(); List<String> allocationContexts = PinotDataBuffer.getBufferInfo(); Assert.assertEquals(allocationContexts.size(), 0); }
/** Maps the create request to an OperateLogDO and persists it. */
@Override public void createOperateLog(OperateLogCreateReqDTO createReqDTO) { OperateLogDO log = BeanUtils.toBean(createReqDTO, OperateLogDO.class); operateLogMapper.insert(log); }
// The persisted operate-log row must mirror the request DTO's fields.
@Test public void testCreateOperateLog() { OperateLogCreateReqDTO reqVO = RandomUtils.randomPojo(OperateLogCreateReqDTO.class); // 调研 operateLogServiceImpl.createOperateLog(reqVO); // 断言 OperateLogDO operateLogDO = operateLogMapper.selectOne(null); assertPojoEquals(reqVO, operateLogDO); }
/**
 * Returns true when the given logic table names are non-empty and all belong to the
 * same binding table rule; comparison is case-insensitive via a CASE_INSENSITIVE_ORDER
 * TreeSet of the rule's logic tables.
 */
public boolean isAllBindingTables(final Collection<String> logicTableNames) { if (logicTableNames.isEmpty()) { return false; } Optional<BindingTableRule> bindingTableRule = findBindingTableRule(logicTableNames); if (!bindingTableRule.isPresent()) { return false; } Collection<String> result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); result.addAll(bindingTableRule.get().getAllLogicTables()); return !result.isEmpty() && result.containsAll(logicTableNames); }
// A join query without a join condition must not be treated as an all-binding-table query.
@Test void assertIsAllBindingTableWithJoinQueryWithoutJoinCondition() { SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS); when(sqlStatementContext.isContainsJoinQuery()).thenReturn(true); when(sqlStatementContext.getSqlStatement()).thenReturn(mock(SelectStatement.class)); when(sqlStatementContext.getDatabaseType()).thenReturn(TypedSPILoader.getService(DatabaseType.class, "FIXTURE")); when(sqlStatementContext.getTablesContext().getSchemaName()).thenReturn(Optional.empty()); ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getName()).thenReturn("db_schema"); assertFalse( createMaximumShardingRule().isAllBindingTables(database, sqlStatementContext, Arrays.asList("logic_Table", "sub_Logic_Table"))); }
/**
 * Adds the endpoint headers declared by the endpoint's headers class to the component
 * model; logs at debug level when no headers class is declared (void.class) or when
 * the class yields no detectable headers.
 */
void addEndpointHeaders(ComponentModel componentModel, UriEndpoint uriEndpoint, String scheme) { final Class<?> headersClass = uriEndpoint.headersClass(); if (headersClass == void.class) { getLog().debug(String.format("The endpoint %s has not defined any headers class", uriEndpoint.scheme())); return; } if (!addEndpointHeaders(componentModel, scheme, headersClass, uriEndpoint.headersNameProvider())) { getLog().debug(String.format("No headers have been detected in the headers class %s", headersClass.getName())); } }
// Improperly defined headers must be ignored: no endpoint headers are added to the model.
@Test void testHeadersNotProperlyDefinedAreIgnored() { mojo.addEndpointHeaders(model, SomeEndpointWithBadHeaders.class.getAnnotation(UriEndpoint.class), "some"); assertEquals(0, model.getEndpointHeaders().size()); }
/** JDBC URL prefixes recognized for SQL Server (legacy Microsoft prefix plus the current one). */
@Override public Collection<String> getJdbcUrlPrefixes() { return Arrays.asList("jdbc:microsoft:sqlserver:", "jdbc:sqlserver:"); }
// Prefix list must include both the legacy and the current SQL Server JDBC prefixes, in order.
@Test void assertGetJdbcUrlPrefixes() { assertThat(TypedSPILoader.getService(DatabaseType.class, "SQLServer").getJdbcUrlPrefixes(), is(Arrays.asList("jdbc:microsoft:sqlserver:", "jdbc:sqlserver:"))); }
/**
 * Returns the notAfter time (epoch millis) of the CA certificate stored under CA_CRT in
 * the CA secret; throws a RuntimeException naming the secret when the entry is missing.
 */
public long getCertificateExpirationDateEpoch() { var cert = cert(caCertSecret, CA_CRT); if (cert == null) { throw new RuntimeException(CA_CRT + " does not exist in the secret " + caCertSecret); } return cert.getNotAfter().getTime(); }
// A missing ca.crt entry must raise a RuntimeException with the descriptive message.
@Test @DisplayName("Should raise RuntimeException when certificate is not present") void shouldReturnZeroWhenCertificateNotPresent() { Exception exception = assertThrows(RuntimeException.class, () -> ca.getCertificateExpirationDateEpoch()); assertEquals("ca.crt does not exist in the secret null", exception.getMessage()); }
/** Entry point for the FindFiles fluent builder over the given table. */
public static Builder in(Table table) { return new Builder(table); }
// inPartition must restrict the collected files to exactly the requested partitions (buckets 1 and 2).
@TestTemplate public void testInPartition() { table .newAppend() .appendFile(FILE_A) // bucket 0 .appendFile(FILE_B) // bucket 1 .appendFile(FILE_C) // bucket 2 .appendFile(FILE_D) // bucket 3 .commit(); Iterable<DataFile> files = FindFiles.in(table) .inPartition(table.spec(), StaticDataTask.Row.of(1)) .inPartition(table.spec(), StaticDataTask.Row.of(2)) .collect(); assertThat(pathSet(files)).isEqualTo(pathSet(FILE_B, FILE_C)); }
/**
 * Builds the argparse4j parser for producer-performance. The payload options
 * (--record-size / --payload-file / --payload-monotonic) form a required mutually
 * exclusive group; transactional options (--transactional-id,
 * --transaction-duration-ms) are optional with documented precedence and defaults.
 */
static ArgumentParser argParser() { ArgumentParser parser = ArgumentParsers .newArgumentParser("producer-performance") .defaultHelp(true) .description("This tool is used to verify the producer performance. To enable transactions, " + "you can specify a transaction id or set a transaction duration using --transaction-duration-ms. " + "There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, " + "set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>."); MutuallyExclusiveGroup payloadOptions = parser .addMutuallyExclusiveGroup() .required(true) .description("either --record-size or --payload-file must be specified but not both."); parser.addArgument("--topic") .action(store()) .required(true) .type(String.class) .metavar("TOPIC") .help("produce messages to this topic"); parser.addArgument("--num-records") .action(store()) .required(true) .type(Long.class) .metavar("NUM-RECORDS") .dest("numRecords") .help("number of messages to produce"); payloadOptions.addArgument("--record-size") .action(store()) .required(false) .type(Integer.class) .metavar("RECORD-SIZE") .dest("recordSize") .help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file " + "or --payload-monotonic."); payloadOptions.addArgument("--payload-file") .action(store()) .required(false) .type(String.class) .metavar("PAYLOAD-FILE") .dest("payloadFile") .help("file to read the message payloads from. This works only for UTF-8 encoded text files. " + "Payloads will be read from this file and a payload will be randomly selected when sending messages. " + "Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic."); payloadOptions.addArgument("--payload-monotonic") .action(storeTrue()) .type(Boolean.class) .metavar("PAYLOAD-MONOTONIC") .dest("payloadMonotonic") .help("payload is monotonically increasing integer. 
Note that you must provide exactly one of --record-size " + "or --payload-file or --payload-monotonic."); parser.addArgument("--payload-delimiter") .action(store()) .required(false) .type(String.class) .metavar("PAYLOAD-DELIMITER") .dest("payloadDelimiter") .setDefault("\\n") .help("provides delimiter to be used when --payload-file is provided. " + "Defaults to new line. " + "Note that this parameter will be ignored if --payload-file is not provided."); parser.addArgument("--throughput") .action(store()) .required(true) .type(Double.class) .metavar("THROUGHPUT") .help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling."); parser.addArgument("--producer-props") .nargs("+") .required(false) .metavar("PROP-NAME=PROP-VALUE") .type(String.class) .dest("producerConfig") .help("kafka producer related configuration properties like bootstrap.servers,client.id etc. " + "These configs take precedence over those passed via --producer.config."); parser.addArgument("--producer.config") .action(store()) .required(false) .type(String.class) .metavar("CONFIG-FILE") .dest("producerConfigFile") .help("producer config properties file."); parser.addArgument("--print-metrics") .action(storeTrue()) .type(Boolean.class) .metavar("PRINT-METRICS") .dest("printMetrics") .help("print out metrics at the end of the test."); parser.addArgument("--transactional-id") .action(store()) .required(false) .type(String.class) .metavar("TRANSACTIONAL-ID") .dest("transactionalId") .help("The transactional id to use. This config takes precedence over the transactional.id " + "specified via --producer.config or --producer-props. 
Note that if the transactional id " + "is not specified while --transaction-duration-ms is provided, the default value for the " + "transactional id will be performance-producer- followed by a random uuid."); parser.addArgument("--transaction-duration-ms") .action(store()) .required(false) .type(Long.class) .metavar("TRANSACTION-DURATION") .dest("transactionDurationMs") .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. " + "The value should be greater than 0. If the transactional id is specified via --producer-props, " + "--producer.config, or --transactional-id but --transaction-duration-ms is not specified, " + "the default value will be 3000."); return parser; }
// --record-size, --payload-file, and --payload-monotonic must be mutually exclusive at parse time.
@Test public void testMutuallyExclusiveGroup() { String[] args1 = new String[] { "--topic", "Hello-Kafka", "--num-records", "5", "--throughput", "100", "--record-size", "100", "--payload-monotonic", "--producer-props", "bootstrap.servers=localhost:9000"}; ArgumentParser parser1 = ProducerPerformance.argParser(); ArgumentParserException thrown = assertThrows(ArgumentParserException.class, () -> parser1.parseArgs(args1)); assertEquals("argument --payload-monotonic: not allowed with argument --record-size", thrown.getMessage()); String[] args2 = new String[] { "--topic", "Hello-Kafka", "--num-records", "5", "--throughput", "100", "--payload-file", "abc.txt", "--payload-monotonic", "--producer-props", "bootstrap.servers=localhost:9000"}; ArgumentParser parser2 = ProducerPerformance.argParser(); thrown = assertThrows(ArgumentParserException.class, () -> parser2.parseArgs(args2)); assertEquals("argument --payload-monotonic: not allowed with argument --payload-file", thrown.getMessage()); }
/**
 * Upgrades a version-0 configuration to the fetchTo/dataToFetch form; configurations
 * from later versions are returned unchanged with a "no upgrade applied" flag.
 */
@Override public TbPair<Boolean, JsonNode> upgrade(int fromVersion, JsonNode oldConfiguration) throws TbNodeException { return fromVersion == 0 ? upgradeToUseFetchToAndDataToFetch(oldConfiguration) : new TbPair<>(false, oldConfiguration); }
// A version-0 legacy config must upgrade to the default new-format configuration with the "upgraded" flag set.
@Test public void givenOldConfig_whenUpgrade_thenShouldReturnTrueResultWithNewConfig() throws Exception { var defaultConfig = new TbGetEntityDataNodeConfiguration().defaultConfiguration(); var node = new TbGetCustomerAttributeNode(); String oldConfig = "{\"attrMapping\":{\"alarmThreshold\":\"threshold\"},\"telemetry\":false}"; JsonNode configJson = JacksonUtil.toJsonNode(oldConfig); TbPair<Boolean, JsonNode> upgrade = node.upgrade(0, configJson); Assertions.assertTrue(upgrade.getFirst()); Assertions.assertEquals(defaultConfig, JacksonUtil.treeToValue(upgrade.getSecond(), defaultConfig.getClass())); }
/**
 * Serializes the config to XML on the given stream: rejects merged (non-local) configs,
 * optionally runs preprocessing and validators, then performs XSD and DOM validation on
 * the generated document before writing it out.
 */
public void write(CruiseConfig configForEdit, OutputStream output, boolean skipPreprocessingAndValidation) throws Exception { LOGGER.debug("[Serializing Config] Starting to write. Validation skipped? {}", skipPreprocessingAndValidation); MagicalGoConfigXmlLoader loader = new MagicalGoConfigXmlLoader(configCache, registry); if (!configForEdit.getOrigin().isLocal()) { throw new GoConfigInvalidException(configForEdit, "Attempted to save merged configuration with partials"); } if (!skipPreprocessingAndValidation) { loader.preprocessAndValidate(configForEdit); LOGGER.debug("[Serializing Config] Done with cruise config validators."); } Document document = createEmptyCruiseConfigDocument(); write(configForEdit, document.getRootElement(), configCache, registry); LOGGER.debug("[Serializing Config] XSD and DOM validation."); verifyXsdValid(document); MagicalGoConfigXmlLoader.validateDom(document.getRootElement(), registry); LOGGER.info("[Serializing Config] Generating config partial."); XmlUtils.writeXml(document, output); LOGGER.debug("[Serializing Config] Finished writing config partial."); }
// A package material referencing a nonexistent package id must fail XSD identity-constraint validation.
@Test public void shouldFailValidationIfPackageTypeMaterialForPipelineHasARefToNonExistantPackage() throws Exception { String packageId = "does-not-exist"; PackageMaterialConfig packageMaterialConfig = new PackageMaterialConfig(packageId); PackageRepository repository = com.thoughtworks.go.domain.packagerepository.PackageRepositoryMother.create("repo-id", "repo-name", "pluginid", "version", new Configuration(com.thoughtworks.go.domain.packagerepository.ConfigurationPropertyMother.create("k1", false, "v1"))); packageMaterialConfig.setPackageDefinition( com.thoughtworks.go.domain.packagerepository.PackageDefinitionMother.create("does-not-exist", "package-name", new Configuration(com.thoughtworks.go.domain.packagerepository.ConfigurationPropertyMother.create("k2", false, "v2")), repository)); JobConfig jobConfig = new JobConfig("ls"); jobConfig.addTask(new AntTask()); cruiseConfig.addPipeline("default", com.thoughtworks.go.helper.PipelineConfigMother.pipelineConfig("test", new MaterialConfigs(packageMaterialConfig), new JobConfigs(jobConfig))); try { xmlWriter.write(cruiseConfig, output, false); fail("should not allow this"); } catch (XsdValidationException exception) { assertThat(exception.getMessage(), is("Key 'packageIdReferredByMaterial' with value 'does-not-exist' not found for identity constraint of element 'cruise'.")); } }
/**
 * Computes the great-circle distance between two (lat, lon) points using the
 * haversine formula, in the unit selected by {@code units}.
 */
@SuppressWarnings("MethodMayBeStatic") @Udf(description = "The 2 input points should be specified as (lat, lon) pairs, measured" + " in decimal degrees. An optional fifth parameter allows to specify either \"MI\" (miles)" + " or \"KM\" (kilometers) as the desired unit for the output measurement. Default is KM.") public Double geoDistance( @UdfParameter(description = "The latitude of the first point in decimal degrees.") final double lat1, @UdfParameter(description = "The longitude of the first point in decimal degrees.") final double lon1, @UdfParameter(description = "The latitude of the second point in decimal degrees.") final double lat2, @UdfParameter(description = "The longitude of the second point in decimal degrees.") final double lon2, @UdfParameter(description = "The units for the return value. Either MILES or KM.") final String units ) {
    validateLatLonValues(lat1, lon1, lat2, lon2);
    final double radius = selectEarthRadiusToUse(units);
    final double lat1Rad = Math.toRadians(lat1);
    final double lat2Rad = Math.toRadians(lat2);
    // Haversine: a = hav(dLat) + cos(lat1) * cos(lat2) * hav(dLon)
    final double a = haversin(Math.toRadians(lat2 - lat1))
        + haversin(Math.toRadians(lon2 - lon1)) * Math.cos(lat1Rad) * Math.cos(lat2Rad);
    // Central angle between the two points, scaled by the chosen earth radius.
    final double centralAngle = 2 * Math.asin(Math.sqrt(a));
    return centralAngle * radius;
}
@Test
public void shouldComputeDistanceSouthHemisphere() {
    // Two points, both in the southern hemisphere.
    final double lat1 = -33.9323, lon1 = 18.4197;
    final double lat2 = -33.8666, lon2 = 151.1;
    // Default unit is KM; the explicit "KM" call must agree with it.
    assertEquals(11005.2330, (double) distanceUdf.geoDistance(lat1, lon1, lat2, lon2), 0.5);
    assertEquals(11005.2330, (double) distanceUdf.geoDistance(lat1, lon1, lat2, lon2, "KM"), 0.5);
    assertEquals(6838.7564, (double) distanceUdf.geoDistance(lat1, lon1, lat2, lon2, "MI"), 0.5);
}
// Parses game/spam chat messages to track the remaining charges of various
// chargeable items (dodgy necklace, binding necklace, ring of forging, amulets,
// chronicle, slayer bracelets, blood essence, bracelet of clay) and to fire the
// configured notifications when an item breaks. The else-if chain is
// order-sensitive: break messages are matched before check/use messages so the
// charge counters resynchronize correctly.
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM) { String message = Text.removeTags(event.getMessage()); Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message); Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message); Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message); Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message); Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message); Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message); Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message); Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message); Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message); Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message); Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message); Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message); Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message); Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message); Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message); Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message); Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message); Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message); Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message); Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message); if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE)) { notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered"); }
// Dodgy necklace / amulets: break resets to the max, check/use messages carry the count.
else if (dodgyBreakMatcher.find()) { notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust."); updateDodgyNecklaceCharges(MAX_DODGY_CHARGES); } else if (dodgyCheckMatcher.find()) { updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1))); } else if (dodgyProtectMatcher.find()) { updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1))); } else if (amuletOfChemistryCheckMatcher.find()) { updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1))); } else if (amuletOfChemistryUsedMatcher.find()) { final String match = amuletOfChemistryUsedMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateAmuletOfChemistryCharges(charges); } else if (amuletOfChemistryBreakMatcher.find()) { notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust."); updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES); } else if (amuletOfBountyCheckMatcher.find()) { updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1))); } else if (amuletOfBountyUsedMatcher.find()) { updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1))); } else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT)) { updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES); } else if (message.contains(BINDING_BREAK_TEXT)) { notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT); // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1); } else if (bindingNecklaceUsedMatcher.find()) { final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); if (equipment.contains(ItemID.BINDING_NECKLACE)) { updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1); } } else if (bindingNecklaceCheckMatcher.find()) { final String match =
bindingNecklaceCheckMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateBindingNecklaceCharges(charges); } else if (ringOfForgingCheckMatcher.find()) { final String match = ringOfForgingCheckMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateRingOfForgingCharges(charges); } else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY)) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); // Determine if the player smelted with a Ring of Forging equipped. if (equipment == null) { return; } if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1)) { int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES); updateRingOfForgingCharges(charges); } } else if (message.equals(RING_OF_FORGING_BREAK_TEXT)) { notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted."); // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1); } else if (chronicleAddMatcher.find()) { final String match = chronicleAddMatcher.group(1); if (match.equals("one")) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1); } else { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match)); } } else if (chronicleUseAndCheckMatcher.find()) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1))); } else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT)) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1); } else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT)) {
// Slayer bracelets: a null capture group on the activate pattern means the bracelet broke.
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0); } else if (message.equals(CHRONICLE_FULL_TEXT)) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000); } else if (slaughterActivateMatcher.find()) { final String found = slaughterActivateMatcher.group(1); if (found == null) { updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES); notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT); } else { updateBraceletOfSlaughterCharges(Integer.parseInt(found)); } } else if (slaughterCheckMatcher.find()) { updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1))); } else if (expeditiousActivateMatcher.find()) { final String found = expeditiousActivateMatcher.group(1); if (found == null) { updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES); notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT); } else { updateExpeditiousBraceletCharges(Integer.parseInt(found)); } } else if (expeditiousCheckMatcher.find()) { updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1))); } else if (bloodEssenceCheckMatcher.find()) { updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1))); } else if (bloodEssenceExtractMatcher.find()) { updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1))); } else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT)) { updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES); } else if (braceletOfClayCheckMatcher.find()) { updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1))); } else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN)) { final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); // Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY)) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); // Charge is not used if only 1 inventory slot is available when mining in Prifddinas boolean ignore = inventory != null && inventory.count() == 27 && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN); if (!ignore) { int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES); updateBraceletOfClayCharges(charges); } } } else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT)) { notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust"); updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES); } } }
@Test
public void testChronicleAddFull() {
    final ChatMessage message = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHRONICLE_ADD_FULL, "", 0);
    itemChargePlugin.onChatMessage(message);
    // Filling the chronicle sets its stored charge count to 1000.
    verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_CHRONICLE, 1000);
}
/** Returns the annotation instance this attribute was read from. */
@Override
public Annotation getAnnotation() {
    return annotation;
}
@Test public void baseInfoTest() { final Annotation annotation = ClassForTest1.class.getAnnotation(AnnotationForTest.class); final Method attribute = ReflectUtil.getMethod(AnnotationForTest.class, "value"); final CacheableAnnotationAttribute annotationAttribute = new CacheableAnnotationAttribute(annotation, attribute); // 注解属性 assertEquals(annotation, annotationAttribute.getAnnotation()); assertEquals(annotation.annotationType(), annotationAttribute.getAnnotationType()); // 方法属性 assertEquals(attribute.getName(), annotationAttribute.getAttributeName()); assertEquals(attribute.getReturnType(), annotationAttribute.getAttributeType()); }
/**
 * Sets the primary-key constraint name after validating it.
 *
 * @return this builder, for chaining
 */
public CreateTableBuilder withPkConstraintName(String pkConstraintName) {
    final String validated = validateConstraintName(pkConstraintName);
    this.pkConstraintName = validated;
    return this;
}
// Digits are legal in constraint names; this call must not throw.
@Test public void withPkConstraintName_does_not_fail_if_name_contains_numbers() { underTest.withPkConstraintName("a0123456789"); }
/**
 * Alters the given storage units: validates their connection properties and
 * privileges, then persists the change via the meta-data manager.
 *
 * @throws StorageUnitsOperateException when the underlying alter operation fails
 */
@Override
public void executeUpdate(final AlterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
    checkBefore(sqlStatement);
    Map<String, DataSourcePoolProperties> dataSourcePropsMap = DataSourceSegmentsConverter.convert(database.getProtocolType(), sqlStatement.getStorageUnits());
    validateHandler.validate(dataSourcePropsMap, getExpectedPrivileges(sqlStatement));
    try {
        contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().alterStorageUnits(database.getName(), dataSourcePropsMap);
    } catch (final SQLException | ShardingSphereExternalException ex) {
        // Wrap both driver-level and ShardingSphere-external failures uniformly.
        throw new StorageUnitsOperateException("alter", dataSourcePropsMap.keySet(), ex);
    }
}
@Test
void assertExecuteUpdateSuccess() {
    ResourceMetaData resourceMetaData = mock(ResourceMetaData.class, RETURNS_DEEP_STUBS);
    StorageUnit storageUnit = mock(StorageUnit.class, RETURNS_DEEP_STUBS);
    when(storageUnit.getConnectionProperties()).thenReturn(mockConnectionProperties("ds_0"));
    when(resourceMetaData.getStorageUnits()).thenReturn(Collections.singletonMap("ds_0", storageUnit));
    when(database.getResourceMetaData()).thenReturn(resourceMetaData);
    // Altering an existing, reachable storage unit must complete without throwing.
    ContextManager contextManager = mockContextManager(mock(MetaDataContexts.class, RETURNS_DEEP_STUBS));
    assertDoesNotThrow(() -> executor.executeUpdate(createAlterStorageUnitStatement("ds_0"), contextManager));
}
/**
 * Reports health details for the background job server.
 * <p>
 * A disabled server is considered healthy (UP); an enabled server is UP only
 * while it is actually running, otherwise DOWN.
 */
@Override
protected Map<String, String> getHealthInformation() {
    if (!configuration.getBackgroundJobServer().isEnabled()) {
        healthStatus = HealthStatus.UP;
        return mapOf("backgroundJobServer", "disabled");
    }
    if (backgroundJobServer.isRunning()) {
        healthStatus = HealthStatus.UP;
        return mapOf(
                "backgroundJobServer", "enabled",
                "backgroundJobServerStatus", "running"
        );
    }
    // Enabled but not running: the server stopped unexpectedly.
    healthStatus = HealthStatus.DOWN;
    return mapOf(
            "backgroundJobServer", "enabled",
            "backgroundJobServerStatus", "stopped"
    );
}
// A deliberately disabled background job server is not a failure condition,
// so the health indicator must report UP.
@Test void givenDisabledBackgroundJobServer_ThenHealthIsUp() { when(backgroundJobServerConfiguration.isEnabled()).thenReturn(false); jobRunrHealthIndicator.getHealthInformation(); assertThat(jobRunrHealthIndicator.getHealthStatus()).isEqualTo(HealthStatus.UP); }
/**
 * Resolves the effective start timestamp from either an absolute value or a
 * relative range (but not both).
 *
 * @param absoluteDateTime explicit start timestamp, or null
 * @param timeRange        relative offset from {@code now}, or null
 * @param now              reference point used when {@code timeRange} is given
 * @return the absolute timestamp, or {@code now - |timeRange|}
 * @throws IllegalArgumentException if both parameters are supplied
 */
@VisibleForTesting
ZonedDateTime resolveAbsoluteDateTime(ZonedDateTime absoluteDateTime, Duration timeRange, ZonedDateTime now) {
    if (timeRange == null) {
        return absoluteDateTime;
    }
    if (absoluteDateTime != null) {
        throw new IllegalArgumentException("Parameters 'startDate' and 'timeRange' are mutually exclusive");
    }
    // abs() tolerates negative durations supplied by callers.
    return now.minus(timeRange.abs());
}
@Test
void resolveAbsoluteDateTime() {
    final ZonedDateTime startDate = ZonedDateTime.of(2023, 2, 3, 4, 6, 10, 0, ZoneId.systemDefault());
    final ZonedDateTime now = ZonedDateTime.of(2024, 2, 3, 5, 6, 10, 0, ZoneId.systemDefault());
    final Duration timeRange = Duration.ofSeconds(5L);
    // An absolute timestamp alone is returned unchanged.
    assertThat(executionController.resolveAbsoluteDateTime(startDate, null, null), is(startDate));
    // A relative range alone is resolved against the reference time.
    assertThat(executionController.resolveAbsoluteDateTime(null, timeRange, now), is(now.minus(timeRange)));
    // Supplying both parameters is rejected.
    assertThrows(IllegalArgumentException.class, () -> executionController.resolveAbsoluteDateTime(startDate, timeRange, now));
}
/**
 * Collects the client-side metrics to report to the leading master.
 *
 * @return the gRPC metric list for the CLIENT instance type
 */
public static List<alluxio.grpc.Metric> reportClientMetrics() {
    final long startMs = System.currentTimeMillis();
    final List<alluxio.grpc.Metric> metrics = reportMetrics(InstanceType.CLIENT);
    LOG.debug("Get the client metrics list contains {} metrics to report to leading master in {}ms",
        metrics.size(), System.currentTimeMillis() - startMs);
    return metrics;
}
// Incremented counter values appear in the next report; the assertion that a
// second immediate call returns an empty list indicates that reporting resets
// the collected deltas (so the later inc(2) reports 2.0, not a running total).
@Test public void testReportClientMetrics() { String metricName = "Client.TestMetric"; Counter counter = MetricsSystem.counter(metricName); if (!MetricKey.isValid(metricName)) { MetricKey.register(new MetricKey.Builder(metricName) .setMetricType(MetricType.COUNTER).setIsClusterAggregated(true).build()); } counter.inc(5); assertEquals(5.0, MetricsSystem.reportClientMetrics().get(0).getValue(), 0); assertEquals(0, MetricsSystem.reportClientMetrics().size()); counter.inc(2); assertEquals(2.0, MetricsSystem.reportClientMetrics().get(0).getValue(), 0); assertEquals(0, MetricsSystem.reportClientMetrics().size()); }
/**
 * Opens a parse stream over the given input, delegating to the wrapped parser.
 *
 * @throws IOException if the underlying parser fails to open the input
 */
public Stream open(InputStream in) throws IOException {
    return delegate.open(in);
}
@Test
public void testParseMultipleJsons() throws Exception {
    final JsonParser parser = new JsonParser();
    // Two JSON objects back-to-back with no separator.
    final String input = "{\"col1\": 1}{\"col1\": 2}";
    try (JsonParser.Stream stream = parser.open(toInputStream(input))) {
        // Each object is returned as a successive stream element.
        assertEquals("{\"col1\":1}", stream.next().toJson());
        assertEquals("{\"col1\":2}", stream.next().toJson());
        // Exhaustion is signalled with null.
        assertNull(stream.next());
    }
}
/** Returns the fallback password algorithm configured for this factory. */
public PasswordAlgorithm defaultPasswordAlgorithm() {
    return this.defaultPasswordAlgorithm;
}
@Test
public void testDefaultPasswordAlgorithm() throws Exception {
    final PasswordAlgorithm algorithm = mock(PasswordAlgorithm.class);
    final PasswordAlgorithmFactory factory =
            new PasswordAlgorithmFactory(Collections.<String, PasswordAlgorithm>emptyMap(), algorithm);
    // The factory returns exactly the default it was constructed with.
    assertThat(factory.defaultPasswordAlgorithm()).isEqualTo(algorithm);
}
/**
 * Convenience overload that generates the entity-caps verification string using
 * the two-argument variant with a null second argument (its default).
 */
static CapsVersionAndHash generateVerificationString(DiscoverInfoView discoverInfo) { return generateVerificationString(discoverInfo, null); }
@Test
public void testSimpleGenerationExample() throws XmppStringprepException {
    final DiscoverInfo discoverInfo = createSimpleSamplePacket();
    final CapsVersionAndHash verification = EntityCapsManager.generateVerificationString(discoverInfo, StringUtils.SHA1);
    // Known-answer check for the SHA-1 verification string of the sample packet.
    assertEquals("QgayPKawpkPSDYmwT/WM94uAlu0=", verification.version);
}
/**
 * Loads all metrics from the paginated web service.
 *
 * @return a repository containing every fetched metric
 * @throws IllegalStateException if any page fails to load
 */
@Override
public MetricsRepository load() {
    final List<Metric> metrics = new ArrayList<>();
    try {
        loadFromPaginatedWs(metrics);
    } catch (Exception ex) {
        // Surface any transport/parsing failure as a fatal state error.
        throw new IllegalStateException("Unable to load metrics", ex);
    }
    return new MetricsRepository(metrics);
}
@Test
public void test() {
    final MetricsRepository repository = metricsRepositoryLoader.load();
    // Three metrics spread over two result pages.
    assertThat(repository.metrics()).hasSize(3);
    // Exactly the two pages were fetched, and nothing else.
    WsTestUtil.verifyCall(wsClient, WS_URL + "1");
    WsTestUtil.verifyCall(wsClient, WS_URL + "2");
    verifyNoMoreInteractions(wsClient);
}
public String getId(String name) { // Use the id directly if it is unique and the length is less than max if (name.length() <= maxHashLength && usedIds.add(name)) { return name; } // Pick the last bytes of hashcode and use hex format final String hexString = Integer.toHexString(name.hashCode()); final String origId = hexString.length() <= maxHashLength ? hexString : hexString.substring(Math.max(0, hexString.length() - maxHashLength)); String id = origId; int suffixNum = 2; while (!usedIds.add(id)) { // A duplicate! Retry. id = origId + "-" + suffixNum++; } LOG.info("Name {} is mapped to id {}", name, id); return id; }
@Test
public void testGetId() {
    final HashIdGenerator generator = new HashIdGenerator();
    // Six transform names must yield six distinct ids (no collisions collapse the set).
    final Set<String> ids = ImmutableSet.of(
        generator.getId(Count.perKey().getName()),
        generator.getId(MapElements.into(null).getName()),
        generator.getId(Count.globally().getName()),
        generator.getId(Combine.perKey(mock(SerializableFunction.class)).getName()),
        generator.getId(Min.perKey().getName()),
        generator.getId(Max.globally().getName()));
    Assert.assertEquals(6, ids.size());
}
/**
 * Realizes each element of {@code objs} into its corresponding target type.
 *
 * @param objs  values to convert
 * @param types target types, positionally matching {@code objs}
 * @return a new array with each element realized
 * @throws IllegalArgumentException if the two arrays differ in length
 */
public static Object[] realize(Object[] objs, Class<?>[] types) {
    if (objs.length != types.length) {
        throw new IllegalArgumentException("args.length != types.length");
    }
    final Object[] results = new Object[objs.length];
    for (int i = 0; i < objs.length; i++) {
        results[i] = realize(objs[i], types[i]);
    }
    return results;
}
// A map carrying a "name" key realizes into the matching enum constant.
@Test
void testMapToEnum() throws Exception {
    Map map = new HashMap();
    map.put("name", "MONDAY");
    Object o = PojoUtils.realize(map, Day.class);
    // JUnit's assertEquals takes (expected, actual); the original had them
    // reversed, which produces a misleading message on failure.
    assertEquals(Day.MONDAY, o);
}
/**
 * Removes the stored index range of every closed index that belongs to a
 * managed index set; unmanaged indices are skipped. A successful removal is
 * recorded as an audit event.
 */
@Subscribe
@AllowConcurrentEvents
public void handleIndexClosing(IndicesClosedEvent event) {
    for (String index : event.indices()) {
        if (!indexSetRegistry.isManagedIndex(index)) {
            LOG.debug("Not handling closed index <{}> because it's not managed by any index set.", index);
            continue;
        }
        LOG.debug("Index \"{}\" has been closed. Removing index range.", index);
        final boolean removed = remove(index);
        if (removed) {
            // Audit only actual deletions, not no-op removals.
            auditEventSender.success(AuditActor.system(nodeId), ES_INDEX_RANGE_DELETE, ImmutableMap.of("index_name", index));
        }
    }
}
// Closing a managed index must delete its stored index range;
// the fixture provides two ranges, one of which belongs to graylog_1.
@Test
@MongoDBFixtures("MongoIndexRangeServiceTest.json")
public void testHandleIndexClosing() throws Exception {
    when(indexSetRegistry.isManagedIndex("graylog_1")).thenReturn(true);
    assertThat(indexRangeService.findAll()).hasSize(2);
    localEventBus.post(IndicesClosedEvent.create(Collections.singleton("graylog_1")));
    // The range belonging to the closed index is gone.
    assertThat(indexRangeService.findAll()).hasSize(1);
}
/**
 * Delegates a name lookup while measuring its duration.
 * <p>
 * The duty-cycle tracker is updated before and after the delegate call, and the
 * outcome (including failures, where the resolved name stays null) is logged in
 * the finally block so timing is recorded even when the delegate throws.
 */
public String lookup(final String name, final String uriParamName, final boolean isReLookup) {
    final long startNs = clock.nanoTime();
    maxTimeTracker.update(startNs);
    String result = null;
    try {
        result = delegateResolver.lookup(name, uriParamName, isReLookup);
        return result;
    } finally {
        final long stopNs = clock.nanoTime();
        maxTimeTracker.measureAndUpdate(stopNs);
        logLookup(delegateResolver.getClass().getSimpleName(), stopNs - startNs, name, isReLookup, result);
    }
}
// Verifies the exact interaction order of the time-tracking wrapper:
// clock read -> tracker.update -> delegate lookup -> clock read -> measureAndUpdate.
@Test void lookupShouldMeasureExecutionTime() { final NameResolver delegateResolver = mock(NameResolver.class); when(delegateResolver.lookup(anyString(), anyString(), anyBoolean())) .thenAnswer(invocation -> { final String name = invocation.getArgument(0); return name.substring(0, name.indexOf(':')); }); final NanoClock clock = mock(NanoClock.class); final long beginNs = 0; final long endNs = 123456789; when(clock.nanoTime()).thenReturn(beginNs, endNs); final DutyCycleTracker maxTime = mock(DutyCycleTracker.class); final TimeTrackingNameResolver resolver = new TimeTrackingNameResolver(delegateResolver, clock, maxTime); final String name = "my-host:8080"; final String endpoint = "endpoint"; final boolean isReLookup = false; assertEquals("my-host", resolver.lookup(name, endpoint, isReLookup)); final InOrder inOrder = inOrder(delegateResolver, clock, maxTime); inOrder.verify(clock).nanoTime(); inOrder.verify(maxTime).update(beginNs); inOrder.verify(delegateResolver).lookup(name, endpoint, isReLookup); inOrder.verify(clock).nanoTime(); inOrder.verify(maxTime).measureAndUpdate(endNs); inOrder.verifyNoMoreInteractions(); }
/**
 * Converts a Connect schema into a KSQL row schema.
 *
 * @param schema the Connect schema to translate
 * @return the translated struct schema
 * @throws KsqlException if the root is not a struct, no supported columns
 *         remain after translation, or the root type is unsupported
 */
public Schema toKsqlSchema(final Schema schema) {
    try {
        final Schema rowSchema = toKsqlFieldSchema(schema);
        // Row schemas must be structs: each field becomes a column.
        if (rowSchema.type() != Schema.Type.STRUCT) {
            throw new KsqlException("KSQL stream/table schema must be structured");
        }
        // Unsupported field types are dropped during translation; an empty
        // result means nothing usable survived.
        if (rowSchema.fields().isEmpty()) {
            throw new KsqlException("Schema does not include any columns with "
                + "types that ksqlDB supports."
                + System.lineSeparator()
                + "schema: " + FORMATTER.format(schema));
        }
        return rowSchema;
    } catch (final UnsupportedTypeException e) {
        throw new KsqlException("Unsupported type at root of schema: " + e.getMessage(), e);
    }
}
// A struct nested inside an array must survive translation with its element
// type, field names (via the name translator), and field schemas intact.
@Test public void shouldTranslateStructInsideArray() { final Schema connectSchema = SchemaBuilder .struct() .field( "arrayField", SchemaBuilder.array( SchemaBuilder.struct() .field("innerIntField", Schema.OPTIONAL_INT32_SCHEMA) .build())) .build(); final Schema ksqlSchema = translator.toKsqlSchema(connectSchema); assertThat(ksqlSchema.field(nameTranslator.apply("arrayField")), notNullValue()); final Schema arraySchema = ksqlSchema.field(nameTranslator.apply("arrayField")).schema(); assertThat(arraySchema.type(), equalTo(Schema.Type.ARRAY)); assertThat(arraySchema.valueSchema().type(), equalTo(Schema.Type.STRUCT)); assertThat(arraySchema.valueSchema().fields().size(), equalTo(1)); assertThat(arraySchema.valueSchema().fields().get(0).name(), equalTo(nameTranslator.apply("innerIntField"))); assertThat(arraySchema.valueSchema().fields().get(0).schema(), equalTo(Schema.OPTIONAL_INT32_SCHEMA)); }
/**
 * Maps a JCA padding string to a padding model node.
 *
 * @param str the padding name, possibly null
 * @return the mapped padding, or empty when {@code str} is null or unmapped
 */
@Nonnull
@Override
public Optional<? extends Padding> parse(
        @Nullable String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    // OAEP paddings carry extra parameters handled by a dedicated mapper.
    if (str.toUpperCase().contains("OAEP")) {
        return new JcaOAEPPaddingMapper().parse(str, detectionLocation);
    }
    return map(str, detectionLocation);
}
@Test
void padding() {
    DetectionLocation location =
            new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
    JcaPaddingMapper mapper = new JcaPaddingMapper();
    Optional<? extends INode> parsed = mapper.parse("PKCS1Padding", location);
    // "PKCS1Padding" maps onto the PKCS1 padding node.
    assertThat(parsed).isPresent();
    assertThat(parsed.get()).isInstanceOf(PKCS1.class);
}
// One iteration of the sender loop. When a transaction manager is present it is
// serviced first: pending sequences are resolved, fatal transactional errors
// abort in-flight batches, abortable authorization errors short-circuit the
// iteration, the idempotent epoch is bumped when a new producer id is needed,
// and any pending transactional request is sent before produce data. Only then
// is produce data drained from the accumulator and the network client polled.
// AuthenticationException is caught (already logged elsewhere) solely to mark
// the transaction manager as failed.
void runOnce() { if (transactionManager != null) { try { transactionManager.maybeResolveSequences(); RuntimeException lastError = transactionManager.lastError(); // do not continue sending if the transaction manager is in a failed state if (transactionManager.hasFatalError()) { if (lastError != null) maybeAbortBatches(lastError); client.poll(retryBackoffMs, time.milliseconds()); return; } if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) { return; } // Check whether we need a new producerId. If so, we will enqueue an InitProducerId // request which will be sent below transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); if (maybeSendAndPollTransactionalRequest()) { return; } } catch (AuthenticationException e) { // This is already logged as error, but propagated here to perform any clean ups. log.trace("Authentication exception while processing transactional request", e); transactionManager.authenticationFailed(e); } } long currentTimeMs = time.milliseconds(); long pollTimeout = sendProducerData(currentTimeMs); client.poll(pollTimeout, currentTimeMs); }
// When the producer epoch is exhausted (Short.MAX_VALUE) and the connection
// drops, the sender must obtain a new producer id and retry the in-flight
// batch under the new id rather than failing the record future.
@Test public void testRetryWhenProducerIdChanges() throws InterruptedException { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Short.MAX_VALUE, Errors.NONE); assertTrue(transactionManager.hasProducerId()); int maxRetries = 10; Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); Future<RecordMetadata> responseFuture = appendToAccumulator(tp0); sender.runOnce(); // connect. sender.runOnce(); // send. String id = client.requests().peek().destination(); Node node = new Node(Integer.parseInt(id), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); client.disconnect(id); assertEquals(0, client.inFlightRequestCount()); assertFalse(client.isReady(node, time.milliseconds()), "Client ready status should be false"); sender.runOnce(); // receive error sender.runOnce(); // reset producer ID because epoch is maxed out prepareAndReceiveInitProducerId(producerId + 1, Errors.NONE); sender.runOnce(); // nothing to do, since the pid has changed. We should check the metrics for errors. assertEquals(1, client.inFlightRequestCount(), "Expected requests to be retried after pid change"); assertFalse(responseFuture.isDone()); assertEquals(1, (long) transactionManager.sequenceNumber(tp0)); }
/** Joins the stream's elements by delegating to the iterator-based overload. */
public String join(final Stream<?> parts) {
    return this.join(parts.iterator());
}
@Test
public void shouldHandleFourItems() {
    // All but the final separator are commas; the last one is "or".
    final ImmutableList<Integer> items = ImmutableList.of(1, 2, 3, 4);
    assertThat(joiner.join(items), is("1, 2, 3 or 4"));
}
/**
 * Creates a flat-file store over the given config, metadata store and executor,
 * with an initially empty queue-to-file map.
 */
public FlatFileStore(MessageStoreConfig storeConfig, MetadataStore metadataStore, MessageStoreExecutor executor) {
    this.storeConfig = storeConfig;
    this.metadataStore = metadataStore;
    this.executor = executor;
    this.flatFileConcurrentMap = new ConcurrentHashMap<>();
    this.flatFileFactory = new FlatFileFactory(metadataStore, storeConfig);
}
// End-to-end lifecycle of the flat-file store: empty load, file creation and
// retrieval per queue, recovery of the four files after restart, destruction of
// two files, a failed load when recovery throws, and a final destroy() that
// empties the store.
@Test public void flatFileStoreTest() { // Empty recover MessageStoreExecutor executor = new MessageStoreExecutor(); FlatFileStore fileStore = new FlatFileStore(storeConfig, metadataStore, executor); Assert.assertTrue(fileStore.load()); Assert.assertEquals(storeConfig, fileStore.getStoreConfig()); Assert.assertEquals(metadataStore, fileStore.getMetadataStore()); Assert.assertNotNull(fileStore.getFlatFileFactory()); for (int i = 0; i < 4; i++) { MessageQueue mq = new MessageQueue("flatFileStoreTest", storeConfig.getBrokerName(), i); FlatMessageFile flatFile = fileStore.computeIfAbsent(mq); FlatMessageFile flatFileGet = fileStore.getFlatFile(mq); Assert.assertEquals(flatFile, flatFileGet); } Assert.assertEquals(4, fileStore.deepCopyFlatFileToList().size()); fileStore.shutdown(); fileStore = new FlatFileStore(storeConfig, metadataStore, executor); Assert.assertTrue(fileStore.load()); Assert.assertEquals(4, fileStore.deepCopyFlatFileToList().size()); for (int i = 1; i < 3; i++) { MessageQueue mq = new MessageQueue("flatFileStoreTest", storeConfig.getBrokerName(), i); fileStore.destroyFile(mq); } Assert.assertEquals(2, fileStore.deepCopyFlatFileToList().size()); fileStore.shutdown(); FlatFileStore fileStoreSpy = Mockito.spy(fileStore); Mockito.when(fileStoreSpy.recoverAsync(any())).thenReturn(CompletableFuture.supplyAsync(() -> { throw new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "Test"); })); Assert.assertFalse(fileStoreSpy.load()); Mockito.reset(fileStoreSpy); fileStore.load(); Assert.assertEquals(2, fileStore.deepCopyFlatFileToList().size()); fileStore.destroy(); Assert.assertEquals(0, fileStore.deepCopyFlatFileToList().size()); }
/**
 * DNS-encodes a dotted name: each label is emitted as a length byte followed by
 * its normalized UTF-8 bytes, terminated by the zero-length root label, and the
 * whole sequence is returned as a hex string.
 *
 * @throws IOException if writing the encoded bytes fails
 */
public static String dnsEncode(String name) throws IOException {
    ByteArrayOutputStream encoded = new ByteArrayOutputStream();
    for (String label : name.split("\\.")) {
        // The '_' placeholder reserves the slot for the length prefix.
        byte[] bytes = toUtf8Bytes("_" + normalise(label));
        if (bytes == null) {
            break;
        }
        // Overwrite the placeholder with the label's byte length.
        bytes[0] = (byte) (bytes.length - 1);
        encoded.write(bytes);
    }
    // Trailing "00" is the zero-length root label terminating the name.
    return Numeric.toHexString(encoded.toByteArray()) + "00";
}
@Test
void testDnsEncode() throws IOException {
    // Labels "1", "offchainexample", "eth" become length-prefixed byte runs,
    // terminated by the root label "00".
    String encoded = NameHash.dnsEncode("1.offchainexample.eth");
    assertEquals("0x01310f6f6666636861696e6578616d706c650365746800", encoded);
}
/**
 * Resolves the access privilege for a client address, delegating to the
 * string-based overload with the address's IP and canonical hostname.
 */
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
    final String ip = addr.getHostAddress();
    final String hostname = addr.getCanonicalHostName();
    return getAccessPrivilege(ip, hostname);
}
@Test
public void testExactAddressRO() {
    NfsExports exports = new NfsExports(CacheSize, ExpirationPeriod, address1);
    // The exported address gets read-only access; any other address gets none.
    Assert.assertEquals(AccessPrivilege.READ_ONLY, exports.getAccessPrivilege(address1, hostname1));
    Assert.assertEquals(AccessPrivilege.NONE, exports.getAccessPrivilege(address2, hostname1));
}
/** Creates a REST setting builder matching HTTP HEAD requests on all ids. */
public static RestSettingBuilder head() { return all(HttpMethod.HEAD); }
@Test
public void should_head_with_all() throws Exception {
    server.resource("targets", head().response(header("ETag", "Moco")));
    running(server, () -> {
        HttpResponse response = helper.headForResponse(remoteUrl("/targets"));
        // HEAD must succeed and carry the configured response header.
        assertThat(response.getCode(), is(200));
        assertThat(response.getHeaders("ETag")[0].getValue(), is("Moco"));
    });
}
@Override @CacheEvict(cacheNames = RedisKeyConstants.SMS_TEMPLATE, allEntries = true) // allEntries 清空所有缓存,因为 id 不是直接的缓存 code,不好清理 public void deleteSmsTemplate(Long id) { // 校验存在 validateSmsTemplateExists(id); // 更新 smsTemplateMapper.deleteById(id); }
@Test public void testDeleteSmsTemplate_success() { // mock 数据 SmsTemplateDO dbSmsTemplate = randomSmsTemplateDO(); smsTemplateMapper.insert(dbSmsTemplate);// @Sql: 先插入出一条存在的数据 // 准备参数 Long id = dbSmsTemplate.getId(); // 调用 smsTemplateService.deleteSmsTemplate(id); // 校验数据不存在了 assertNull(smsTemplateMapper.selectById(id)); }
/** Returns the current (parsed) value stored under the given property name. */
public Object get(final String property) {
    return this.props.get(property);
}
// get() must return the parser-transformed value, not the raw initial value.
@Test public void shouldGetCurrentValue() { assertThat(propsWithMockParser.get("prop-1"), is("parsed-initial-val-1")); }
/**
 * Writes a message into the ring buffer.
 * <p>
 * The store sequence is ordering-critical: a negative record length is
 * published first (ordered store) so concurrent readers see the record as
 * in-progress, a release fence separates it from the payload stores, and the
 * final ordered store of the positive length makes the record visible.
 *
 * @return false when there is insufficient capacity for the record
 */
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int offset, final int length) {
    checkTypeId(msgTypeId);
    checkMsgLength(length);
    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);
    if (INSUFFICIENT_CAPACITY == recordIndex) {
        return false;
    }
    // Negative length marks the record as in-progress for concurrent readers.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    MemoryAccess.releaseFence();
    buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, offset, length);
    buffer.putInt(typeOffset(recordIndex), msgTypeId);
    // Flipping the length positive publishes the completed record.
    buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);
    return true;
}
@Test
void shouldWriteToEmptyBuffer() {
    final int length = 8;
    final int recordLength = length + HEADER_LENGTH;
    final int alignedRecordLength = align(recordLength, ALIGNMENT);
    final long tail = 0L;
    final long head = 0L;

    // Empty ring: head == tail == 0, so the capacity claim must succeed.
    when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(head);
    when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(tail);

    final UnsafeBuffer srcBuffer = new UnsafeBuffer(allocateDirect(12));
    final int srcIndex = 4;
    assertTrue(ringBuffer.write(MSG_TYPE_ID, srcBuffer, srcIndex, length));

    // Verify the publish protocol in order: advance tail, zero the next
    // header slot, negative length, payload, type, then positive length.
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, tail + alignedRecordLength);
    inOrder.verify(buffer).putLong((int)tail + alignedRecordLength, 0L);
    inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), -recordLength);
    inOrder.verify(buffer).putBytes(encodedMsgOffset((int)tail), srcBuffer, srcIndex, length);
    inOrder.verify(buffer).putInt(typeOffset((int)tail), MSG_TYPE_ID);
    inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), recordLength);
}
public boolean tryLock() {
    try {
        // Open (or create) the lock file and attempt a non-blocking exclusive
        // lock on its first 1024 bytes.
        lockRandomAccessFile = new RandomAccessFile(lockFilePath.toFile(), "rw");
        lockChannel = lockRandomAccessFile.getChannel();
        lockFile = lockChannel.tryLock(0, 1024, false);
        // tryLock returns null when another process already holds the lock.
        // NOTE(review): on that path the RandomAccessFile/channel stay open —
        // presumably released by an unlock/close elsewhere; verify no leak.
        return lockFile != null;
    } catch (IOException e) {
        throw new IllegalStateException("Failed to create lock in " + lockFilePath.toString(), e);
    }
}
@Test
public void errorTryLock() {
    // A non-existing directory makes the lock file creation fail with an
    // IOException, which tryLock wraps in IllegalStateException.
    lock = new DirectoryLock(Paths.get("non", "existing", "path"));
    assertThatThrownBy(() -> lock.tryLock())
        .isInstanceOf(IllegalStateException.class)
        .hasMessageContaining("Failed to create lock");
}
/**
 * Executes the given SHOW statement through the global show-executor visitor.
 *
 * @param statement the SHOW statement to run
 * @param context   the connection context of the requesting session
 * @return the result set produced by the visitor
 */
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
    final GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
    return stateMgr.getShowExecutor().showExecutorVisitor.visit(statement, context);
}
@Test
public void testShowCreateHiveExternalTable() {
    // Stub the metadata manager so the external catalog resolves to a
    // synthetic Hive table with two partition columns (year, dt).
    new MockUp<MetadataMgr>() {
        @Mock
        public Database getDb(String catalogName, String dbName) {
            return new Database();
        }

        @Mock
        public Table getTable(String catalogName, String dbName, String tblName) {
            List<Column> fullSchema = new ArrayList<>();
            Column columnId = new Column("id", Type.INT, true);
            columnId.setComment("id");
            Column columnName = new Column("name", Type.VARCHAR);
            Column columnYear = new Column("year", Type.INT);
            Column columnDt = new Column("dt", Type.INT);
            fullSchema.add(columnId);
            fullSchema.add(columnName);
            fullSchema.add(columnYear);
            fullSchema.add(columnDt);
            List<String> partitions = Lists.newArrayList();
            partitions.add("year");
            partitions.add("dt");
            HiveTable.Builder tableBuilder = HiveTable.builder()
                    .setId(1)
                    .setTableName("test_table")
                    .setCatalogName("hive_catalog")
                    .setResourceName(toResourceName("hive_catalog", "hive"))
                    .setHiveDbName("hive_db")
                    .setHiveTableName("test_table")
                    .setPartitionColumnNames(partitions)
                    .setFullSchema(fullSchema)
                    .setTableLocation("hdfs://hadoop/hive/warehouse/test.db/test")
                    .setCreateTime(10000)
                    .setHiveTableType(HiveTable.HiveTableType.EXTERNAL_TABLE);
            return tableBuilder.build();
        }
    };

    ShowCreateTableStmt stmt = new ShowCreateTableStmt(
            new TableName("hive_catalog", "hive_db", "test_table"),
            ShowCreateTableStmt.CreateTableType.TABLE);
    ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
    // Row 0 layout: [table name, CREATE TABLE DDL].
    Assert.assertEquals("test_table", resultSet.getResultRows().get(0).get(0));
    Assert.assertEquals("CREATE EXTERNAL TABLE `test_table` (\n" +
            " `id` int(11) DEFAULT NULL COMMENT \"id\",\n" +
            " `name` varchar DEFAULT NULL,\n" +
            " `year` int(11) DEFAULT NULL,\n" +
            " `dt` int(11) DEFAULT NULL\n" +
            ")\n" +
            "PARTITION BY (year, dt)\n" +
            "PROPERTIES (\"location\" = \"hdfs://hadoop/hive/warehouse/test.db/test\");",
            resultSet.getResultRows().get(0).get(1));
}
/**
 * Returns the trace id as lower-hex: 32 characters when the high 64 bits are
 * set (128-bit id), otherwise 16 characters (64-bit id).
 */
@Override
public String toString() {
    if (traceIdHigh == 0) {
        char[] low = new char[16];
        writeHexLong(low, 0, traceId);
        return new String(low);
    }
    char[] full = new char[32];
    writeHexLong(full, 0, traceIdHigh);
    writeHexLong(full, 16, traceId);
    return new String(full);
}
@Test
void testToString() {
    // With traceIdHigh set, toString emits a 32-char hex id: the high 64 bits
    // (0xde = 222) followed by the low 64 bits (0x14d), each zero-padded.
    assertThat(base.toBuilder().traceIdHigh(222L).build().toString())
        .isEqualTo("00000000000000de000000000000014d");
}
/**
 * Records the analysis uuid. The uuid is write-once: a second call fails.
 *
 * @throws IllegalStateException if the uuid was already set
 * @throws NullPointerException  if {@code s} is null
 */
@Override
public MutableAnalysisMetadataHolder setUuid(String s) {
    // "Already set" is checked before the null check, matching caller expectations.
    checkState(!uuid.isInitialized(), "Analysis uuid has already been set");
    final String analysisUuid = requireNonNull(s, "Analysis uuid can't be null");
    this.uuid.setProperty(analysisUuid);
    return this;
}
@Test
public void setUuid_throws_ISE_if_called_twice() {
    underTest.setUuid("org1");
    // The second call must fail: the analysis uuid is write-once.
    assertThatThrownBy(() -> underTest.setUuid("org1"))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Analysis uuid has already been set");
}
/**
 * Transforms each KV element into a string using {@code ","} as the
 * key/value delimiter.
 *
 * @return a transform producing one delimited string per KV element
 */
public static PTransform<PCollection<? extends KV<?, ?>>, PCollection<String>> kvs() {
    final String defaultDelimiter = ",";
    return kvs(defaultDelimiter);
}
@Test
@Category(NeedsRunner.class)
public void testToStringKV() {
    ArrayList<KV<String, Integer>> kvs = new ArrayList<>();
    kvs.add(KV.of("one", 1));
    kvs.add(KV.of("two", 2));

    // Default delimiter is a comma: "key,value".
    ArrayList<String> expected = new ArrayList<>();
    expected.add("one,1");
    expected.add("two,2");

    PCollection<KV<String, Integer>> input = p.apply(Create.of(kvs));
    PCollection<String> output = input.apply(ToString.kvs());
    PAssert.that(output).containsInAnyOrder(expected);
    p.run();
}
/**
 * Reads the given configuration property as a string, logging the value
 * that was resolved.
 *
 * @param conf       the service configuration to read from
 * @param configProp the property name
 * @return the resolved value
 */
static String getConfigValueAsString(ServiceConfiguration conf,
                                     String configProp) throws IllegalArgumentException {
    final String resolved = getConfigValueAsStringImpl(conf, configProp);
    log.info("Configuration for [{}] is [{}]", configProp, resolved);
    return resolved;
}
@Test
public void testGetConfigValueAsStringReturnsNullIfMissing() {
    // With no properties configured, the lookup returns null rather than throwing.
    Properties props = new Properties();
    ServiceConfiguration config = new ServiceConfiguration();
    config.setProperties(props);
    String actual = ConfigUtils.getConfigValueAsString(config, "prop1");
    assertNull(actual);
}
/**
 * Creates a processing logger for the given name with no additional tags,
 * delegating to the tagged overload with an empty map.
 */
@Override
public ProcessingLogger getLogger(
    final String name
) {
  return getLogger(name, Collections.emptyMap());
}
@Test
public void shouldCreateLoggerWithoutPassingInTags() {
    // Given: a logger obtained via the no-tags overload; its metrics sensor
    // should still be registered under the logger-id tag.
    final ProcessingLogger testLogger = factory.getLogger("foo.bar");
    final Sensor sensor = metricCollectors.getMetrics().getSensor("foo.bar");
    final Map<String, String> metricsTags = new HashMap<>(customMetricsTags);
    metricsTags.put("logger-id", "foo.bar");

    // When:
    sensor.record();
    sensor.record();

    // Then:
    assertThat(testLogger, is(this.loggerWithMetrics));
    verify(innerFactory).getLogger("foo.bar");
    verify(loggerFactory).apply(config, innerLogger);
    verify(loggerWithMetricsFactory).apply(metricCollectors.getMetrics());
    verify(loggerWithMetricsFactoryHelper).apply(logger, sensor);
    // verify the metric was created correctly: two record() calls => value 2.0
    assertThat(getMetricValue(metricsTags), equalTo(2.0));
}
/**
 * Returns the child runners backing this suite.
 */
@Override
protected List<ParentRunner<?>> getChildren() {
    return children;
}
@Test
void finds_no_features_when_explicit_feature_path_has_no_features() throws InitializationError {
    // A feature path containing no feature files yields zero child runners.
    Cucumber cucumber = new Cucumber(ExplicitFeaturePathWithNoFeatures.class);
    List<ParentRunner<?>> children = cucumber.getChildren();
    assertThat(children, is(equalTo(emptyList())));
}
/**
 * Expands a composite {@link DataType} into a {@link ResolvedSchema} with
 * one physical column per field.
 *
 * @param dataType a {@link FieldsDataType} or a legacy structured type
 * @throws IllegalArgumentException if the type is not composite
 */
public static ResolvedSchema expandCompositeTypeToSchema(DataType dataType) {
    if (dataType instanceof FieldsDataType) {
        return expandCompositeType((FieldsDataType) dataType);
    }
    final boolean isLegacyStructured =
            dataType.getLogicalType() instanceof LegacyTypeInformationType
                    && dataType.getLogicalType().getTypeRoot() == STRUCTURED_TYPE;
    if (isLegacyStructured) {
        return expandLegacyCompositeType(dataType);
    }
    throw new IllegalArgumentException("Expected a composite type");
}
@Test
void testExpandRowType() {
    DataType dataType =
            ROW(
                    FIELD("f0", INT()),
                    FIELD("f1", STRING()),
                    FIELD("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
                    FIELD("f3", TIMESTAMP(3)));

    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(dataType);
    // f2 keeps its explicit bridging class; f3 is expected to come back
    // bridged to LocalDateTime.
    assertThat(schema)
            .isEqualTo(
                    ResolvedSchema.of(
                            Column.physical("f0", INT()),
                            Column.physical("f1", STRING()),
                            Column.physical("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
                            Column.physical(
                                    "f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))));
}
/**
 * Aligns the slot's boundaries to the given segment size; a slot shorter
 * than one segment snaps to exactly one segment.
 */
@Override
public TimeSlot apply(TimeSlot timeSlot, SegmentInMinutes segmentInMinutes) {
    int durationMinutes = segmentInMinutes.value();
    Instant alignedStart = normalizeStart(timeSlot.from(), durationMinutes);
    Instant alignedEnd = normalizeEnd(timeSlot.to(), durationMinutes);
    TimeSlot aligned = new TimeSlot(alignedStart, alignedEnd);
    // The smallest possible result: one segment starting at the aligned start.
    TimeSlot singleSegment = new TimeSlot(
            alignedStart, alignedStart.plus(durationMinutes, ChronoUnit.MINUTES));
    return aligned.within(singleSegment) ? singleSegment : aligned;
}
@Test
void hasNoEffectWhenSlotAlreadyNormalized() {
    //given
    // A slot that already starts and ends on 60-minute boundaries.
    Instant start = Instant.parse("2023-09-09T00:00:00Z");
    Instant end = Instant.parse("2023-09-09T01:00:00Z");
    TimeSlot timeSlot = new TimeSlot(start, end);
    SegmentInMinutes oneHour = SegmentInMinutes.of(60, FIFTEEN_MINUTES_SEGMENT_DURATION);
    //when
    TimeSlot normalized = new SlotToNormalizedSlot().apply(timeSlot, oneHour);
    //then normalization is a no-op
    assertEquals(timeSlot, normalized);
}
@Override
public <T> Task<T> synchronize(Task<T> task, long deadline) {
    // Look up the per-plan lock record so re-entrant calls on the same plan
    // reuse the existing lock node instead of re-acquiring.
    return PlanLocal.get(getPlanLocalKey(), LockInternal.class)
        .flatMap(lockInternal -> {
            if (lockInternal != null) {
                // we already acquire the lock, add count only.
                lockInternal._lockCount++;
                return Task.value(lockInternal._lockNode);
            } else {
                // try acquire.
                return acquire(deadline);
            }
        })
        /* run the given task; toTry() captures failure so release still runs */
        .flatMap(unused -> task).toTry()
        /* release the lock and unwind the result (success or failure) */
        .flatMap(result -> release().andThen(unwind(result)));
}
@Test
public void testReleaseAfterException() throws InterruptedException {
    final long deadline = System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(10, TimeUnit.SECONDS);
    final String path = "/testPath";
    final String errMsg = "Boom! There is an exception! But, who cares...";
    final ZKLock lock = createZKLock(path);

    // Synchronize over a task that always fails.
    Task<Integer> synchronizedTask = lock.synchronize(Task.failure(new Exception(errMsg)), deadline);
    run(synchronizedTask);
    Assert.assertTrue(synchronizedTask.await(10, TimeUnit.SECONDS));

    // The original failure propagates unchanged...
    Assert.assertTrue(synchronizedTask.isFailed());
    Assert.assertEquals(synchronizedTask.getError().getMessage(), errMsg);

    // ...but the lock node was still released: no children remain under the path.
    Task<List<String>> children = _zkClient.getChildren(path);
    runAndWait("getChildren", children);
    Assert.assertEquals(children.get().size(), 0);

    Task<ZKData> acls = _zkClient.getData(path);
    runAndWait("getData", acls);
    Assert.assertEquals(acls.get().getAclList(), _acls);
}
/**
 * Returns the process-wide singleton metadata store.
 */
public static RepositoryMetadataStore getInstance() {
    return repositoryMetadataStore;
}
@Test
public void shouldGetAllPluginIds() throws Exception {
    RepositoryMetadataStore metadataStore = RepositoryMetadataStore.getInstance();
    // Register metadata for three plugins, then verify all three ids are listed.
    String[] pluginIds = {"plugin1", "plugin2", "plugin3"};
    for (String pluginId : pluginIds) {
        metadataStore.addMetadataFor(pluginId, new PackageConfigurations());
    }

    assertThat(metadataStore.getPlugins().size(), is(3));
    for (String pluginId : pluginIds) {
        assertThat(metadataStore.getPlugins().contains(pluginId), is(true));
    }
}
@Override
public void aroundWriteTo(WriterInterceptorContext ctx) throws IOException, WebApplicationException {
    try {
        ctx.proceed();
    } finally {
        // Decrement the gauge even when proceed() throws, so the in-progress
        // counter never drifts on failed writes.
        TrackedByGauge trackedByGauge = _resourceInfo.getResourceMethod().getAnnotation(TrackedByGauge.class);
        if (trackedByGauge != null) {
            _controllerMetrics.addValueToGlobalGauge(trackedByGauge.gauge(), -1L);
        }
    }
}
@Test(expectedExceptions = IOException.class)
public void testWriterInterceptorDecrementsGaugeWhenWriterThrowsException() throws Exception {
    Method methodOne = TrackedClass.class.getDeclaredMethod("trackedMethod");
    when(_resourceInfo.getResourceMethod()).thenReturn(methodOne);
    // Make the downstream writer chain fail.
    doThrow(new IOException()).when(_writerInterceptorContext).proceed();
    try {
        _interceptor.aroundWriteTo(_writerInterceptorContext);
    } finally {
        // The gauge must be decremented even though proceed() threw.
        verify(_controllerMetrics).addValueToGlobalGauge(ControllerGauge.SEGMENT_DOWNLOADS_IN_PROGRESS, -1L);
    }
}
@Override
public void transferBufferOwnership(Object oldOwner, Object newOwner, Buffer buffer) {
    checkState(buffer.isBuffer(), "Only buffer supports transfer ownership.");
    // Move the accounting of one requested buffer from the old owner to the new one.
    decNumRequestedBuffer(oldOwner);
    incNumRequestedBuffer(newOwner);
    // Re-point the recycler so that releasing the buffer is credited back
    // against the new owner.
    buffer.setRecycler(memorySegment -> recycleBuffer(newOwner, memorySegment));
}
@Test
void testTransferBufferOwnership() throws IOException {
    TieredStorageMemoryManagerImpl memoryManager =
            createStorageMemoryManager(
                    1, Collections.singletonList(new TieredStorageMemorySpec(this, 0)));
    BufferBuilder bufferBuilder = memoryManager.requestBufferBlocking(this);
    assertThat(memoryManager.numOwnerRequestedBuffer(this)).isEqualTo(1);

    BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumerFromBeginning();
    Buffer buffer = bufferConsumer.build();
    bufferBuilder.close();
    bufferConsumer.close();

    // Transferring ownership moves the accounting from this owner to newOwner.
    Object newOwner = new Object();
    memoryManager.transferBufferOwnership(this, newOwner, buffer);
    assertThat(memoryManager.numOwnerRequestedBuffer(this)).isEqualTo(0);
    assertThat(memoryManager.numOwnerRequestedBuffer(newOwner)).isEqualTo(1);

    // Recycling the buffer is credited back against the new owner.
    buffer.recycleBuffer();
    assertThat(memoryManager.numOwnerRequestedBuffer(newOwner)).isEqualTo(0);
}
/**
 * Checks whether the given path exists under the directory's file provider.
 * Any provider-lookup or file-access failure is treated as "does not exist"
 * (deliberate best-effort behavior).
 *
 * @param dir  the directory whose provider handles the lookup
 * @param path the path to test
 * @return {@code true} if the file exists, {@code false} otherwise or on error
 */
public Boolean fileExists( File dir, String path ) {
    try {
        return providerService.get( dir.getProvider() ).fileExists( dir, path, space );
    } catch ( InvalidFileProviderException | FileException e ) {
        return false;
    }
}
@Test
public void testFileExists() {
    TestDirectory testDirectory = new TestDirectory();
    testDirectory.setPath( "/directory1" );
    // file1 is known to the test provider; file5 is not.
    Assert.assertTrue( fileController.fileExists( testDirectory, "/directory1/file1" ) );
    Assert.assertFalse( fileController.fileExists( testDirectory, "/directory1/file5" ) );
}
/**
 * Declares an operator-scope managed memory use case with the given weight.
 *
 * @param managedMemoryUseCase the use case; must be operator scope
 * @param weight               the weight; must be positive
 * @return the previously declared weight for this use case, if any
 */
public Optional<Integer> declareManagedMemoryUseCaseAtOperatorScope(
        ManagedMemoryUseCase managedMemoryUseCase, int weight) {
    checkNotNull(managedMemoryUseCase);
    checkArgument(
            managedMemoryUseCase.scope == ManagedMemoryUseCase.Scope.OPERATOR,
            "Use case is not operator scope.");
    checkArgument(weight > 0, "Weights for operator scope use cases must be greater than 0.");

    final Integer previousWeight =
            managedMemoryOperatorScopeUseCaseWeights.put(managedMemoryUseCase, weight);
    return Optional.ofNullable(previousWeight);
}
@Test
void testDeclareManagedMemoryOperatorScopeUseCaseFailWrongScope() {
    // The weight (123) is valid, so the IllegalArgumentException must come
    // from PYTHON not being an operator-scope use case.
    assertThatThrownBy(
            () ->
                transformation.declareManagedMemoryUseCaseAtOperatorScope(
                    ManagedMemoryUseCase.PYTHON, 123))
        .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Extracts the return type of a {@link MapFunction} given its input type.
 * Delegates to the full overload with a {@code null} function name and
 * {@code false} for the last flag.
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
        MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
    return getMapReturnTypes(mapInterface, inType, null, false);
}
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
void testGenericsNotInSuperclass() {
    // use getMapReturnTypes()
    RichMapFunction<?, ?> function =
            new RichMapFunction<LongKeyValue<String>, LongKeyValue<String>>() {
                private static final long serialVersionUID = 1L;

                @Override
                public LongKeyValue<String> map(LongKeyValue<String> value) throws Exception {
                    return null;
                }
            };

    TypeInformation<?> ti =
            TypeExtractor.getMapReturnTypes(
                    function,
                    (TypeInformation) TypeInformation.of(new TypeHint<Tuple2<Long, String>>() {}));

    // The extractor resolves LongKeyValue<String> as a 2-arity tuple type.
    assertThat(ti.isTupleType()).isTrue();
    assertThat(ti.getArity()).isEqualTo(2);

    TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti;
    assertThat(tti.getTypeClass()).isEqualTo(LongKeyValue.class);
    assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.LONG_TYPE_INFO);
    assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
}
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"}) public static boolean isScalablePushQuery( final Statement statement, final KsqlExecutionContext ksqlEngine, final KsqlConfig ksqlConfig, final Map<String, Object> overrides ) { if (!isPushV2Enabled(ksqlConfig, overrides)) { return false; } if (! (statement instanceof Query)) { return false; } final Query query = (Query) statement; final SourceFinder sourceFinder = new SourceFinder(); sourceFinder.process(query.getFrom(), null); // It will be present if it's not a join, which we don't handle if (!sourceFinder.getSourceName().isPresent()) { return false; } // Find all of the writers to this particular source. final SourceName sourceName = sourceFinder.getSourceName().get(); final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName); // See if the config or override have set the stream to be "latest" final boolean isLatest = isLatest(ksqlConfig, overrides); // Cannot be a pull query, i.e. must be a push return !query.isPullQuery() // Group by is not supported && !query.getGroupBy().isPresent() // Windowing is not supported && !query.getWindow().isPresent() // Having clause is not supported && !query.getHaving().isPresent() // Partition by is not supported && !query.getPartitionBy().isPresent() // There must be an EMIT CHANGES clause && (query.getRefinement().isPresent() && query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES) // Must be reading from "latest" && isLatest // We only handle a single sink source at the moment from a CTAS/CSAS && upstreamQueries.size() == 1 // ROWPARTITION and ROWOFFSET are not currently supported in SPQs && !containsDisallowedColumns(query); }
@Test
public void isScalablePushQuery_true_latestConfig() {
    try (MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
        // When: all SPQ preconditions hold and the stream-level config (not an
        // override) pins the offset reset to "latest".
        expectIsSPQ(ColumnName.of("foo"), columnExtractor);
        when(ksqlConfig.getKsqlStreamConfigProp(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG))
            .thenReturn(Optional.of("latest"));

        // Then:
        assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig, ImmutableMap.of()),
            equalTo(true));
    }
}
/**
 * Creates a mask DQL result decorator for SELECT statements; all other
 * statement types need no decoration and yield an empty result.
 */
@Override
public Optional<ResultDecorator<MaskRule>> newInstance(final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database,
                                                       final MaskRule maskRule, final ConfigurationProperties props, final SQLStatementContext sqlStatementContext) {
    if (sqlStatementContext instanceof SelectStatementContext) {
        return Optional.of(new MaskDQLResultDecorator(maskRule, (SelectStatementContext) sqlStatementContext));
    }
    return Optional.empty();
}
@Test
void assertNewInstanceWithOtherStatement() {
    // Non-SELECT statements (here: INSERT) get no mask decorator.
    MaskResultDecoratorEngine engine = (MaskResultDecoratorEngine) OrderedSPILoader.getServices(
            ResultProcessEngine.class, Collections.singleton(rule)).get(rule);
    assertFalse(engine.newInstance(mock(RuleMetaData.class), database, rule,
            mock(ConfigurationProperties.class), mock(InsertStatementContext.class)).isPresent());
}
@Override
public ImagesAndRegistryClient call()
    throws IOException, RegistryException, LayerPropertyNotFoundException,
        LayerCountMismatchException, BadContainerConfigurationFormatException,
        CacheCorruptedException, CredentialRetrievalException {
  EventHandlers eventHandlers = buildContext.getEventHandlers();
  try (ProgressEventDispatcher progressDispatcher =
          progressDispatcherFactory.create("pulling base image manifest", 4);
      TimerEventDispatcher ignored1 = new TimerEventDispatcher(eventHandlers, DESCRIPTION)) {

    // Skip this step if this is a scratch image
    ImageReference imageReference = buildContext.getBaseImageConfiguration().getImage();
    if (imageReference.isScratch()) {
      Set<Platform> platforms = buildContext.getContainerConfiguration().getPlatforms();
      Verify.verify(!platforms.isEmpty());

      eventHandlers.dispatch(LogEvent.progress("Getting scratch base image..."));
      ImmutableList.Builder<Image> images = ImmutableList.builder();
      // One synthetic empty image per requested platform.
      for (Platform platform : platforms) {
        Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
        imageBuilder.setArchitecture(platform.getArchitecture()).setOs(platform.getOs());
        images.add(imageBuilder.build());
      }
      return new ImagesAndRegistryClient(images.build(), null);
    }

    eventHandlers.dispatch(
        LogEvent.progress("Getting manifest for base image " + imageReference + "..."));

    if (buildContext.isOffline()) {
      // Offline mode: the base image must already be in the local cache.
      List<Image> images = getCachedBaseImages();
      if (!images.isEmpty()) {
        return new ImagesAndRegistryClient(images, null);
      }
      throw new IOException(
          "Cannot run Jib in offline mode; " + imageReference + " not found in local Jib cache");

    } else if (imageReference.getDigest().isPresent()) {
      // Digest-pinned base image: a cache hit can be used without pulling.
      List<Image> images = getCachedBaseImages();
      if (!images.isEmpty()) {
        RegistryClient noAuthRegistryClient =
            buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
        // TODO: passing noAuthRegistryClient may be problematic. It may return 401 unauthorized
        // if layers have to be downloaded.
        // https://github.com/GoogleContainerTools/jib/issues/2220
        return new ImagesAndRegistryClient(images, noAuthRegistryClient);
      }
    }

    // Registry mirrors are tried before the canonical registry.
    Optional<ImagesAndRegistryClient> mirrorPull =
        tryMirrors(buildContext, progressDispatcher.newChildProducer());
    if (mirrorPull.isPresent()) {
      return mirrorPull.get();
    }

    try {
      // First, try with no credentials. This works with public GCR images (but not Docker Hub).
      // TODO: investigate if we should just pass credentials up front. However, this involves
      // some risk. https://github.com/GoogleContainerTools/jib/pull/2200#discussion_r359069026
      // contains some related discussions.
      RegistryClient noAuthRegistryClient =
          buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
      return new ImagesAndRegistryClient(
          pullBaseImages(noAuthRegistryClient, progressDispatcher.newChildProducer()),
          noAuthRegistryClient);

    } catch (RegistryUnauthorizedException ex) {
      eventHandlers.dispatch(
          LogEvent.lifecycle(
              "The base image requires auth. Trying again for " + imageReference + "..."));

      Credential credential =
          RegistryCredentialRetriever.getBaseImageCredential(buildContext).orElse(null);
      RegistryClient registryClient =
          buildContext
              .newBaseImageRegistryClientFactory()
              .setCredential(credential)
              .newRegistryClient();

      // Prefer the auth scheme the server advertised in its 401 response.
      String wwwAuthenticate = ex.getHttpResponseException().getHeaders().getAuthenticate();
      if (wwwAuthenticate != null) {
        eventHandlers.dispatch(
            LogEvent.debug("WWW-Authenticate for " + imageReference + ": " + wwwAuthenticate));
        registryClient.authPullByWwwAuthenticate(wwwAuthenticate);
        return new ImagesAndRegistryClient(
            pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
            registryClient);

      } else {
        // Not getting WWW-Authenticate is unexpected in practice, and we may just blame the
        // server and fail. However, to keep some old behavior, try a few things as a last resort.
        // TODO: consider removing this fallback branch.
        if (credential != null && !credential.isOAuth2RefreshToken()) {
          eventHandlers.dispatch(
              LogEvent.debug("Trying basic auth as fallback for " + imageReference + "..."));
          registryClient.configureBasicAuth();
          try {
            return new ImagesAndRegistryClient(
                pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
                registryClient);
          } catch (RegistryUnauthorizedException ignored) {
            // Fall back to try bearer auth.
          }
        }

        eventHandlers.dispatch(
            LogEvent.debug("Trying bearer auth as fallback for " + imageReference + "..."));
        registryClient.doPullBearerAuth();
        return new ImagesAndRegistryClient(
            pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
            registryClient);
      }
    }
  }
}
@Test(expected = UnlistedPlatformInManifestListException.class)
public void testCall_ManifestList_UnknownArchitecture()
    throws InvalidImageReferenceException, IOException, RegistryException,
        LayerPropertyNotFoundException, LayerCountMismatchException,
        BadContainerConfigurationFormatException, CacheCorruptedException,
        CredentialRetrievalException {
  Mockito.when(buildContext.getBaseImageConfiguration())
      .thenReturn(ImageConfiguration.builder(ImageReference.parse("multiarch")).build());
  Mockito.when(buildContext.getRegistryMirrors())
      .thenReturn(ImmutableListMultimap.of("registry", "gcr.io"));
  // Request a platform (arm64/linux) the stubbed manifest list does not contain.
  Mockito.when(containerConfig.getPlatforms())
      .thenReturn(ImmutableSet.of(new Platform("arm64", "linux")));
  RegistryClient.Factory dockerHubRegistryClientFactory =
      setUpWorkingRegistryClientFactoryWithV22ManifestList();
  Mockito.when(buildContext.newBaseImageRegistryClientFactory())
      .thenReturn(dockerHubRegistryClientFactory);

  // Expect UnlistedPlatformInManifestListException (see @Test annotation).
  pullBaseImageStep.call();
}