focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/* Fetches an SMS template from the Aliyun SDK by template code. Returns null when the
 * response carries no template status; otherwise maps code/content/audit status/reason
 * into a SmsTemplateRespDTO. Declares Throwable because client.getAcsResponse may throw. */
@Override public SmsTemplateRespDTO getSmsTemplate(String apiTemplateId) throws Throwable { // 构建请求 QuerySmsTemplateRequest request = new QuerySmsTemplateRequest(); request.setTemplateCode(apiTemplateId); // 执行请求 QuerySmsTemplateResponse response = client.getAcsResponse(request); if (response.getTemplateStatus() == null) { return null; } return new SmsTemplateRespDTO().setId(response.getTemplateCode()).setContent(response.getTemplateContent()) .setAuditStatus(convertSmsTemplateAuditStatus(response.getTemplateStatus())).setAuditReason(response.getReason()); }
/* Unit test: mocks the Aliyun client to return an approved template (status=1) and
 * verifies the DTO mapping of id, content, audit status and reason, plus that the
 * request carried the expected template code (checked inside the ArgumentMatcher). */
@Test public void testGetSmsTemplate() throws Throwable { // 准备参数 String apiTemplateId = randomString(); // mock 方法 QuerySmsTemplateResponse response = randomPojo(QuerySmsTemplateResponse.class, o -> { o.setCode("OK"); o.setTemplateStatus(1); // 设置模板通过 }); when(client.getAcsResponse(argThat((ArgumentMatcher<QuerySmsTemplateRequest>) acsRequest -> { assertEquals(apiTemplateId, acsRequest.getTemplateCode()); return true; }))).thenReturn(response); // 调用 SmsTemplateRespDTO result = smsClient.getSmsTemplate(apiTemplateId); // 断言 assertEquals(response.getTemplateCode(), result.getId()); assertEquals(response.getTemplateContent(), result.getContent()); assertEquals(SmsTemplateAuditStatusEnum.SUCCESS.getStatus(), result.getAuditStatus()); assertEquals(response.getReason(), result.getAuditReason()); }
/* Scenario runner loop: lazily calls beforeRun() (steps==null guard), bails out when
 * skipped, then executes steps by index via nextStepIndex() — which allows debug
 * step-back / hook skip, hence the null check on currentStepResult before recording.
 * Any exception is converted into a fake failed step result rather than propagated.
 * The finally block always runs afterRun() (unless skipped) so countdown latches in a
 * parallel runner are released even on crashes, optionally aborts the suite on failure,
 * and closes the log appender to reclaim memory when there is no caller. */
@Override public void run() { try { // make sure we call afterRun() even on crashes // and operate countdown latches, else we may hang the parallel runner if (steps == null) { beforeRun(); } if (skipped) { return; } int count = steps.size(); int index = 0; while ((index = nextStepIndex()) < count) { currentStep = steps.get(index); execute(currentStep); if (currentStepResult != null) { // can be null if debug step-back or hook skip result.addStepResult(currentStepResult); } } } catch (Exception e) { if (currentStepResult != null) { result.addStepResult(currentStepResult); } logError("scenario [run] failed\n" + StringUtils.throwableToString(e)); currentStepResult = result.addFakeStepResult("scenario [run] failed", e); } finally { if (!skipped) { afterRun(); if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) { featureRuntime.suite.abort(); } } if (caller.isNone()) { logAppender.close(); // reclaim memory } } }
/* Unit test: reads the same data via YAML and JSON, asserting both resolve embedded
 * expressions (foo/bar) identically and leave unresolvable ones ("#(baz)") verbatim. */
@Test void testReadFilesWithExpressions() { run( "def foo = 'fooValue'", "def bar = 'barValue'", "def dataFromYml = read('read-expressions.yml')", "def dataFromJson = read('read-expressions.json')" ); Variable dataFromYml = sr.engine.vars.get("dataFromYml"); Variable dataFromJson = sr.engine.vars.get("dataFromJson"); assertEquals(dataFromYml.getAsString(), dataFromJson.getAsString()); assertEquals(dataFromYml.getAsString(), "[{\"item\":{\"foo\":\"fooValue\",\"nested\":{\"bar\":\"barValue\",\"notfound\":\"#(baz)\"}}}]"); assertEquals(dataFromJson.getAsString(), "[{\"item\":{\"foo\":\"fooValue\",\"nested\":{\"bar\":\"barValue\",\"notfound\":\"#(baz)\"}}}]"); }
/**
 * Returns a new array containing all elements of {@code replicas} followed by
 * {@code value}. The input array is never modified.
 *
 * @param replicas the existing replica ids (may be empty, must not be null)
 * @param value    the id to append
 * @return a freshly allocated array of length {@code replicas.length + 1}
 */
public static int[] copyWith(int[] replicas, int value) {
    final int oldLength = replicas.length;
    final int[] extended = new int[oldLength + 1];
    System.arraycopy(replicas, 0, extended, 0, oldLength);
    extended[oldLength] = value;
    return extended;
}
/* Unit test: appending to an empty array and to a non-empty array. */
@Test public void testCopyWith() { assertArrayEquals(new int[] {-1}, Replicas.copyWith(new int[] {}, -1)); assertArrayEquals(new int[] {1, 2, 3, 4}, Replicas.copyWith(new int[] {1, 2, 3}, 4)); }
/* Builds a reverse-proxy router function for the given plugin, normalizing a possibly
 * null plugin name via nullSafePluginName before delegating. May return null. */
@Nullable public RouterFunction<ServerResponse> create(ReverseProxy reverseProxy, String pluginName) { return createReverseProxyRouterFunction(reverseProxy, nullSafePluginName(pluginName)); }
/* Integration-style test: serves a classpath static resource through the reverse-proxy
 * router and asserts 200 OK, the configured 7-day Cache-Control max-age, a non-null
 * Last-Modified header (useLastModified=true), and the exact file content. */
@Test void shouldProxyStaticResourceWithCacheControl() throws FileNotFoundException { var cache = webProperties.getResources().getCache(); cache.setUseLastModified(true); cache.getCachecontrol().setMaxAge(Duration.ofDays(7)); var routerFunction = factory.create(mockReverseProxy(), "fakeA"); assertNotNull(routerFunction); var webClient = WebTestClient.bindToRouterFunction(routerFunction).build(); var pluginWrapper = Mockito.mock(PluginWrapper.class); var pluginRoot = ResourceUtils.getURL("classpath:plugin/plugin-for-reverseproxy/"); var classLoader = new URLClassLoader(new URL[] {pluginRoot}); when(pluginWrapper.getPluginClassLoader()).thenReturn(classLoader); when(pluginManager.getPlugin("fakeA")).thenReturn(pluginWrapper); webClient.get().uri("/plugins/fakeA/assets/static/test.txt") .exchange() .expectStatus().isOk() .expectHeader().cacheControl(CacheControl.maxAge(Duration.ofDays(7))) .expectHeader().value(HttpHeaders.LAST_MODIFIED, Assertions::assertNotNull) .expectBody(String.class).isEqualTo("Fake content.") ; }
/* Synchronous wrapper: blocks on the async id generation via get(). */
@Override public long nextId() { return get(nextIdAsync()); }
/* Unit test: a fresh RIdGenerator yields a strictly sequential series starting at 1,
 * checked across 100103 ids (spans more than one internally allocated id batch). */
@Test public void testEmpty() { RIdGenerator generator = redisson.getIdGenerator("test"); for (int i = 1; i <= 100103; i++) { assertThat(generator.nextId()).isEqualTo(i); } }
/* Admin API: commits the given partition offsets on behalf of a consumer group by
 * driving an AlterConsumerGroupOffsetsHandler through the coordinator-aware admin
 * driver, then exposes the per-partition future keyed by the group's CoordinatorKey. */
@Override public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets( String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options ) { SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> future = AlterConsumerGroupOffsetsHandler.newFuture(groupId); AlterConsumerGroupOffsetsHandler handler = new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext); invokeDriver(handler, future, options.timeoutMs); return new AlterConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId))); }
/* Unit test (happy path): prepares coordinator + OffsetCommit responses for tp1/tp2,
 * commits offsets for both, asserts success, and asserts that querying the result for
 * a partition that was never part of the request (tp3) fails with
 * IllegalArgumentException. */
@Test public void testAlterConsumerGroupOffsets() throws Exception { // Happy path final TopicPartition tp1 = new TopicPartition("foo", 0); final TopicPartition tp2 = new TopicPartition("bar", 0); final TopicPartition tp3 = new TopicPartition("foobar", 0); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse( prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); Map<TopicPartition, Errors> responseData = new HashMap<>(); responseData.put(tp1, Errors.NONE); responseData.put(tp2, Errors.NONE); env.kafkaClient().prepareResponse(new OffsetCommitResponse(0, responseData)); Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(); offsets.put(tp1, new OffsetAndMetadata(123L)); offsets.put(tp2, new OffsetAndMetadata(456L)); final AlterConsumerGroupOffsetsResult result = env.adminClient().alterConsumerGroupOffsets( GROUP_ID, offsets); assertNull(result.all().get()); assertNull(result.partitionResult(tp1).get()); assertNull(result.partitionResult(tp2).get()); TestUtils.assertFutureError(result.partitionResult(tp3), IllegalArgumentException.class); } }
/* RM client RPC: returns an application report. Rejects a null application id, resolves
 * the caller UGI for auditing, verifies the app exists and the user may access it
 * (ApplicationNotFoundException is raised inside verifyUserAccessForRMApp for unknown
 * ids), then builds the report with an allowAccess flag from the VIEW_APP ACL check. */
@Override public GetApplicationReportResponse getApplicationReport( GetApplicationReportRequest request) throws YarnException { ApplicationId applicationId = request.getApplicationId(); if (applicationId == null) { throw new ApplicationNotFoundException("Invalid application id: null"); } UserGroupInformation callerUGI = getCallerUgi(applicationId, AuditConstants.GET_APP_REPORT); RMApp application = verifyUserAccessForRMApp(applicationId, callerUGI, AuditConstants.GET_APP_REPORT, ApplicationAccessType.VIEW_APP, false); boolean allowAccess = checkAccess(callerUGI, application.getUser(), ApplicationAccessType.VIEW_APP, application); ApplicationReport report = application.createAndGetApplicationReport(callerUGI.getUserName(), allowAccess); GetApplicationReportResponse response = recordFactory .newRecordInstance(GetApplicationReportResponse.class); response.setApplicationReport(report); return response; }
/* Unit test: with an empty RMApps map, requesting a report for an unknown app id must
 * throw ApplicationNotFoundException with the exact "doesn't exist in RM" message. */
@Test public void testNonExistingApplicationReport() throws YarnException { RMContext rmContext = mock(RMContext.class); when(rmContext.getRMApps()).thenReturn( new ConcurrentHashMap<ApplicationId, RMApp>()); ClientRMService rmService = new ClientRMService(rmContext, null, null, null, null, null); GetApplicationReportRequest request = recordFactory .newRecordInstance(GetApplicationReportRequest.class); request.setApplicationId(ApplicationId.newInstance(0, 0)); try { rmService.getApplicationReport(request); Assert.fail(); } catch (ApplicationNotFoundException ex) { Assert.assertEquals(ex.getMessage(), "Application with id '" + request.getApplicationId() + "' doesn't exist in RM. Please check that the " + "job submission was successful."); } }
/* Accessor for the CORS allow-credentials flag. */
public boolean isCredentialsAllowed() { return allowCredentials; }
/* Unit test: a config built with allowCredentials() reports isCredentialsAllowed()==true. */
@Test public void allowCredentials() { final CorsConfig cors = forAnyOrigin().allowCredentials().build(); assertThat(cors.isCredentialsAllowed(), is(true)); }
/**
 * Returns the subscribers of a service matched fuzzily by name.
 *
 * @param serviceName the service name to match
 * @param namespaceId the namespace to search in
 * @param aggregation when {@code true}, query the aggregation service (cluster-wide
 *                    view) and de-duplicate subscribers by their string form;
 *                    when {@code false}, return a defensive copy of the local view
 * @return a list of matching subscribers, never {@code null}
 */
public List<Subscriber> getSubscribers(String serviceName, String namespaceId, boolean aggregation) {
    if (aggregation) {
        Collection<Subscriber> result = aggregationService.getFuzzySubscribers(namespaceId, serviceName);
        if (CollectionUtils.isEmpty(result)) {
            // Typed empty list instead of the raw Collections.EMPTY_LIST (unchecked assignment).
            return Collections.emptyList();
        }
        // De-duplicate: the same subscriber may be reported by several cluster members.
        return result.stream()
                .filter(distinctByKey(Subscriber::toString))
                .collect(Collectors.toList());
    }
    // Copy so callers cannot mutate the local service's internal collection.
    return new LinkedList<>(localService.getFuzzySubscribers(namespaceId, serviceName));
}
/* Unit test: mocks the aggregation service to return one subscriber and verifies the
 * aggregated path returns it with the expected service name. NOTE(review): the broad
 * catch(Exception ignored) would silently hide assertion failures — confirm intent. */
@Test void testGetSubscribersFuzzy() { String serviceName = "test"; String namespaceId = "public"; boolean aggregation = Boolean.TRUE; try { List<Subscriber> clients = new ArrayList<Subscriber>(); Subscriber subscriber = new Subscriber("127.0.0.1:8080", "test", "app", "127.0.0.1", namespaceId, "testGroupName@@test_subscriber", 0); clients.add(subscriber); Mockito.when(this.aggregation.getFuzzySubscribers(Mockito.anyString(), Mockito.anyString())).thenReturn(clients); List<Subscriber> list = subscribeManager.getSubscribers(serviceName, namespaceId, aggregation); assertNotNull(list); assertEquals(1, list.size()); assertEquals("testGroupName@@test_subscriber", list.get(0).getServiceName()); } catch (Exception ignored) { } }
/* Refreshes the locally cached Raft group configuration by asking the group leader for
 * its current peer/learner lists via a GetPeers RPC. Falls back to refreshLeader() once
 * if no leader is cached; returns ENOENT when the group was never registered, and a
 * generic -1 status when the leader cannot be resolved or connected to. On success the
 * cached configuration is replaced (logged when it actually changed); RPC errors are
 * mapped onto the returned Status rather than thrown. */
public Status refreshConfiguration(final CliClientService cliClientService, final String groupId, final int timeoutMs) throws InterruptedException, TimeoutException { Requires.requireTrue(!StringUtils.isBlank(groupId), "Blank group id"); Requires.requireTrue(timeoutMs > 0, "Invalid timeout: " + timeoutMs); final Configuration conf = getConfiguration(groupId); if (conf == null) { return new Status(RaftError.ENOENT, "Group %s is not registered in RouteTable, forgot to call updateConfiguration?", groupId); } final Status st = Status.OK(); PeerId leaderId = selectLeader(groupId); if (leaderId == null) { refreshLeader(cliClientService, groupId, timeoutMs); leaderId = selectLeader(groupId); } if (leaderId == null) { st.setError(-1, "Fail to get leader of group %s", groupId); return st; } if (!cliClientService.connect(leaderId.getEndpoint())) { st.setError(-1, "Fail to init channel to %s", leaderId); return st; } final CliRequests.GetPeersRequest.Builder rb = CliRequests.GetPeersRequest.newBuilder(); rb.setGroupId(groupId); rb.setLeaderId(leaderId.toString()); try { final Message result = cliClientService.getPeers(leaderId.getEndpoint(), rb.build(), null).get(timeoutMs, TimeUnit.MILLISECONDS); if (result instanceof CliRequests.GetPeersResponse) { final CliRequests.GetPeersResponse resp = (CliRequests.GetPeersResponse) result; final Configuration newConf = new Configuration(); for (final String peerIdStr : resp.getPeersList()) { final PeerId newPeer = new PeerId(); newPeer.parse(peerIdStr); newConf.addPeer(newPeer); } for (final String learnerIdStr : resp.getLearnersList()) { final PeerId newLearner = new PeerId(); newLearner.parse(learnerIdStr); newConf.addLearner(newLearner); } if (!conf.equals(newConf)) { LOG.info("Configuration of replication group {} changed from {} to {}", groupId, conf, newConf); } updateConfiguration(groupId, newConf); } else { final RpcRequests.ErrorResponse resp = (RpcRequests.ErrorResponse) result; st.setError(resp.getErrorCode(), 
resp.getErrorMsg()); } } catch (final Exception e) { st.setError(-1, e.getMessage()); } return st; }
/* Unit test: seeds the route table with only the leader peer, refreshes, and asserts
 * the full cluster peer set is recovered from the leader. */
@Test public void testRefreshConfiguration() throws Exception { final RouteTable rt = RouteTable.getInstance(); final List<PeerId> partConf = new ArrayList<>(); partConf.add(cluster.getLeader().getLeaderId()); // part of peers conf, only contains leader peer rt.updateConfiguration(groupId, new Configuration(partConf)); // fetch all conf final Status st = rt.refreshConfiguration(cliClientService, groupId, 10000); assertTrue(st.isOk()); final Configuration newCnf = rt.getConfiguration(groupId); assertArrayEquals(new HashSet<>(cluster.getPeers()).toArray(), new HashSet<>(newCnf.getPeerSet()).toArray()); }
/* Key-serde factory entry point: forwards all arguments unchanged to createInner. */
@Override public Serde<GenericKey> create( final FormatInfo format, final PersistenceSchema schema, final KsqlConfig ksqlConfig, final Supplier<SchemaRegistryClient> schemaRegistryClientFactory, final String loggerNamePrefix, final ProcessingLogContext processingLogContext, final Optional<TrackedCallback> tracker ) { return createInner( format, schema, ksqlConfig, schemaRegistryClientFactory, loggerNamePrefix, processingLogContext, tracker ); }
/* Unit test: creating a key serde for a schema with zero key columns must not throw. */
@Test public void shouldNotThrowOnNoKeyColumns() { // Given: schema = PersistenceSchema.from(ImmutableList.of(), SerdeFeatures.of()); // When: factory .create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt, Optional.empty()); // Then (did not throw): }
/* Applies a new task assignment to this Streams client. Existing tasks are rectified
 * first (kept/resumed, recycled between active and standby, or closed clean), with
 * affected task ids locked for the duration; unrecognized tasks from unknown named
 * topologies are parked as pending. With a state updater present, failures collected
 * from it are merged with close/recycle exceptions and rethrown via
 * maybeThrowTaskExceptions before any genuinely new tasks are created. */
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks, final Map<TaskId, Set<TopicPartition>> standbyTasks) { log.info("Handle new assignment with:\n" + "\tNew active tasks: {}\n" + "\tNew standby tasks: {}\n" + "\tExisting active tasks: {}\n" + "\tExisting standby tasks: {}", activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds()); topologyMetadata.addSubscribedTopicsFromAssignment( activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()), logPrefix ); final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks); final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks); final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>(); final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id)); final Set<TaskId> tasksToLock = tasks.allTaskIds().stream() .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x)) .collect(Collectors.toSet()); maybeLockTasks(tasksToLock); // first put aside those unrecognized tasks because of unknown named-topologies tasks.clearPendingTasksToCreate(); tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate)); tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate)); // first rectify all existing tasks: // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them // 3. 
otherwise, close them since they are no longer owned final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>(); if (stateUpdater == null) { handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean); } else { handleTasksWithStateUpdater( activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean, failedTasks ); failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater()); } final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean); maybeUnlockTasks(tasksToLock); failedTasks.putAll(taskCloseExceptions); maybeThrowTaskExceptions(failedTasks); createNewTasks(activeTasksToCreate, standbyTasksToCreate); }
/* Unit test: when converting a restoring active task to standby throws, handleAssignment
 * must surface a StreamsException, close the task dirty, and add nothing to the state
 * updater or the pending-init set. */
@Test public void shouldHandleExceptionThrownDuringRecyclingActiveTask() { final StreamTask activeTaskToRecycle = statefulTask(taskId00, taskId00ChangelogPartitions) .inState(State.RESTORING) .withInputPartitions(taskId00Partitions).build(); final TasksRegistry tasks = mock(TasksRegistry.class); final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true); when(stateUpdater.getTasks()).thenReturn(mkSet(activeTaskToRecycle)); when(standbyTaskCreator.createStandbyTaskFromActive(activeTaskToRecycle, activeTaskToRecycle.inputPartitions())) .thenThrow(new RuntimeException()); final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>(); when(stateUpdater.remove(activeTaskToRecycle.id())).thenReturn(future); future.complete(new StateUpdater.RemovedTaskResult(activeTaskToRecycle)); assertThrows( StreamsException.class, () -> taskManager.handleAssignment( Collections.emptyMap(), mkMap(mkEntry(activeTaskToRecycle.id(), activeTaskToRecycle.inputPartitions())) ) ); verify(stateUpdater, never()).add(any()); verify(tasks, never()).addPendingTasksToInit(Collections.singleton(any())); verify(activeTaskToRecycle).closeDirty(); }
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception { // Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL try { final Address address = getAddressFromRpcURL(rpcURL); if (address.host().isDefined() && address.port().isDefined()) { return new InetSocketAddress(address.host().get(), (int) address.port().get()); } else { throw new MalformedURLException(); } } catch (MalformedURLException e) { throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL); } }
/* Unit test: a pekko.tcp URL with host and port resolves to the expected InetSocketAddress. */
@Test void getHostFromRpcURLHandlesAkkaTcpProtocol() throws Exception { final String url = "pekko.tcp://flink@localhost:1234/user/jobmanager"; final InetSocketAddress expected = new InetSocketAddress("localhost", 1234); final InetSocketAddress result = PekkoUtils.getInetSocketAddressFromRpcURL(url); assertThat(result).isEqualTo(expected); }
/**
 * Converts the value to a Double by first obtaining its Long representation via
 * getInteger() and widening it; a null intermediate propagates as a null result.
 *
 * @param object the value to convert
 * @return the numeric value as a Double, or null
 * @throws KettleValueException if the underlying integer conversion fails
 */
@Override
public Double getNumber( Object object ) throws KettleValueException {
  final Long asInteger = getInteger( object );
  return ( asInteger == null ) ? null : asInteger.doubleValue();
}
/* Unit test: with the LEGACY conversion-mode property (and with an invalid property
 * value, which falls back to the same behavior) a timestamp converts to its
 * millisecond value as a double. */
@Test public void testConvertTimestampToNumber_DefaultMode() throws KettleValueException { System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_LEGACY ); ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp(); double result = valueMetaTimestamp.getNumber( TIMESTAMP_WITH_NANOSECONDS ); assertEquals( 1567308896123.0, result, 0 ); System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, "Something invalid!" ); valueMetaTimestamp = new ValueMetaTimestamp(); result = valueMetaTimestamp.getNumber( TIMESTAMP_WITH_NANOSECONDS ); assertEquals( 1567308896123.0, result, 0 ); }
/* REST endpoint: returns basic node details — cluster id (defaulting to "UNKNOWN"),
 * node id, classpath version, and a static tagline — as JSON. */
@GET @Timed @ApiOperation(value = "A few details about the Graylog node.") @Produces(MediaType.APPLICATION_JSON) public HelloWorldResponse helloWorld() { final ClusterId clusterId = clusterConfigService.getOrDefault(ClusterId.class, ClusterId.create("UNKNOWN")); return HelloWorldResponse.create( clusterId.clusterId(), nodeId.getNodeId(), Version.CURRENT_CLASSPATH.toString(), "Manage your logs in the dark and have lasers going and make it look like you're from space!" ); }
/* Unit test: the hello-world resource returns a non-null response with the expected
 * cluster id and node id constants. */
@Test public void rootResourceShouldReturnGeneralStats() throws Exception { final HelloWorldResponse helloWorldResponse = this.helloWorldResource.helloWorld(); assertThat(helloWorldResponse).isNotNull(); assertThat(helloWorldResponse.clusterId()).isEqualTo(CK_CLUSTER_ID); assertThat(helloWorldResponse.nodeId()).isEqualTo(CK_NODE_ID); }
/* Controller handler for AssignReplicasToDirs: validates metadata-version support,
 * broker epoch and registration, then for each requested directory/topic/partition
 * either records an error (UNKNOWN_TOPIC_ID, UNKNOWN_TOPIC_OR_PARTITION, or
 * NOT_LEADER_OR_FOLLOWER when the broker is not a replica) or emits a partition-change
 * record assigning the replica to the directory. Partitions assigned to a directory
 * the broker does not report online are collected and fed through
 * generateLeaderAndIsrUpdates at the end. Returns the records plus the per-partition
 * response. */
ControllerResult<AssignReplicasToDirsResponseData> handleAssignReplicasToDirs(AssignReplicasToDirsRequestData request) { if (!featureControl.metadataVersion().isDirectoryAssignmentSupported()) { throw new UnsupportedVersionException("Directory assignment is not supported yet."); } int brokerId = request.brokerId(); clusterControl.checkBrokerEpoch(brokerId, request.brokerEpoch()); BrokerRegistration brokerRegistration = clusterControl.brokerRegistrations().get(brokerId); if (brokerRegistration == null) { throw new BrokerIdNotRegisteredException("Broker ID " + brokerId + " is not currently registered"); } List<ApiMessageAndVersion> records = new ArrayList<>(); AssignReplicasToDirsResponseData response = new AssignReplicasToDirsResponseData(); Set<TopicIdPartition> leaderAndIsrUpdates = new HashSet<>(); for (AssignReplicasToDirsRequestData.DirectoryData reqDir : request.directories()) { Uuid dirId = reqDir.id(); boolean directoryIsOffline = !brokerRegistration.hasOnlineDir(dirId); AssignReplicasToDirsResponseData.DirectoryData resDir = new AssignReplicasToDirsResponseData.DirectoryData().setId(dirId); for (AssignReplicasToDirsRequestData.TopicData reqTopic : reqDir.topics()) { Uuid topicId = reqTopic.topicId(); Errors topicError = Errors.NONE; TopicControlInfo topicInfo = this.topics.get(topicId); if (topicInfo == null) { log.warn("AssignReplicasToDirsRequest from broker {} references unknown topic ID {}", brokerId, topicId); topicError = Errors.UNKNOWN_TOPIC_ID; } AssignReplicasToDirsResponseData.TopicData resTopic = new AssignReplicasToDirsResponseData.TopicData().setTopicId(topicId); for (AssignReplicasToDirsRequestData.PartitionData reqPartition : reqTopic.partitions()) { int partitionIndex = reqPartition.partitionIndex(); Errors partitionError = topicError; if (topicError == Errors.NONE) { String topicName = topicInfo.name; PartitionRegistration partitionRegistration = topicInfo.parts.get(partitionIndex); if (partitionRegistration == null) { 
log.warn("AssignReplicasToDirsRequest from broker {} references unknown partition {}-{}", brokerId, topicName, partitionIndex); partitionError = Errors.UNKNOWN_TOPIC_OR_PARTITION; } else if (!Replicas.contains(partitionRegistration.replicas, brokerId)) { log.warn("AssignReplicasToDirsRequest from broker {} references non assigned partition {}-{}", brokerId, topicName, partitionIndex); partitionError = Errors.NOT_LEADER_OR_FOLLOWER; } else { Optional<ApiMessageAndVersion> partitionChangeRecord = new PartitionChangeBuilder( partitionRegistration, topicId, partitionIndex, new LeaderAcceptor(clusterControl, partitionRegistration), featureControl.metadataVersion(), getTopicEffectiveMinIsr(topicName) ) .setDirectory(brokerId, dirId) .setDefaultDirProvider(clusterDescriber) .build(); partitionChangeRecord.ifPresent(records::add); if (directoryIsOffline) { leaderAndIsrUpdates.add(new TopicIdPartition(topicId, partitionIndex)); } if (log.isDebugEnabled()) { log.debug("Broker {} assigned partition {}:{} to {} dir {}", brokerId, topics.get(topicId).name(), partitionIndex, directoryIsOffline ? "OFFLINE" : "ONLINE", dirId); } } } resTopic.partitions().add(new AssignReplicasToDirsResponseData.PartitionData(). setPartitionIndex(partitionIndex). setErrorCode(partitionError.code())); } resDir.topics().add(resTopic); } response.directories().add(resDir); } if (!leaderAndIsrUpdates.isEmpty()) { generateLeaderAndIsrUpdates("offline-dir-assignment", brokerId, NO_LEADER, NO_LEADER, records, leaderAndIsrUpdates.iterator()); } return ControllerResult.of(records, response); }
/* Unit test: on a metadata version without directory-assignment support (IBP_3_7_IV1)
 * the handler must throw UnsupportedVersionException. */
@Test void testHandleAssignReplicasToDirsFailsOnOlderMv() { ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder(). setMetadataVersion(MetadataVersion.IBP_3_7_IV1). build(); assertThrows(UnsupportedVersionException.class, () -> ctx.replicationControl.handleAssignReplicasToDirs(new AssignReplicasToDirsRequestData())); }
/* Processes sorted pop checkpoints for this revive queue. Skips work when this node
 * should not run revive (slave), stops early when a checkpoint is too recent, and
 * advances past checkpoints whose topic or subscription group no longer exists.
 * Throttles when more than 3 revive requests are in flight, re-queuing any request
 * stuck longer than 30s. After reviving each checkpoint the revive offset advances;
 * the new offset is committed only if it moved forward and revive is still permitted. */
protected void mergeAndRevive(ConsumeReviveObj consumeReviveObj) throws Throwable { ArrayList<PopCheckPoint> sortList = consumeReviveObj.genSortList(); POP_LOGGER.info("reviveQueueId={}, ck listSize={}", queueId, sortList.size()); if (sortList.size() != 0) { POP_LOGGER.info("reviveQueueId={}, 1st ck, startOffset={}, reviveOffset={}; last ck, startOffset={}, reviveOffset={}", queueId, sortList.get(0).getStartOffset(), sortList.get(0).getReviveOffset(), sortList.get(sortList.size() - 1).getStartOffset(), sortList.get(sortList.size() - 1).getReviveOffset()); } long newOffset = consumeReviveObj.oldOffset; for (PopCheckPoint popCheckPoint : sortList) { if (!shouldRunPopRevive) { POP_LOGGER.info("slave skip ck process, revive topic={}, reviveQueueId={}", reviveTopic, queueId); break; } if (consumeReviveObj.endTime - popCheckPoint.getReviveTime() <= (PopAckConstants.ackTimeInterval + PopAckConstants.SECOND)) { break; } // check normal topic, skip ck , if normal topic is not exist String normalTopic = KeyBuilder.parseNormalTopic(popCheckPoint.getTopic(), popCheckPoint.getCId()); if (brokerController.getTopicConfigManager().selectTopicConfig(normalTopic) == null) { POP_LOGGER.warn("reviveQueueId={}, can not get normal topic {}, then continue", queueId, popCheckPoint.getTopic()); newOffset = popCheckPoint.getReviveOffset(); continue; } if (null == brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(popCheckPoint.getCId())) { POP_LOGGER.warn("reviveQueueId={}, can not get cid {}, then continue", queueId, popCheckPoint.getCId()); newOffset = popCheckPoint.getReviveOffset(); continue; } while (inflightReviveRequestMap.size() > 3) { waitForRunning(100); Pair<Long, Boolean> pair = inflightReviveRequestMap.firstEntry().getValue(); if (!pair.getObject2() && System.currentTimeMillis() - pair.getObject1() > 1000 * 30) { PopCheckPoint oldCK = inflightReviveRequestMap.firstKey(); rePutCK(oldCK, pair); inflightReviveRequestMap.remove(oldCK); POP_LOGGER.warn("stay 
too long, remove from reviveRequestMap, {}, {}, {}, {}", popCheckPoint.getTopic(), popCheckPoint.getBrokerName(), popCheckPoint.getQueueId(), popCheckPoint.getStartOffset()); } } reviveMsgFromCk(popCheckPoint); newOffset = popCheckPoint.getReviveOffset(); } if (newOffset > consumeReviveObj.oldOffset) { if (!shouldRunPopRevive) { POP_LOGGER.info("slave skip commit, revive topic={}, reviveQueueId={}", reviveTopic, queueId); return; } this.brokerController.getConsumerOffsetManager().commitOffset(PopAckConstants.LOCAL_HOST, PopAckConstants.REVIVE_GROUP, reviveTopic, queueId, newOffset); } reviveOffset = newOffset; consumeReviveObj.newOffset = newOffset; }
/* Unit test: when writing the retry message fails (MESSAGE_ILLEGAL) and rePut skipping
 * is disabled, mergeAndRevive must attempt the retry-topic write once and rewrite the
 * checkpoint via the message store once; also verifies the retry topic name used. */
@Test public void testReviveMsgFromCk_messageFound_writeRetryFailed_rewriteCK_noEnd() throws Throwable { brokerConfig.setSkipWhenCKRePutReachMaxTimes(false); PopCheckPoint ck = buildPopCheckPoint(0, 0, 0); ck.setRePutTimes(Byte.MAX_VALUE + ""); PopReviveService.ConsumeReviveObj reviveObj = new PopReviveService.ConsumeReviveObj(); reviveObj.map.put("", ck); reviveObj.endTime = System.currentTimeMillis(); StringBuilder actualRetryTopic = new StringBuilder(); when(escapeBridge.getMessageAsync(anyString(), anyLong(), anyInt(), anyString(), anyBoolean())) .thenReturn(CompletableFuture.completedFuture(Triple.of(new MessageExt(), "", false))); when(escapeBridge.putMessageToSpecificQueue(any(MessageExtBrokerInner.class))).thenAnswer(invocation -> { MessageExtBrokerInner msg = invocation.getArgument(0); actualRetryTopic.append(msg.getTopic()); return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, new AppendMessageResult(AppendMessageStatus.MESSAGE_SIZE_EXCEEDED)); }); popReviveService.mergeAndRevive(reviveObj); Assert.assertEquals(KeyBuilder.buildPopRetryTopic(TOPIC, GROUP, false), actualRetryTopic.toString()); verify(escapeBridge, times(1)).putMessageToSpecificQueue(any(MessageExtBrokerInner.class)); // write retry verify(messageStore, times(1)).putMessage(any(MessageExtBrokerInner.class)); // rewrite CK }
/* Resolves the container app root: empty config falls back to the Jetty default for WAR
 * projects or the JavaContainerBuilder default otherwise; an unparsable path (e.g. a
 * Windows drive-letter path) is rethrown as InvalidAppRootException. */
@VisibleForTesting static AbsoluteUnixPath getAppRootChecked( RawConfiguration rawConfiguration, ProjectProperties projectProperties) throws InvalidAppRootException { String appRoot = rawConfiguration.getAppRoot(); if (appRoot.isEmpty()) { appRoot = projectProperties.isWarProject() ? DEFAULT_JETTY_APP_ROOT : JavaContainerBuilder.DEFAULT_APP_ROOT; } try { return AbsoluteUnixPath.get(appRoot); } catch (IllegalArgumentException ex) { throw new InvalidAppRootException(appRoot, appRoot, ex); } }
/* Unit test: a Windows drive-letter app root is rejected with InvalidAppRootException
 * whose message is the offending path. */
@Test public void testGetAppRootChecked_errorOnWindowsPathWithDriveLetter() { when(rawConfiguration.getAppRoot()).thenReturn("C:\\windows\\path"); Exception exception = assertThrows( InvalidAppRootException.class, () -> PluginConfigurationProcessor.getAppRootChecked( rawConfiguration, projectProperties)); assertThat(exception).hasMessageThat().isEqualTo("C:\\windows\\path"); }
/* Synchronously submits this request through the web3j service, deserializing into
 * the request's configured response type. */
public T send() throws IOException { return web3jService.send(this, responseType); }
/* Unit test: an eth_getLogs filter constructed with a block hash serializes into the
 * expected JSON-RPC payload (blockHash field, empty topics, address list). */
@Test public void testEthGetLogsWithBlockHash() throws Exception { web3j.ethGetLogs( new EthFilter( "0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331", "")) .send(); verifyResult( "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getLogs\"," + "\"params\":[{\"topics\":[]," + "\"blockHash\":\"0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331\"," + "\"address\":[\"\"]}],\"id\":<generatedValue>}"); }
/* Asynchronously reads a value from the default state store after verifying that state
 * is enabled for this context. */
@Override public CompletableFuture<ByteBuffer> getStateAsync(String key) { ensureStateEnabled(); return defaultStateStore.getAsync(key); }
/* Unit test: with a mocked state store in place, getStateAsync delegates exactly once
 * with the requested key. */
@Test public void testGetStateStateEnabled() throws Exception { context.defaultStateStore = mock(BKStateStoreImpl.class); context.getStateAsync("test-key"); verify(context.defaultStateStore, times(1)).getAsync(eq("test-key")); }
/**
 * Tells whether the given Authorization header value carries a bearer token:
 * it must contain text and start with the configured token prefix.
 *
 * @param authorizationHeader the raw Authorization header value, may be null
 * @return true if the header is a bearer token
 */
public static boolean isBearerToken(final String authorizationHeader) {
    if (!StringUtils.hasText(authorizationHeader)) {
        return false;
    }
    return authorizationHeader.startsWith(TOKEN_PREFIX);
}
/* Unit test: a header of the form "Bearer ..." is recognized as a bearer token. */
@Test void testIsBearerToken_WithValidBearerToken() { // Given String authorizationHeader = "Bearer sampleAccessToken"; // When boolean result = Token.isBearerToken(authorizationHeader); // Then assertTrue(result); }
/* Static factory for a Flatten.PCollections transform. */
public static <T> PCollections<T> pCollections() { return new PCollections<>(); }
/* Unit test: flattening two inputs windowed with compatible Sessions window fns
 * produces an output whose windowing strategy is compatible with the 2-minute-gap
 * Sessions fn. */
@Test @Category(NeedsRunner.class) public void testCompatibleWindowFnPropagation() { PCollection<String> input1 = p.apply("CreateInput1", Create.of("Input1")) .apply("Window1", Window.into(Sessions.withGapDuration(Duration.standardMinutes(1)))); PCollection<String> input2 = p.apply("CreateInput2", Create.of("Input2")) .apply("Window2", Window.into(Sessions.withGapDuration(Duration.standardMinutes(2)))); PCollection<String> output = PCollectionList.of(input1).and(input2).apply(Flatten.pCollections()); p.run(); Assert.assertTrue( output .getWindowingStrategy() .getWindowFn() .isCompatible(Sessions.withGapDuration(Duration.standardMinutes(2)))); }
/**
 * Tests whether the given address is contained in the configured address list.
 * A null address, or an absent list, never matches.
 *
 * @param ipAddress the address to test, may be null
 * @return true only when the list exists and includes the address
 */
@Override
public boolean isIn(String ipAddress) {
    if (ipAddress != null && addressList != null) {
        return addressList.includes(ipAddress);
    }
    return false;
}
/* Unit test: a FileBasedIPList backed by a missing file matches nothing. */
@Test public void testFileMissing() { IPList ipl = new FileBasedIPList("missingips.txt"); assertFalse("110.113.221.222 is in the list", ipl.isIn("110.113.221.222")); }
/* Log-sanitizing converter: leaves input untouched when the event's first marker
 * contains the CRLF-safe marker or the logger is known safe; otherwise replaces every
 * \n, \r and \t with "_" (ANSI-colored when a style option is configured). */
@Override protected String transform(ILoggingEvent event, String in) { AnsiElement element = ELEMENTS.get(getFirstOption()); List<Marker> markers = event.getMarkerList(); if ((markers != null && !markers.isEmpty() && markers.get(0).contains(CRLF_SAFE_MARKER)) || isLoggerSafe(event)) { return in; } String replacement = element == null ? "_" : toAnsiString("_", element); return in.replaceAll("[\n\r\t]", replacement); }
/* Unit test: with no CRLF-safe marker and an unsafe logger name, newlines and carriage
 * returns in the message are replaced with underscores. */
@Test void transformShouldReplaceNewlinesAndCarriageReturnsWithUnderscoreWhenMarkersDoNotContainCRLFSafeMarkerAndLoggerIsNotSafe() { ILoggingEvent event = mock(ILoggingEvent.class); List<Marker> markers = Collections.emptyList(); when(event.getMarkerList()).thenReturn(markers); when(event.getLoggerName()).thenReturn("com.mycompany.myapp.example.Logger"); String input = "Test\ninput\rstring"; CRLFLogConverter converter = new CRLFLogConverter(); String result = converter.transform(event, input); assertEquals("Test_input_string", result); }
/* Normalizes null values in a metadata map: either converts nulls to empty strings
 * (for backing DBs like Oracle that persist "" as null) or prunes null-valued entries.
 * Returns null for a null input. Uses a LinkedHashMap for deterministic iteration
 * order (HIVE-8707). */
public static Map<String,String> trimMapNulls( Map<String,String> dnMap, boolean retrieveMapNullsAsEmptyStrings){ if (dnMap == null){ return null; } // Must be deterministic order map - see HIVE-8707 // => we use Maps.newLinkedHashMap instead of Maps.newHashMap if (retrieveMapNullsAsEmptyStrings) { // convert any nulls present in map values to empty strings - this is done in the case // of backing dbs like oracle which persist empty strings as nulls. return Maps.newLinkedHashMap(Maps.transformValues(dnMap, transFormNullsToEmptyString)); } else { // prune any nulls present in map values - this is the typical case. return Maps.newLinkedHashMap(Maps.filterValues(dnMap, Predicates.notNull())); } }
// With retrieveMapNullsAsEmptyStrings=true, null values become empty strings
// while all other entries are preserved unchanged.
@Test
public void testTrimMapNullsXform() throws Exception {
    Map<String,String> m = new HashMap<>();
    m.put("akey","aval");
    m.put("blank","");
    m.put("null",null);
    Map<String, String> expected = ImmutableMap.of("akey", "aval", "blank", "", "null", "");
    Map<String,String> xformed = MetaStoreServerUtils.trimMapNulls(m,true);
    assertThat(xformed, is(expected));
}
/**
 * Health check based purely on database connectivity: GREEN when a connection
 * can be established, otherwise the shared RED status.
 */
@Override
public Health check() {
    return isConnectedToDB() ? Health.GREEN : RED_HEALTH;
}
// Any exception raised while probing the database must surface as a RED status.
@Test
public void status_is_RED_with_single_cause_if_any_error_occurs_when_checking_DB() {
    when(isAliveMapper.isAlive()).thenThrow(new RuntimeException("simulated runtime exception when querying DB"));
    Health health = underTest.check();
    verifyRedStatus(health);
}
/**
 * Stores a property after rejecting null key/value and trimming the value.
 *
 * @throws NullPointerException if either argument is null
 */
@Override
protected void set(String key, String value) {
    // Validate in the same order as before: key first, then value.
    String checkedKey = requireNonNull(key, "key can't be null");
    String trimmedValue = requireNonNull(value, "value can't be null").trim();
    localProperties.put(checkedKey, trimmedValue);
}
// Null values are rejected eagerly with a descriptive NullPointerException.
@Test
public void set_will_throw_NPE_if_value_is_null() {
    assertThatThrownBy(() -> underTest.set(randomAlphanumeric(10), null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("value can't be null");
}
/**
 * Sequences are not tracked for batched event data.
 *
 * @throws UnsupportedOperationException always
 */
@Override public long getSequence() { throw new UnsupportedOperationException(); }
// getSequence is unsupported for batch event data and must always throw.
@Test(expected = UnsupportedOperationException.class)
public void testGetSequence() {
    batchEventData.getSequence();
}
// SQL scalar: returns the byte length of a VARBINARY value.
@Description("length of the given binary")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
public static long length(@SqlType(StandardTypes.VARBINARY) Slice slice) {
    return slice.length();
}
// The scalar hashCode of a VARBINARY slice must agree with the type's block hash.
@Test
public void testHashCode() {
    Slice data = Slices.wrappedBuffer(ALL_BYTES);
    Block block = VARBINARY.createBlockBuilder(null, 1, ALL_BYTES.length)
        .writeBytes(data, 0, data.length())
        .closeEntry()
        .build();
    assertEquals(VarbinaryOperators.hashCode(data), VARBINARY.hash(block, 0));
}
/**
 * Registers an email address for the account bound to the caller's
 * Mijn DigiD session (identified by the session header).
 */
@PostMapping("/register")
public EmailRegisterResult registerEmail(@RequestBody @Valid EmailRegisterRequest request,
        @RequestHeader(MijnDigidSession.MIJN_DIGID_SESSION_HEADER) String mijnDigiDsessionId) {
    // Resolve the session first; it carries the account the email belongs to.
    MijnDigidSession session = retrieveMijnDigiDSession(mijnDigiDsessionId);
    return accountService.registerEmail(session.getAccountId(), request);
}
// A valid registration must echo the service's status, error, max-amount and
// address back to the caller unchanged.
@Test
public void validEmailRegister() {
    EmailRegisterRequest request = new EmailRegisterRequest();
    request.setEmail("email");
    EmailRegisterResult result = new EmailRegisterResult();
    result.setStatus(Status.OK);
    result.setError("error");
    result.setEmailAddress("address");
    result.setMaxAmountEmails(3);
    when(accountService.registerEmail(anyLong(), any())).thenReturn(result);
    EmailRegisterResult registerResult = emailController.registerEmail(request, mijnDigiDSession.getId());
    assertEquals(Status.OK, registerResult.getStatus());
    assertEquals("error", registerResult.getError());
    assertEquals(3, registerResult.getMaxAmountEmails());
    assertEquals("address", registerResult.getEmailAddress());
}
/** Creates a data-source task bound to the given runtime environment. */
public DataSourceTask(Environment environment) {
    super(environment);
}
@Test void testDataSourceTask() throws IOException { int keyCnt = 100; int valCnt = 20; this.outList = new ArrayList<Record>(); File tempTestFile = new File(tempFolder.toFile(), UUID.randomUUID().toString()); InputFilePreparator.prepareInputFile( new UniformRecordGenerator(keyCnt, valCnt, false), tempTestFile, true); super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE); super.addOutput(this.outList); DataSourceTask<Record> testTask = new DataSourceTask<>(this.mockEnv); super.registerFileInputTask( testTask, MockInputFormat.class, tempTestFile.toURI().toString(), "\n"); try { testTask.invoke(); } catch (Exception e) { System.err.println(e); fail("Invoke method caused exception."); } try { Field formatField = DataSourceTask.class.getDeclaredField("format"); formatField.setAccessible(true); MockInputFormat inputFormat = (MockInputFormat) formatField.get(testTask); assertThat(inputFormat.opened) .withFailMessage( "Invalid status of the input format. Expected for opened: true, Actual: %b", inputFormat.opened) .isTrue(); assertThat(inputFormat.closed) .withFailMessage( "Invalid status of the input format. Expected for closed: true, Actual: %b", inputFormat.closed) .isTrue(); } catch (Exception e) { System.err.println(e); fail("Reflection error while trying to validate inputFormat status."); } assertThat(this.outList) .withFailMessage( "Invalid output size. Expected: %d, Actual: %d", keyCnt * valCnt, outList.size()) .hasSize(keyCnt * valCnt); HashMap<Integer, HashSet<Integer>> keyValueCountMap = new HashMap<>(keyCnt); for (Record kvp : this.outList) { int key = kvp.getField(0, IntValue.class).getValue(); int val = kvp.getField(1, IntValue.class).getValue(); if (!keyValueCountMap.containsKey(key)) { keyValueCountMap.put(key, new HashSet<Integer>()); } keyValueCountMap.get(key).add(val); } assertThat(keyValueCountMap) .withFailMessage( "Invalid key count in out file. 
Expected: %d, Actual: %d", keyCnt, keyValueCountMap.size()) .hasSize(keyCnt); for (Integer mapKey : keyValueCountMap.keySet()) { assertThat(keyValueCountMap.get(mapKey)) .withFailMessage( "Invalid value count for key: %d. Expected: %d, Actual: %d", mapKey, valCnt, keyValueCountMap.get(mapKey).size()) .hasSize(valCnt); } }
/**
 * Two counters are equal iff they are the same runtime class and their
 * underlying count maps are equal.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    ItemCounter that = (ItemCounter) o;
    // Simplified from "if (!map.equals(...)) return false; return true;" —
    // equality is fully determined by the map comparison.
    return map.equals(that.map);
}
// equals(null) must return false per the Object.equals contract.
@Test
public void testEquals_returnsFalseOnNull() {
    assertFalse(counter.equals(null));
}
/**
 * Resolves {@code other} against {@code p}, refusing absolute inputs.
 *
 * @param p the base path
 * @param other a path that must be relative
 * @return {@code p} with {@code other} appended
 * @throws IllegalArgumentException if {@code other} denotes an absolute path
 */
public static Path resolveRelative(Path p, String other) {
    Path candidate = Paths.get(other);
    // Guard against path-escape: only relative children may be resolved.
    if (candidate.isAbsolute()) {
        throw new IllegalArgumentException(other + " cannot be an absolute path!");
    }
    return p.resolve(candidate);
}
// Verifies that resolveRelative rejects absolute inputs. The original packed
// both candidate calls into ONE assertThrows lambda, so the second statement
// was dead code (never executed once the first call threw). Each platform's
// genuinely-absolute form is now asserted on its own.
@Test
public void testSafeResolution() throws Exception {
    Path cwd = Paths.get(".");
    String windows = "C:/temp/file.txt";
    String linux = "/root/dir/file.txt";
    // Only one of the two forms is absolute on any given OS; pick the right one
    // so the test passes on both Windows and POSIX file systems.
    String absolute = java.io.File.separatorChar == '\\' ? windows : linux;
    assertThrows(IllegalArgumentException.class, () -> FSUtil.resolveRelative(cwd, absolute));
}
/** Parses command-line arguments; convenience varargs overload of the List-based parse. */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
// "--tags" on the command line should become a tag-expression filter.
@Test
void assigns_filters_from_tags() {
    RuntimeOptions options = parser
        .parse("--tags", "@keep_this")
        .build();
    List<String> tagExpressions = options.getTagExpressions().stream()
        .map(Object::toString)
        .collect(toList());
    assertThat(tagExpressions, contains("@keep_this"));
}
/**
 * Renders a duration in milliseconds as a human-readable span (e.g. "2 hr"),
 * using the largest non-zero unit and, for small leading values, the next unit
 * down; sub-10-second durations get one or two decimal places.
 */
@NonNull
@SuppressFBWarnings(value = "ICAST_IDIV_CAST_TO_DOUBLE", justification = "We want to truncate here.")
public static String getTimeSpanString(long duration) {
    // Break the duration up in to units.
    long years = duration / ONE_YEAR_MS;
    duration %= ONE_YEAR_MS;
    long months = duration / ONE_MONTH_MS;
    duration %= ONE_MONTH_MS;
    long days = duration / ONE_DAY_MS;
    duration %= ONE_DAY_MS;
    long hours = duration / ONE_HOUR_MS;
    duration %= ONE_HOUR_MS;
    long minutes = duration / ONE_MINUTE_MS;
    duration %= ONE_MINUTE_MS;
    long seconds = duration / ONE_SECOND_MS;
    duration %= ONE_SECOND_MS;
    long millisecs = duration;
    // Emit at most two adjacent units, starting from the largest non-zero one.
    if (years > 0)
        return makeTimeSpanString(years, Messages.Util_year(years), months, Messages.Util_month(months));
    else if (months > 0)
        return makeTimeSpanString(months, Messages.Util_month(months), days, Messages.Util_day(days));
    else if (days > 0)
        return makeTimeSpanString(days, Messages.Util_day(days), hours, Messages.Util_hour(hours));
    else if (hours > 0)
        return makeTimeSpanString(hours, Messages.Util_hour(hours), minutes, Messages.Util_minute(minutes));
    else if (minutes > 0)
        return makeTimeSpanString(minutes, Messages.Util_minute(minutes), seconds, Messages.Util_second(seconds));
    else if (seconds >= 10)
        return Messages.Util_second(seconds);
    else if (seconds >= 1)
        // Integer division before the float cast is deliberate (see the FindBugs
        // suppression above): it truncates to a single decimal place.
        return Messages.Util_second(seconds + (float) (millisecs / 100) / 10); // render "1.2 sec"
    else if (millisecs >= 100)
        return Messages.Util_second((float) (millisecs / 10) / 100); // render "0.12 sec".
    else
        return Messages.Util_millisecond(millisecs);
}
// Exercises getTimeSpanString across unit boundaries (year/month rounding,
// one-vs-two-unit output, fractional seconds) and guards the JENKINS-2843
// locale regression.
@Test
public void testTimeSpanString() {
    // Check that amounts less than 365 days are not rounded up to a whole year.
    // In the previous implementation there were 360 days in a year.
    // We're still working on the assumption that a month is 30 days, so there will
    // be 5 days at the end of the year that will be "12 months" but not "1 year".
    // First check 359 days.
    assertEquals(Messages.Util_month(11), Util.getTimeSpanString(31017600000L));
    // And 362 days.
    assertEquals(Messages.Util_month(12), Util.getTimeSpanString(31276800000L));
    // 11.25 years - Check that if the first unit has 2 or more digits, a second unit isn't used.
    assertEquals(Messages.Util_year(11), Util.getTimeSpanString(354780000000L));
    // 9.25 years - Check that if the first unit has only 1 digit, a second unit is used.
    assertEquals(Messages.Util_year(9) + " " + Messages.Util_month(3), Util.getTimeSpanString(291708000000L));
    // 67 seconds
    assertEquals(Messages.Util_minute(1) + " " + Messages.Util_second(7), Util.getTimeSpanString(67000L));
    // 17 seconds - Check that times less than a minute only use seconds.
    assertEquals(Messages.Util_second(17), Util.getTimeSpanString(17000L));
    // 1712ms -> 1.7sec
    assertEquals(Messages.Util_second(1.7), Util.getTimeSpanString(1712L));
    // 171ms -> 0.17sec
    assertEquals(Messages.Util_second(0.17), Util.getTimeSpanString(171L));
    // 101ms -> 0.10sec
    assertEquals(Messages.Util_second(0.1), Util.getTimeSpanString(101L));
    // 17ms
    assertEquals(Messages.Util_millisecond(17), Util.getTimeSpanString(17L));
    // 1ms
    assertEquals(Messages.Util_millisecond(1), Util.getTimeSpanString(1L));
    // Test JENKINS-2843 (locale with comma as fraction separator got exception for <10 sec)
    Locale saveLocale = Locale.getDefault();
    Locale.setDefault(Locale.GERMANY);
    try {
        // Just verifying no exception is thrown:
        assertNotNull("German locale", Util.getTimeSpanString(1234));
        assertNotNull("German locale <1 sec", Util.getTimeSpanString(123));
    } finally {
        Locale.setDefault(saveLocale);
    }
}
/**
 * Serves the configured data-plane tokens as JSON of the form
 * {"tokens":[{"id":..., "fingerprints":[...]}, ...]}.
 * Uses an anonymous SlimeJsonResponse subclass whose instance initializer
 * (the double-brace block) populates the slime tree.
 */
@Override
public HttpResponse handle(HttpRequest request) {
    return new SlimeJsonResponse() {{
        Cursor tokensArray = slime.setObject().setArray("tokens");
        tokens.forEach((id, fingerprints) -> {
            Cursor tokenObject = tokensArray.addObject();
            tokenObject.setString("id", id);
            fingerprints.forEach(tokenObject.setArray("fingerprints")::addString);
        });
    }};
}
// The handler must expose each token id once with its fingerprints (duplicated
// token ids across clients are collapsed) and render them as sorted JSON.
@Test
void testFingerprints() throws IOException {
    CloudTokenDataPlaneHandler handler = new CloudTokenDataPlaneHandler(
        new Builder().tokenContext("context")
            .clients(new Clients.Builder().id("client1")
                .permissions("read")
                .tokens(new Tokens.Builder().id("id1")
                    .fingerprints(List.of("pinky", "ring", "middle", "index", "thumb"))
                    .checkAccessHashes(List.of("a", "b", "c", "d", "e"))
                    .expirations(List.of("<none>", "<none>", "<none>", "<none>", "<none>")))
                .tokens(new Tokens.Builder().id("id2")
                    .fingerprints("toasty")
                    .checkAccessHashes("hash")
                    .expirations("<none>")))
            .clients(new Clients.Builder().id("client2")
                .permissions("write")
                .tokens(new Tokens.Builder().id("id2")
                    .fingerprints("toasty")
                    .checkAccessHashes("hash")
                    .expirations("<none>")))
            .build(),
        Runnable::run
    );
    HttpResponse response = handler.handle(createTestRequest("", GET));
    assertEquals(200, response.getStatus());
    assertEquals("""
                 {"tokens":[{"id":"id1","fingerprints":["index","middle","pinky","ring","thumb"]},{"id":"id2","fingerprints":["toasty"]}]}""",
                 new ByteArrayOutputStream() {{ response.render(this); }}.toString(UTF_8));
}
/**
 * Synchronously sends a message, first qualifying its topic with the producer's
 * namespace; single messages are routed through the batch accumulator when
 * auto-batching is enabled, otherwise they are sent directly.
 */
@Override
public SendResult send(
    Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    // Qualify the topic before any routing decision.
    msg.setTopic(withNamespace(msg.getTopic()));
    boolean accumulate = this.getAutoBatch() && !(msg instanceof MessageBatch);
    if (accumulate) {
        return sendByAccumulator(msg, null, null);
    }
    return sendDirect(msg, null, null);
}
// A large payload is compressed transparently; the async send must still report
// SEND_OK with the broker-assigned offset id and queue offset.
@Test
public void testSendMessageAsync_BodyCompressed() throws RemotingException, InterruptedException, MQBrokerException, MQClientException {
    final CountDownLatch countDownLatch = new CountDownLatch(1);
    when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(createTopicRoute());
    producer.send(bigMessage, new SendCallback() {
        @Override
        public void onSuccess(SendResult sendResult) {
            assertThat(sendResult.getSendStatus()).isEqualTo(SendStatus.SEND_OK);
            assertThat(sendResult.getOffsetMsgId()).isEqualTo("123");
            assertThat(sendResult.getQueueOffset()).isEqualTo(456L);
            countDownLatch.countDown();
        }

        @Override
        public void onException(Throwable e) {
        }
    });
    countDownLatch.await(defaultTimeout, TimeUnit.MILLISECONDS);
}
/** Returns true if the given node id belongs to the quorum (i.e. is a controller). */
public boolean isControllerId(int nodeId) {
    return quorumNodeIds.contains(nodeId);
}
// Quorum node ids 0-2 are controllers; any other id is not.
@Test
public void testIsControllerId() {
    assertTrue(QUORUM_FEATURES.isControllerId(0));
    assertTrue(QUORUM_FEATURES.isControllerId(1));
    assertTrue(QUORUM_FEATURES.isControllerId(2));
    assertFalse(QUORUM_FEATURES.isControllerId(3));
}
/**
 * Trims surrounding whitespace from the input line and shifts the cursor to
 * match the trimmed text before delegating to the wrapped parser.
 */
@Override
public ParsedLine parse(final String line, final int cursor, final ParseContext context) {
    final String stripped = line.trim();
    // The incoming cursor referred to the untrimmed line; remap it.
    final int shiftedCursor = adjustCursor(line, stripped, cursor);
    return delegate.parse(stripped, shiftedCursor, context);
}
// Any cursor positioned inside the leading whitespace must be clamped to
// column 0 of the trimmed line handed to the delegate.
@Test
public void shouldAdjustCursorIfInLeftWhiteSpace() {
    expect(delegate.parse(anyString(), eq(0), anyObject()))
        .andReturn(parsedLine).anyTimes();
    replay(delegate);
    parser.parse(" line ", 0, UNSPECIFIED);
    parser.parse(" line ", 1, UNSPECIFIED);
    parser.parse(" line ", 2, UNSPECIFIED);
}
/**
 * Records that {@code book} (possibly null, meaning "bookcase seen empty") was
 * observed at world point {@code loc}; unknown locations are ignored with a
 * debug log rather than treated as errors.
 */
void mark(WorldPoint loc, Book book) {
    Bookcase bookcase = byPoint.get(loc);
    if (bookcase == null) {
        log.debug("Requested non-existent bookcase at {}", loc);
        return;
    }
    mark(bookcase, book);
}
// Walks the Varlamore Envoy flow: the envoy's bookcase must not reset the
// solved state when empty or when the envoy is found, but a wrong book must.
@Test
public void testVarlamoreEnvoyFindingProcess() {
    assertEquals(SolvedState.NO_DATA, library.getState());
    library.mark(0, Book.RADAS_JOURNEY);
    assertEquals(SolvedState.INCOMPLETE, library.getState());
    library.mark(library.step, Book.KILLING_OF_A_KING);
    assertEquals(SolvedState.COMPLETE, library.getState());
    // The Varlamore Envoy book can be found in this bookcase, but should not cause a state reset if not found
    library.mark(library.step * 2, null);
    assertEquals(SolvedState.COMPLETE, library.getState());
    library.mark(library.step * 2, Book.VARLAMORE_ENVOY);
    assertEquals(SolvedState.COMPLETE, library.getState());
    // not valid, should reset
    library.mark(library.step * 2, Book.TRANSPORTATION_INCANTATIONS);
    assertEquals(SolvedState.INCOMPLETE, library.getState());
}
/**
 * Removes the statistics recorded under {@code requestName}; removing an
 * unknown name is a no-op.
 */
void removeRequest(String requestName) {
    assert requestName != null;
    requests.remove(requestName);
}
// Adding and then removing a request must leave the request count unchanged.
@Test
public void testRemoveRequest() {
    final int count = counter.getRequestsCount();
    counter.addRequest("remove request", 100, 50, 50, false, 1000);
    counter.removeRequest("remove request");
    assertEquals("requests count", count, counter.getRequestsCount());
}
/**
 * Resolves the visitor's glob (absolute, or relative to the shell's working
 * directory) against the metadata tree. The recursive accept overload reports
 * matches itself; if it finds none, the handler is invoked once with an empty
 * Optional so callers can distinguish "no match" from silence.
 */
@Override
public void accept(MetadataShellState state) {
    String fullGlob = glob.startsWith("/") ? glob : state.workingDirectory() + "/" + glob;
    List<String> globComponents = CommandUtils.stripDotPathComponents(CommandUtils.splitPath(fullGlob));
    MetadataNode root = state.root();
    if (root == null) {
        throw new RuntimeException("Invalid null root");
    }
    if (!accept(globComponents, 0, root, new String[0])) {
        handler.accept(Optional.empty());
    }
}
// "../.." from the working directory should resolve to the tree's root node.
@Test
public void testDoubleDotDot() {
    InfoConsumer consumer = new InfoConsumer();
    GlobVisitor visitor = new GlobVisitor("../..", consumer);
    visitor.accept(DATA);
    assertEquals(Optional.of(Collections.singletonList(
        new MetadataNodeInfo(new String[0], DATA.root()))), consumer.infos);
}
/**
 * Central varbit/varp dispatcher for the timers-and-buffs plugin: creates,
 * updates, or removes game timers and counters in response to client variable
 * changes, each branch gated by its corresponding config toggle. The branch
 * order and the early {@code return}s in the divine-potion section are
 * significant (combined potions suppress their component timers), so the logic
 * is intentionally left untouched here.
 */
@Subscribe public void onVarbitChanged(VarbitChanged event) { if (event.getVarbitId() == Varbits.IN_RAID) { removeVarTimer(OVERLOAD_RAID); removeGameTimer(PRAYER_ENHANCE); } if (event.getVarbitId() == Varbits.VENGEANCE_COOLDOWN && config.showVengeance()) { if (event.getValue() == 1) { createGameTimer(VENGEANCE); } else { removeGameTimer(VENGEANCE); } } if (event.getVarbitId() == Varbits.SPELLBOOK_SWAP && config.showSpellbookSwap()) { if (event.getValue() == 1) { createGameTimer(SPELLBOOK_SWAP); } else { removeGameTimer(SPELLBOOK_SWAP); } } if (event.getVarbitId() == Varbits.HEAL_GROUP_COOLDOWN && config.showHealGroup()) { if (event.getValue() == 1) { createGameTimer(HEAL_GROUP); } else { removeGameTimer(HEAL_GROUP); } } if (event.getVarbitId() == Varbits.DEATH_CHARGE_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(DEATH_CHARGE_COOLDOWN); } else { removeGameTimer(DEATH_CHARGE_COOLDOWN); } } if (event.getVarbitId() == Varbits.CORRUPTION_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(CORRUPTION_COOLDOWN); } else { removeGameTimer(CORRUPTION_COOLDOWN); } } if (event.getVarbitId() == Varbits.RESURRECT_THRALL_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(RESURRECT_THRALL_COOLDOWN); } else { removeGameTimer(RESURRECT_THRALL_COOLDOWN); } } if (event.getVarbitId() == Varbits.SHADOW_VEIL_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(SHADOW_VEIL_COOLDOWN); } else { removeGameTimer(SHADOW_VEIL_COOLDOWN); } } if (event.getVarbitId() == Varbits.WARD_OF_ARCEUUS_COOLDOWN && config.showArceuusCooldown()) { if (event.getValue() == 1) { createGameTimer(WARD_OF_ARCEUUS_COOLDOWN); } else { removeGameTimer(WARD_OF_ARCEUUS_COOLDOWN); } } if (event.getVarbitId() == Varbits.VENGEANCE_ACTIVE && config.showVengeanceActive()) { updateVarCounter(VENGEANCE_ACTIVE, event.getValue()); } if (event.getVarbitId() ==
Varbits.DEATH_CHARGE && config.showArceuus()) { if (event.getValue() == 1) { createGameTimer(DEATH_CHARGE, Duration.of(client.getRealSkillLevel(Skill.MAGIC), RSTimeUnit.GAME_TICKS)); } else { removeGameTimer(DEATH_CHARGE); } } if (event.getVarbitId() == Varbits.RESURRECT_THRALL && event.getValue() == 0 && config.showArceuus()) { removeGameTimer(RESURRECT_THRALL); } if (event.getVarbitId() == Varbits.SHADOW_VEIL && event.getValue() == 0 && config.showArceuus()) { removeGameTimer(SHADOW_VEIL); } if (event.getVarpId() == VarPlayer.POISON && config.showAntiPoison()) { final int poisonVarp = event.getValue(); final int tickCount = client.getTickCount(); if (poisonVarp == 0) { nextPoisonTick = -1; } else if (nextPoisonTick - tickCount <= 0) { nextPoisonTick = tickCount + POISON_TICK_LENGTH; } updateVarTimer(ANTIPOISON, event.getValue(), i -> i >= 0 || i < VENOM_VALUE_CUTOFF, i -> nextPoisonTick - tickCount + Math.abs((i + 1) * POISON_TICK_LENGTH)); updateVarTimer(ANTIVENOM, event.getValue(), i -> i >= VENOM_VALUE_CUTOFF, i -> nextPoisonTick - tickCount + Math.abs((i + 1 - VENOM_VALUE_CUTOFF) * POISON_TICK_LENGTH)); } if ((event.getVarbitId() == Varbits.NMZ_OVERLOAD_REFRESHES_REMAINING || event.getVarbitId() == Varbits.COX_OVERLOAD_REFRESHES_REMAINING) && config.showOverload()) { final int overloadVarb = event.getValue(); final int tickCount = client.getTickCount(); if (overloadVarb <= 0) { nextOverloadRefreshTick = -1; } else if (nextOverloadRefreshTick - tickCount <= 0) { nextOverloadRefreshTick = tickCount + OVERLOAD_TICK_LENGTH; } GameTimer overloadTimer = client.getVarbitValue(Varbits.IN_RAID) == 1 ?
OVERLOAD_RAID : OVERLOAD; updateVarTimer(overloadTimer, overloadVarb, i -> nextOverloadRefreshTick - tickCount + (i - 1) * OVERLOAD_TICK_LENGTH); } if (event.getVarbitId() == Varbits.TELEBLOCK && config.showTeleblock()) { updateVarTimer(TELEBLOCK, event.getValue() - 100, i -> i <= 0, IntUnaryOperator.identity()); } if (event.getVarpId() == VarPlayer.CHARGE_GOD_SPELL && config.showCharge()) { updateVarTimer(CHARGE, event.getValue(), i -> i * 2); } if (event.getVarbitId() == Varbits.IMBUED_HEART_COOLDOWN && config.showImbuedHeart()) { updateVarTimer(IMBUEDHEART, event.getValue(), i -> i * 10); } if (event.getVarbitId() == Varbits.DRAGONFIRE_SHIELD_COOLDOWN && config.showDFSSpecial()) { updateVarTimer(DRAGON_FIRE_SHIELD, event.getValue(), i -> i * 8); } if (event.getVarpId() == LAST_HOME_TELEPORT && config.showHomeMinigameTeleports()) { checkTeleport(LAST_HOME_TELEPORT); } if (event.getVarpId() == LAST_MINIGAME_TELEPORT && config.showHomeMinigameTeleports()) { checkTeleport(LAST_MINIGAME_TELEPORT); } if (event.getVarbitId() == Varbits.RUN_SLOWED_DEPLETION_ACTIVE || event.getVarbitId() == Varbits.STAMINA_EFFECT || event.getVarbitId() == Varbits.RING_OF_ENDURANCE_EFFECT) { // staminaEffectActive is checked to match https://github.com/Joshua-F/cs2-scripts/blob/741271f0c3395048c1bad4af7881a13734516adf/scripts/%5Bproc%2Cbuff_bar_get_value%5D.cs2#L25 int staminaEffectActive = client.getVarbitValue(Varbits.RUN_SLOWED_DEPLETION_ACTIVE); int staminaPotionEffectVarb = client.getVarbitValue(Varbits.STAMINA_EFFECT); int enduranceRingEffectVarb = client.getVarbitValue(Varbits.RING_OF_ENDURANCE_EFFECT); final int totalStaminaEffect = staminaPotionEffectVarb + enduranceRingEffectVarb; if (staminaEffectActive == 1 && config.showStamina()) { updateVarTimer(STAMINA, totalStaminaEffect, i -> i * 10); } } if (event.getVarbitId() == Varbits.ANTIFIRE && config.showAntiFire()) { final int antifireVarb = event.getValue(); final int tickCount = client.getTickCount(); if (antifireVarb == 0) {
nextAntifireTick = -1; } else if (nextAntifireTick - tickCount <= 0) { nextAntifireTick = tickCount + ANTIFIRE_TICK_LENGTH; } updateVarTimer(ANTIFIRE, antifireVarb, i -> nextAntifireTick - tickCount + (i - 1) * ANTIFIRE_TICK_LENGTH); } if (event.getVarbitId() == Varbits.SUPER_ANTIFIRE && config.showAntiFire()) { final int superAntifireVarb = event.getValue(); final int tickCount = client.getTickCount(); if (superAntifireVarb == 0) { nextSuperAntifireTick = -1; } else if (nextSuperAntifireTick - tickCount <= 0) { nextSuperAntifireTick = tickCount + SUPERANTIFIRE_TICK_LENGTH; } updateVarTimer(SUPERANTIFIRE, event.getValue(), i -> nextSuperAntifireTick - tickCount + (i - 1) * SUPERANTIFIRE_TICK_LENGTH); } if (event.getVarbitId() == Varbits.MAGIC_IMBUE && config.showMagicImbue()) { updateVarTimer(MAGICIMBUE, event.getValue(), i -> i * 10); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_ATTACK && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()) { return; } updateVarTimer(DIVINE_SUPER_ATTACK, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_STRENGTH && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()) { return; } updateVarTimer(DIVINE_SUPER_STRENGTH, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_DEFENCE && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue() || client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue() || client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue() // When drinking a dose of moonlight potion while already under its effects, desync between // Varbits.MOONLIGHT_POTION and Varbits.DIVINE_SUPER_DEFENCE can occur, with the latter being 1 tick // greater || client.getVarbitValue(Varbits.MOONLIGHT_POTION) >= event.getValue()) { return; } if (client.getVarbitValue(Varbits.MOONLIGHT_POTION) <
event.getValue()) { removeVarTimer(MOONLIGHT_POTION); } updateVarTimer(DIVINE_SUPER_DEFENCE, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_RANGING && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue()) { return; } updateVarTimer(DIVINE_RANGING, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_MAGIC && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue()) { return; } updateVarTimer(DIVINE_MAGIC, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_SUPER_COMBAT && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_SUPER_ATTACK) == event.getValue()) { removeVarTimer(DIVINE_SUPER_ATTACK); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_STRENGTH) == event.getValue()) { removeVarTimer(DIVINE_SUPER_STRENGTH); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) { removeVarTimer(DIVINE_SUPER_DEFENCE); } updateVarTimer(DIVINE_SUPER_COMBAT, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_BASTION && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_RANGING) == event.getValue()) { removeVarTimer(DIVINE_RANGING); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) { removeVarTimer(DIVINE_SUPER_DEFENCE); } updateVarTimer(DIVINE_BASTION, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.DIVINE_BATTLEMAGE && config.showDivine()) { if (client.getVarbitValue(Varbits.DIVINE_MAGIC) == event.getValue()) { removeVarTimer(DIVINE_MAGIC); } if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) { removeVarTimer(DIVINE_SUPER_DEFENCE); } updateVarTimer(DIVINE_BATTLEMAGE, event.getValue(), IntUnaryOperator.identity()); } if (event.getVarbitId() == Varbits.BUFF_STAT_BOOST && config.showOverload()) {
updateVarTimer(SMELLING_SALTS, event.getValue(), i -> i * 25); } if (event.getVarbitId() == Varbits.MENAPHITE_REMEDY && config.showMenaphiteRemedy()) { updateVarTimer(MENAPHITE_REMEDY, event.getValue(), i -> i * 25); } if (event.getVarbitId() == Varbits.LIQUID_ADERNALINE_ACTIVE && event.getValue() == 0 && config.showLiquidAdrenaline()) { removeGameTimer(LIQUID_ADRENALINE); } if (event.getVarbitId() == Varbits.FARMERS_AFFINITY && config.showFarmersAffinity()) { updateVarTimer(FARMERS_AFFINITY, event.getValue(), i -> i * 20); } if (event.getVarbitId() == Varbits.GOD_WARS_ALTAR_COOLDOWN && config.showGodWarsAltar()) { updateVarTimer(GOD_WARS_ALTAR, event.getValue(), i -> i * 100); } if (event.getVarbitId() == Varbits.CURSE_OF_THE_MOONS && config.showCurseOfTheMoons()) { final int regionID = WorldPoint.fromLocal(client, client.getLocalPlayer().getLocalLocation()).getRegionID(); if (regionID == ECLIPSE_MOON_REGION_ID) { updateVarCounter(CURSE_OF_THE_MOONS_ECLIPSE, event.getValue()); } else { updateVarCounter(CURSE_OF_THE_MOONS_BLUE, event.getValue()); } } if (event.getVarbitId() == Varbits.COLOSSEUM_DOOM && config.showColosseumDoom()) { updateVarCounter(COLOSSEUM_DOOM, event.getValue()); } if (event.getVarbitId() == Varbits.MOONLIGHT_POTION && config.showMoonlightPotion()) { int moonlightValue = event.getValue(); // Increase the timer by 1 tick in case of desync due to drinking a dose of moonlight potion while already // under its effects. Otherwise, the timer would be 1 tick shorter than it is meant to be. if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == moonlightValue + 1) { moonlightValue++; } updateVarTimer(MOONLIGHT_POTION, moonlightValue, IntUnaryOperator.identity()); } }
// Activating the corruption-cooldown varbit should add a CORRUPTION_COOLDOWN timer.
@Test
public void testCorruptionCooldown() {
    when(timersAndBuffsConfig.showArceuusCooldown()).thenReturn(true);
    VarbitChanged varbitChanged = new VarbitChanged();
    varbitChanged.setVarbitId(Varbits.CORRUPTION_COOLDOWN);
    varbitChanged.setValue(1);
    timersAndBuffsPlugin.onVarbitChanged(varbitChanged);
    ArgumentCaptor<InfoBox> captor = ArgumentCaptor.forClass(InfoBox.class);
    verify(infoBoxManager).addInfoBox(captor.capture());
    TimerTimer infoBox = (TimerTimer) captor.getValue();
    assertEquals(GameTimer.CORRUPTION_COOLDOWN, infoBox.getTimer());
}
/**
 * Returns the ids of all devices in the given network whose current master is
 * {@code nodeId}; empty set when the node masters nothing.
 */
@Override
public Set<DeviceId> getDevices(NetworkId networkId, NodeId nodeId) {
    Map<DeviceId, NodeId> masters = getMasterMap(networkId);
    Set<DeviceId> owned = new HashSet<>();
    // Collect every device whose mastership entry points at the requested node.
    for (Map.Entry<DeviceId, NodeId> entry : masters.entrySet()) {
        if (Objects.equals(entry.getValue(), nodeId)) {
            owned.add(entry.getKey());
        }
    }
    return owned;
}
// Devices mastered by N2 must be exactly {VDID1, VDID2}; VDID3 (mastered by N1)
// must be excluded from the result.
@Test
public void getDevices() {
    Set<DeviceId> d = Sets.newHashSet(VDID1, VDID2);
    put(VNID1, VDID1, N2, true, true);
    put(VNID1, VDID2, N2, true, true);
    put(VNID1, VDID3, N1, true, true);
    assertTrue("wrong devices", d.equals(sms.getDevices(VNID1, N2)));
}
/**
 * Reads bytes for {@code pageId} starting at {@code pageOffset} into
 * {@code buffer}; returns 0 when the page is not cached.
 */
@Override
public int get(PageId pageId, int pageOffset, ReadTargetBuffer buffer, CacheContext cacheContext) {
    ReadWriteLock pageLock = getPageLock(pageId);
    long pageSize = -1L;
    try (LockResource r = new LockResource(pageLock.readLock())) {
        PageInfo pageInfo;
        // The meta-store lock is nested inside the per-page lock so that the
        // existence check and LRU refresh happen atomically for this page.
        try (LockResource r2 = new LockResource(mPageMetaStore.getLock().readLock())) {
            pageInfo = mPageMetaStore.getPageInfo(pageId); //check if page exists and refresh LRU items
        } catch (PageNotFoundException e) {
            LOG.debug("get({},pageOffset={}) fails due to page not found", pageId, pageOffset);
            return 0;
        }
        pageSize = pageInfo.getPageSize();
    }
    // Delegate to the sized overload now that the page size is known.
    return get(pageId, pageOffset, (int) pageSize, buffer, cacheContext);
}
// Reading a page that was never cached must return 0 bytes rather than fail.
@Test
public void getNotExist() throws Exception {
    assertEquals(0, mCacheManager.get(PAGE_ID1, PAGE1.length, mBuf, 0));
}
/**
 * Drops every read data source that is currently disabled on the rule,
 * returning a new list and leaving the caller's list untouched.
 */
@Override
public List<String> filter(final ReadwriteSplittingDataSourceGroupRule rule, final List<String> toBeFilteredReadDataSources) {
    // Operate on a copy so the input list is never mutated.
    final List<String> enabledSources = new LinkedList<>(toBeFilteredReadDataSources);
    enabledSources.removeIf(rule.getDisabledDataSourceNames()::contains);
    return enabledSources;
}
// A disabled read data source must be filtered out of the candidate list.
@Test
void assertDisableDataSource() {
    rule.disableDataSource("read_ds_0");
    assertThat(new DisabledReadDataSourcesFilter().filter(rule, Arrays.asList("read_ds_0", "read_ds_1")), is(Collections.singletonList("read_ds_1")));
}
/**
 * Serialises {@code object} via Java object serialisation into a byte array.
 *
 * @throws UncheckedIOException if the object cannot be serialised
 */
@Override
protected byte[] serialize(Object object) {
    var buffer = new ByteArrayOutputStream();
    try (var objectStream = new ObjectOutputStream(buffer)) {
        objectStream.writeObject(object);
    } catch (IOException e) {
        throw new UncheckedIOException("Failed to serialize " + object.getClass(), e);
    }
    return buffer.toByteArray();
}
// Serialising a non-Serializable object must surface as UncheckedIOException.
@Test(dataProvider = "copier")
public void serializable_fail(JavaSerializationCopier copier) {
    assertThrows(UncheckedIOException.class, () -> copier.serialize(new Object()));
}
/**
 * Local-API endpoint removing the meta data identified by id/rpcType/path from
 * every registered subscriber; always answers with the success constant.
 */
@GetMapping("/meta/delete")
public Mono<String> clean(@RequestParam("id") final String id, @RequestParam("rpcType") final String rpcType, @RequestParam("path") final String path) {
    if (CollectionUtils.isEmpty(subscribers)) {
        // Nothing listens for meta data changes; report success immediately.
        return Mono.just(Constants.SUCCESS);
    }
    LOG.info("delete apache shenyu local meta data");
    final MetaData target = MetaData.builder().id(id).rpcType(rpcType).path(path).build();
    subscribers.forEach(subscriber -> subscriber.unSubscribe(target));
    return Mono.just(Constants.SUCCESS);
}
// The delete endpoint must notify every subscriber when present and still
// answer 200 OK when the subscriber list is empty.
@Test
public void testClean() throws Exception {
    final MockHttpServletResponse response = this.mockMvc.perform(MockMvcRequestBuilders.get("/shenyu/meta/delete")
            .contentType(MediaType.APPLICATION_JSON)
            .param("id", "id")
            .param("path", "path")
            .param("rpcType", "rpcType"))
            .andReturn().getResponse();
    assertThat(response.getStatus()).isEqualTo(HttpStatus.OK.value());
    MetaData metaData = new MetaData();
    metaData.setId("id");
    metaData.setPath("path");
    metaData.setRpcType("rpcType");
    subscribers.forEach(subscriber -> verify(subscriber).unSubscribe(metaData));
    final MockHttpServletResponse subNullResponse = this.mockMvcSubscribersNull.perform(MockMvcRequestBuilders.get("/shenyu/meta/delete")
            .contentType(MediaType.APPLICATION_JSON)
            .param("id", "id")
            .param("path", "path")
            .param("rpcType", "rpcType"))
            .andReturn().getResponse();
    assertThat(subNullResponse.getStatus()).isEqualTo(HttpStatus.OK.value());
}
/**
 * Stores {@code state} in the backing storage and records its handle in the
 * ConfigMap under {@code key}. On any failure the stored state is discarded,
 * EXCEPT when the outcome is possibly inconsistent (it is unknown whether the
 * ConfigMap update landed), in which case the data is deliberately kept.
 *
 * @throws PossibleInconsistentStateException when the ConfigMap update outcome is unknown
 * @throws Exception including AlreadyExistException when the key is already present
 */
@Override
public RetrievableStateHandle<T> addAndLock(String key, T state)
        throws PossibleInconsistentStateException, Exception {
    checkNotNull(key, "Key in ConfigMap.");
    checkNotNull(state, "State.");
    final RetrievableStateHandle<T> storeHandle = storage.store(state);
    final byte[] serializedStoreHandle =
            serializeOrDiscard(new StateHandleWithDeleteMarker<>(storeHandle));
    // initialize flag to serve the failure case
    boolean discardState = true;
    try {
        // a successful operation will result in the state not being discarded
        discardState =
                !updateConfigMap(
                        cm -> {
                            try {
                                return addEntry(cm, key, serializedStoreHandle);
                            } catch (Exception e) {
                                throw new CompletionException(e);
                            }
                        })
                .get();
        return storeHandle;
    } catch (Exception ex) {
        final Optional<PossibleInconsistentStateException> possibleInconsistentStateException =
                ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class);
        if (possibleInconsistentStateException.isPresent()) {
            // it's unclear whether the state handle metadata was written to the ConfigMap -
            // hence, we don't discard the data
            discardState = false;
            throw possibleInconsistentStateException.get();
        }
        throw ExceptionUtils.findThrowable(ex, AlreadyExistException.class)
                .orElseThrow(() -> ex);
    } finally {
        if (discardState) {
            storeHandle.discardState();
        }
    }
}
// Verifies that addAndLock on an already-present key throws AlreadyExistException, keeps the pre-existing
// handle intact, and discards only the newly stored (second) handle.
@Test void testAddAndLockWithExistingKey() throws Exception { new Context() { { runTest( () -> { leaderCallbackGrantLeadership(); final TestingLongStateHandleHelper.LongStateHandle oldState = addRegularEntry(getLeaderConfigMap(), key, 1337L); final KubernetesStateHandleStore< TestingLongStateHandleHelper.LongStateHandle> store = new KubernetesStateHandleStore<>( flinkKubeClient, LEADER_CONFIGMAP_NAME, longStateStorage, filter, LOCK_IDENTITY); final TestingLongStateHandleHelper.LongStateHandle newState = new TestingLongStateHandleHelper.LongStateHandle(12345L); final String msg = String.format( "%s already exists in ConfigMap %s", key, LEADER_CONFIGMAP_NAME); assertThatThrownBy( () -> store.addAndLock(key, newState), "Exception should be thrown.") .satisfies( anyCauseMatches( StateHandleStore.AlreadyExistException.class, msg)); // Both initial & new handles should be in the storage (we never clean // it for testing). assertThat(TestingLongStateHandleHelper.getGlobalStorageSize()) .isEqualTo(2); // Only the new one (second entry in the store) should have been // discarded. assertThat(oldState.isDiscarded()).isFalse(); assertThat(newState.isDiscarded()).isTrue(); }); } }; }
/**
 * Returns the singleton {@code AttributeKey} for the given name, creating it on first use.
 * The cast is unchecked because the pool stores keys without their value-type parameter.
 */
@SuppressWarnings("unchecked")
public static <T> AttributeKey<T> valueOf(String name) {
    final AttributeKey<?> key = pool.valueOf(name);
    return (AttributeKey<T>) key;
}
// Verifies valueOf creates a key for a previously unused name and returns the same instance on repeated calls.
@Test public void testValueOf() { String name = "test1"; assertFalse(AttributeKey.exists(name)); AttributeKey<String> attr = AttributeKey.valueOf(name); AttributeKey<String> attr2 = AttributeKey.valueOf(name); assertSame(attr, attr2); }
// Convenience overload: schedules a recurring job with an auto-generated id (null) for the given cron expression.
public String scheduleRecurrently(String cron, JobLambda job) { return scheduleRecurrently(null, cron, job); }
// Verifies that scheduling a recurring job triggers both the onCreating and onCreated client filter callbacks.
@Test void onRecurringJobCreatingAndCreatedAreCalled() { when(storageProvider.saveRecurringJob(any(RecurringJob.class))).thenAnswer(invocation -> invocation.getArgument(0)); jobScheduler.scheduleRecurrently(Cron.daily(), () -> testService.doWork()); assertThat(jobClientLogFilter.onCreating).isTrue(); assertThat(jobClientLogFilter.onCreated).isTrue(); }
public static AddressMatcher getAddressMatcher(String address) { final AddressMatcher matcher; final int indexColon = address.indexOf(':'); final int lastIndexColon = address.lastIndexOf(':'); final int indexDot = address.indexOf('.'); final int lastIndexDot = address.lastIndexOf('.'); if (indexColon > -1 && lastIndexColon > indexColon) { if (indexDot == -1) { matcher = new Ip6AddressMatcher(); parseIpv6(matcher, address); } else { // IPv4 mapped IPv6 if (indexDot >= lastIndexDot) { throw new InvalidAddressException(address); } final int lastIndexColon2 = address.lastIndexOf(':'); final String host2 = address.substring(lastIndexColon2 + 1); matcher = new Ip4AddressMatcher(); parseIpv4(matcher, host2); } } else if (indexDot > -1 && lastIndexDot > indexDot && indexColon == -1) { // IPv4 matcher = new Ip4AddressMatcher(); parseIpv4(matcher, address); } else { throw new InvalidAddressException(address); } return matcher; }
// Verifies that malformed IPv6 / IPv4-mapped strings are rejected with InvalidAddressException.
@Test public void testAddressMatcherFail() { try { AddressUtil.getAddressMatcher("fe80::62c5:47ff::fe05:480a%en0"); fail(); } catch (Exception e) { assertTrue(e instanceof InvalidAddressException); } try { AddressUtil.getAddressMatcher("fe80:62c5:47ff:fe05:480a%en0"); fail(); } catch (Exception e) { assertTrue(e instanceof InvalidAddressException); } try { AddressUtil.getAddressMatcher("[fe80:62c5:47ff:fe05:480a%en0"); fail(); } catch (Exception e) { assertTrue(e instanceof InvalidAddressException); } try { AddressUtil.getAddressMatcher("::ffff.192.0.2.128"); fail(); } catch (Exception e) { assertTrue(e instanceof InvalidAddressException); } }
// Sanitizes baseString into a valid index name using the index-specific character rules, max length, and time suffix.
static String generateIndexName(String baseString) { return generateResourceId( baseString, ILLEGAL_INDEX_NAME_CHARS, REPLACE_INDEX_NAME_CHAR, MAX_INDEX_NAME_LENGTH, TIME_FORMAT); }
// Verifies spaces are replaced with '-', the name is lower-cased, and a date-time-random suffix is appended.
@Test public void testGenerateIndexNameShouldReplaceSpace() { String testBaseString = "Test DB Name"; String actual = generateIndexName(testBaseString); assertThat(actual).matches("test-db-name-\\d{8}-\\d{6}-\\d{6}"); }
// Convenience overload: parses the DRLX expression with the trailing boolean flag defaulted to false.
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) { return drlxParse(patternType, bindingId, expression, false); }
// Verifies that a '#Person' inline cast inside an OR produces an instanceof guard placed before the casted constraint.
@Test public void testImplicitCastExpressionWithOr() { SingleDrlxParseSuccess result = (SingleDrlxParseSuccess) parser.drlxParse(Object.class, "$o", "\"Mark\" == this.toString() || == this#Person.address.city"); Optional<Expression> implicitCastExpression = result.getImplicitCastExpression(); assertThat(implicitCastExpression.isPresent()).isTrue(); assertThat(implicitCastExpression.get().toString()).isEqualTo("_this instanceof Person"); // will be added as the first predicate // instanceof check is done after the first constraint assertThat(result.getExpr().toString()).isEqualTo("\"Mark\" == _this.toString() || _this instanceof " + Person.class.getCanonicalName() + " && \"Mark\" == ((" + Person.class.getCanonicalName() + ") _this).getAddress().getCity()"); }
/**
 * Two cached entries are equal iff they are of the exact same class and wrap the same key data.
 * Uses getClass() (not instanceof) so subclasses never compare equal to this class.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    final CachedQueryEntry<?, ?> other = (CachedQueryEntry<?, ?>) o;
    return keyData.equals(other.keyData);
}
// Verifies the reflexivity clause of the equals contract: an entry equals itself.
@Test @SuppressWarnings("EqualsWithItself") public void testEquals_givenSameInstance_thenReturnTrue() { CachedQueryEntry entry1 = createEntry("key"); assertTrue(entry1.equals(entry1)); }
// Returns the workflow id of the current node in the restart configuration chain.
public String getRestartWorkflowId() { return getCurrentNode(restartConfig).getWorkflowId(); }
// Verifies the restart workflow id is taken from the (single) restart node of the RestartConfig.
@Test public void testGetRestartWorkflowId() { RestartConfig config = RestartConfig.builder().addRestartNode("foo", 1, "bar").build(); RunRequest runRequest = RunRequest.builder() .initiator(new ManualInitiator()) .currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE) .restartConfig(config) .build(); Assert.assertEquals("foo", runRequest.getRestartWorkflowId()); }
// Creates an empty file: precomputes the checksum of a zero-byte stream so the upload carries a valid checksum.
@Override public Path touch(final Path file, final TransferStatus status) throws BackgroundException { return super.touch(file, status.withChecksum(write.checksum(file, status).compute(new NullInputStream(0L), status))); }
// Integration test: touching a file with SSE-AES256 in a bucket enforcing server-side encryption must succeed.
@Test public void testSuccessWithServerSideEncryptionBucketPolicy() throws Exception { final Path container = new Path("sse-test-us-east-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path test = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)); final S3TouchFeature touch = new S3TouchFeature(session, new S3AccessControlListFeature(session)); final TransferStatus status = new TransferStatus(); status.setEncryption(S3EncryptionFeature.SSE_AES256); touch.touch(test, status); }
// Factory hook: wraps the validated configuration in a KafkaReadSchemaTransform.
@SuppressWarnings({ "nullness" // TODO(https://github.com/apache/beam/issues/20497) }) @Override protected SchemaTransform from(KafkaReadSchemaTransformConfiguration configuration) { return new KafkaReadSchemaTransform(configuration); }
// Verifies the provider is discoverable via ServiceLoader and accepts a configuration with format "RAW".
@Test public void testBuildTransformWithRawFormat() { ServiceLoader<SchemaTransformProvider> serviceLoader = ServiceLoader.load(SchemaTransformProvider.class); List<SchemaTransformProvider> providers = StreamSupport.stream(serviceLoader.spliterator(), false) .filter(provider -> provider.getClass() == KafkaReadSchemaTransformProvider.class) .collect(Collectors.toList()); KafkaReadSchemaTransformProvider kafkaProvider = (KafkaReadSchemaTransformProvider) providers.get(0); kafkaProvider.from( KafkaReadSchemaTransformConfiguration.builder() .setTopic("anytopic") .setBootstrapServers("anybootstrap") .setFormat("RAW") .build()); }
// Converts a protobuf Message into an Avro GenericRecord conforming to the given Avro schema.
public static GenericRecord convertToAvro(Schema schema, Message message) { return AvroSupport.convert(schema, message); }
// Verifies converting a proto with a depth-limited recursive schema round-trips through Avro without stack overflow.
@Test public void recursiveSchema_noOverflow() throws IOException { Schema.Parser parser = new Schema.Parser(); Schema convertedSchema = parser.parse(getClass().getClassLoader().getResourceAsStream("schema-provider/proto/parent_schema_recursive_depth_2.avsc")); Pair<Parent, GenericRecord> inputAndOutput = createInputOutputForRecursiveSchemaNoOverflow(convertedSchema); GenericRecord actual = serializeAndDeserializeAvro(ProtoConversionUtil.convertToAvro(convertedSchema, inputAndOutput.getLeft()), convertedSchema); Assertions.assertEquals(inputAndOutput.getRight(), actual); }
// Applies this extractor to msg.sourceField (String only), gated by a STRING/REGEX condition with hit/miss counters.
// Extraction failures are recorded as processing errors; single no-target results go to targetField, otherwise each
// result's own target field is used. With CUT strategy (and sourceField neither the target nor reserved), the matched
// spans are removed from the source value in reverse index order so earlier indices stay valid; a fully-emptied value
// is replaced by the "fullyCutByExtractor" placeholder. Converters run last. Timers wrap condition/execution/total.
public void runExtractor(Message msg) { try(final Timer.Context ignored = completeTimer.time()) { final String field; try (final Timer.Context ignored2 = conditionTimer.time()) { // We can only work on Strings. if (!(msg.getField(sourceField) instanceof String)) { conditionMissesCounter.inc(); return; } field = (String) msg.getField(sourceField); // Decide if to extract at all. if (conditionType.equals(ConditionType.STRING)) { if (field.contains(conditionValue)) { conditionHitsCounter.inc(); } else { conditionMissesCounter.inc(); return; } } else if (conditionType.equals(ConditionType.REGEX)) { if (regexConditionPattern.matcher(field).find()) { conditionHitsCounter.inc(); } else { conditionMissesCounter.inc(); return; } } } try (final Timer.Context ignored2 = executionTimer.time()) { Result[] results; try { results = run(field); } catch (ExtractorException e) { final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>"; msg.addProcessingError(new Message.ProcessingError( ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e))); return; } if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) { return; } else if (results.length == 1 && results[0].target == null) { // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case msg.addField(targetField, results[0].getValue()); } else { for (final Result result : results) { msg.addField(result.getTarget(), result.getValue()); } } // Remove original from message? 
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) { final StringBuilder sb = new StringBuilder(field); final List<Result> reverseList = Arrays.stream(results) .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed()) .collect(Collectors.toList()); // remove all from reverse so that the indices still match for (final Result result : reverseList) { sb.delete(result.getBeginIndex(), result.getEndIndex()); } final String builtString = sb.toString(); final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString; msg.removeField(sourceField); // TODO don't add an empty field back, or rather don't add fullyCutByExtractor msg.addField(sourceField, finalResult); } runConverters(msg); } } }
// Verifies that CUT never mutates a reserved field ("message"): the source value stays intact after extraction.
@Test public void testCursorStrategyCutIfSourceFieldIsReservedField() throws Exception { final TestExtractor extractor = new TestExtractor.Builder() .cursorStrategy(CUT) .sourceField("message") .callback(new Callable<Result[]>() { @Override public Result[] call() throws Exception { return new Result[]{ new Result("the", 0, 3) }; } }) .build(); final Message msg = createMessage("the hello"); extractor.runExtractor(msg); // The source value is not modified if it is a reserved field. assertThat(msg.getField("message")).isEqualTo("the hello"); }
// Issues CONFIG RESETSTAT against the given cluster node and blocks until the command completes.
@Override public void resetConfigStats(RedisClusterNode node) { RedisClient entry = getEntry(node); RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT); syncFuture(f); }
// Smoke test: resetting config stats on the first master must complete without throwing.
@Test public void testResetConfigStats() { RedisClusterNode master = getFirstMaster(); connection.resetConfigStats(master); }
// Decodes a socket address (little-endian int port followed by an encoded inet address) from the buffer,
// appends "address:port" to the builder, and returns the number of bytes consumed.
static int dissectSocketAddress(final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = 0; final int port = buffer.getInt(offset + encodedLength, LITTLE_ENDIAN); encodedLength += SIZE_OF_INT; encodedLength += dissectInetAddress(buffer, offset + encodedLength, builder); builder.append(':').append(port); return encodedLength; }
// Verifies an address with an invalid length (7) is rendered as "unknown-address" while the port is still decoded.
@Test void dissectSocketAddressInvalidLength() { final int offset = 16; buffer.putInt(offset, 555, LITTLE_ENDIAN); buffer.putInt(offset + SIZE_OF_INT, 7, LITTLE_ENDIAN); final int decodedLength = CommonEventDissector.dissectSocketAddress(buffer, offset, builder); assertEquals(15, decodedLength); assertEquals("unknown-address:555", builder.toString()); }
// Intentional no-op in this implementation; the interface hook is satisfied without any state change.
@Override public void clearReferrerWhenAppEnd() { }
// Verifies calling the hook leaves no last-screen URL set (the API implementation is a no-op).
@Test public void clearReferrerWhenAppEnd() { mSensorsAPI.clearReferrerWhenAppEnd(); Assert.assertNull(mSensorsAPI.getLastScreenUrl()); }
/**
 * Returns {@code true} when the map is {@code null} or contains no entries.
 *
 * @param map the map to check, may be {@code null}
 */
public static <K, V> boolean isNullOrEmpty(Map<K, V> map) {
    if (map == null) {
        return true;
    }
    return map.isEmpty();
}
// Verifies a null map is reported as null-or-empty.
@Test public void isNullOrEmpty_whenNull() { assertTrue(MapUtil.isNullOrEmpty(null)); }
// Delegates String comparison to the configured Collator (locale-aware ordering).
@Override public int compare(final String s1, final String s2) { return collator.compare(s1, s2); }
// Verifies natural ordering: digit-prefixed strings sort before letters, and shorter numbers before larger ones.
// NOTE(review): asserting the exact value -1 relies on the collator returning exactly -1, not just any negative value.
@Test public void testCompare() { assertEquals(-1, new NaturalOrderComparator().compare("123a", "a")); assertEquals(-1, new NaturalOrderComparator().compare("365", "400")); }
// Runs each registered post-project-analysis task against a snapshot whose status is SUCCESS when all steps
// executed, FAILED otherwise; skips all work when no tasks are registered.
@Override public void finished(boolean allStepsExecuted) { if (postProjectAnalysisTasks.length == 0) { return; } ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED); for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) { executeTask(projectAnalysis, postProjectAnalysisTask); } }
// Verifies finished() tolerates a null task array regardless of whether all steps executed.
@Test @UseDataProvider("booleanValues") public void does_not_fail_when_there_is_no_PostProjectAnalysisTasksExecutor(boolean allStepsExecuted) { new PostProjectAnalysisTasksExecutor(ceTask, analysisMetadataHolder, qualityGateHolder, qualityGateStatusHolder, reportReader, null) .finished(allStepsExecuted); }
// Zeroes the accumulated time and records the reset timestamp, but only once the current window has expired.
public void resetTotalTime() { if (isTotalTimeExpired()) { totalTime.set(0L); lastResetTime = System.currentTimeMillis(); } }
// Verifies that with a negative expiry window (always expired) resetTotalTime clears the accumulated total.
@Test void testResetTotalTime() { TimeoutUtils timeoutUtils = new TimeoutUtils(10, -1); timeoutUtils.initLastResetTime(); timeoutUtils.addTotalTime(1); assertEquals(1L, timeoutUtils.getTotalTime().get()); timeoutUtils.resetTotalTime(); assertEquals(0L, timeoutUtils.getTotalTime().get()); }
// Accessor for the popup mini-keyboard view; may be null when no popup has been created.
protected final AnyKeyboardViewBase getMiniKeyboard() { return mMiniKeyboard; }
// Verifies a short press on a code-less, popup-less text key emits onText("texting") only — no mini keyboard,
// no popup window, and no onKey callback.
@Test public void testShortPressWhenNoPrimaryKeyButTextWithoutPopupShouldOutputText() throws Exception { ExternalAnyKeyboard anyKeyboard = new ExternalAnyKeyboard( new DefaultAddOn(getApplicationContext(), getApplicationContext()), getApplicationContext(), keyboard_with_keys_with_no_codes, keyboard_with_keys_with_no_codes, "test", 0, 0, "en", "", "", KEYBOARD_ROW_MODE_NORMAL); anyKeyboard.loadKeyboard(mViewUnderTest.mKeyboardDimens); mViewUnderTest.setKeyboard(anyKeyboard, 0); final AnyKeyboard.AnyKey key = (AnyKeyboard.AnyKey) anyKeyboard.getKeys().get(5); Assert.assertEquals(0, key.getPrimaryCode()); Assert.assertEquals(0, key.getCodesCount()); Assert.assertEquals(0, key.popupResId); Assert.assertEquals("text", key.label); Assert.assertNull(key.popupCharacters); ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 30, true, false); Assert.assertNull(mViewUnderTest.getMiniKeyboard()); Assert.assertFalse(mViewUnderTest.mMiniKeyboardPopup.isShowing()); ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 10, false, true); Mockito.verify(mMockKeyboardListener, Mockito.never()) .onKey( anyInt(), nullable(Keyboard.Key.class), anyInt(), Mockito.nullable(int[].class), Mockito.anyBoolean()); Mockito.verify(mMockKeyboardListener).onText(same(key), eq("texting")); }
// Disabled-framework stub: installing bundles is unsupported; always throws via the shared newException() helper.
@Override public List<Bundle> installBundle(String bundleLocation) throws BundleException { throw newException(); }
// Verifies the disabled framework rejects installBundle with a RuntimeException.
@Test void require_that_installBundle_throws_exception() throws BundleException { assertThrows(RuntimeException.class, () -> { new DisableOsgiFramework().installBundle("foo"); }); }
// Builds a compact trace: walks the causal chain (skipping causes with blank messages), printing for each cause
// its message line, only the FIRST stack frame, and a "... N more" summary for the remaining frames.
public static String getShortenedStackTrace(Throwable t) { StringBuilder trace = new StringBuilder(); final List<Throwable> causalChain = Throwables.getCausalChain(t) .stream() .filter(c -> StringUtils.isNotBlank(c.getMessage())) .collect(Collectors.toList()); int position = 0; for (Throwable c : causalChain) { if (position > 0) { trace.append("Caused by: "); } appendWithNewline(trace, c); Arrays.stream(c.getStackTrace()).findFirst().ifPresent(firstStackElement -> { trace.append("\tat "); appendWithNewline(trace, firstStackElement); final int more = c.getStackTrace().length - 1; if (more > 0) { trace.append("\t... ").append(more); appendWithNewline(trace, " more"); } }); position++; } return trace.toString(); }
// Verifies a 3-frame trace is shortened to the first frame plus a "... 2 more" suffix.
@Test public void getShortenedStackTrace2More() { final IOException ioException = new IOException("io message"); final StackTraceElement traceElement = new StackTraceElement("FileReader", "process", "FileReader.java", 42); ioException.setStackTrace(new StackTraceElement[]{traceElement, traceElement, traceElement}); final String shortTrace = ExceptionUtils.getShortenedStackTrace(ioException); final String expected = "java.io.IOException: io message\n" + "\tat FileReader.process(FileReader.java:42)\n" + "\t... 2 more\n"; assertThat(shortTrace).isEqualTo(expected); }
// Runs the parent validation first; if it passed, additionally rejects non-null String values longer than maxLength.
// NOTE(review): the cast assumes the parent validator only passes String (or null) values — confirm in superclass.
@Override public ValidationResult validate(Object value) { ValidationResult result = super.validate(value); if (result instanceof ValidationResult.ValidationPassed) { final String sValue = (String) value; if (sValue != null && sValue.length() > maxLength) { result = new ValidationResult.ValidationFailed("Value is longer than " + maxLength + " characters!"); } } return result; }
// Verifies a 2-character value fails validation when the limit is 1.
@Test public void testValidateLongString() { assertThat(new LimitedOptionalStringValidator(1).validate("12")) .isInstanceOf(ValidationResult.ValidationFailed.class); }
// Despite the name, this converts camel case to UPPER_UNDERSCORE form by delegating to toUnderline(src, true)
// (e.g. "ToUpperCamel" -> "TO_UPPER_CAMEL"); kept as-is because callers depend on this behavior.
public static String toUpperCamel(String src) { return toUnderline(src, true); }
// Pins the (name-misleading) behavior: camel-case input is converted to UPPER_UNDERSCORE form.
@Test public void testToUpperCamel() { String result = FieldUtils.toUpperCamel("ToUpperCamel"); Assert.assertEquals("TO_UPPER_CAMEL", result); }
// Tears down the Hadoop metrics system singleton.
public void shutdown() { DefaultMetricsSystem.shutdown(); }
// HA integration test: after a mkdir and edit-log roll on the active NN, the standby's tailing metrics
// (quantile gauges and op counters for tail/fetch/load/interval) must be populated. Cluster is always shut down.
@Test public void testEditLogTailing() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, 60); MiniDFSCluster dfsCluster = null; try { dfsCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .build(); DistributedFileSystem dfs = dfsCluster.getFileSystem(0); dfsCluster.transitionToActive(0); dfsCluster.waitActive(); Path testDir = new Path("/testdir"); dfs.mkdir(testDir, FsPermission.getDefault()); dfsCluster.getNameNodeRpc(0).rollEditLog(); Thread.sleep(2 * 1000); // We need to get the metrics for the SBN (excluding the NN from dfs // cluster created in setUp() and the ANN). MetricsRecordBuilder rb = getMetrics(NN_METRICS+"-2"); assertQuantileGauges("EditLogTailTime60s", rb); assertQuantileGauges("EditLogFetchTime60s", rb); assertQuantileGauges("NumEditLogLoaded60s", rb, "Count"); assertQuantileGauges("EditLogTailInterval60s", rb); assertCounterGt("EditLogTailTimeNumOps", 0L, rb); assertCounterGt("EditLogFetchTimeNumOps", 0L, rb); assertCounterGt("NumEditLogLoadedNumOps", 0L, rb); assertCounterGt("EditLogTailIntervalNumOps", 0L, rb); } finally { if (dfsCluster != null) { dfsCluster.shutdown(); } } }
// Deserializes JSON (optionally Schema-Registry-framed) into the target type, coercing the tree against the
// declared schema starting at path "$". Null payloads map to null. Any failure is rethrown as a
// SerializationException with the parse location cleared first so no payload data leaks into logs.
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
// Verifies a type mismatch inside a map value surfaces the JSON path ("$.b.value") in the error cause.
@Test public void shouldIncludePathForErrorsInMapValues() { // Given: final KsqlJsonDeserializer<Map> deserializer = givenDeserializerForSchema( SchemaBuilder .map(Schema.OPTIONAL_STRING_SCHEMA, Schema.INT32_SCHEMA) .build(), Map.class ); final byte[] bytes = serializeJson(ImmutableMap.of("a", 1, "b", true)); // When: final Exception e = assertThrows( SerializationException.class, () -> deserializer.deserialize(SOME_TOPIC, bytes) ); // Then: assertThat(e.getCause(), (hasMessage(endsWith("path: $.b.value")))); }
// Builds the template model for a custom notification message: converts the event/backlog model data with a
// timezone-aware mapper, then adds the notification "type" and the configured external HTTP URI.
@VisibleForTesting Map<String, Object> getCustomMessageModel(EventNotificationContext ctx, String type, List<MessageSummary> backlog, DateTimeZone timeZone) { EventNotificationModelData modelData = EventNotificationModelData.of(ctx, backlog); LOG.debug("the custom message model data is {}", modelData); Map<String, Object> objectMap = objectMapperProvider.getForTimeZone(timeZone).convertValue(modelData, TypeReferences.MAP_STRING_OBJECT); objectMap.put("type", type); objectMap.put("http_external_uri", this.httpExternalUri); return objectMap; }
// Verifies the Teams model map contains the event-definition fields, the notification type, and
// placeholder "<unknown>" values for job ids.
@Test public void getCustomMessageModel() { List<MessageSummary> messageSummaries = generateMessageSummaries(50); Map<String, Object> customMessageModel = teamsEventNotification.getCustomMessageModel(eventNotificationContext, teamsEventNotificationConfig.type(), messageSummaries, DateTimeZone.UTC); //there are 9 keys and two asserts needs to be implemented (backlog,event) assertThat(customMessageModel).isNotNull(); assertThat(customMessageModel.get("event_definition_description")).isEqualTo("Event Definition Test Description"); assertThat(customMessageModel.get("event_definition_title")).isEqualTo("Event Definition Test Title"); assertThat(customMessageModel.get("event_definition_type")).isEqualTo("test-dummy-v1"); assertThat(customMessageModel.get("type")).isEqualTo("teams-notification-v1"); assertThat(customMessageModel.get("job_definition_id")).isEqualTo("<unknown>"); assertThat(customMessageModel.get("job_trigger_id")).isEqualTo("<unknown>"); }
// Sends a HEAD request for the downloadable file and captures the MD5 checksum and extra-properties headers.
// handleInvalidResponse is expected to reject responses lacking required headers before they are read here.
void fetchUpdateCheckHeaders(DownloadableFile downloadableFile) throws IOException, GeneralSecurityException { String url = downloadableFile.validatedUrl(urlGenerator); final HttpRequestBase request = new HttpHead(url); request.setConfig(RequestConfig.custom().setConnectTimeout(HTTP_TIMEOUT_IN_MILLISECONDS).build()); try ( CloseableHttpClient httpClient = httpClientBuilder.build(); CloseableHttpResponse response = httpClient.execute(request) ) { handleInvalidResponse(response, url); this.md5 = response.getFirstHeader(MD5_HEADER).getValue(); this.extraProperties = HeaderUtil.parseExtraProperties(response.getFirstHeader(AGENT_EXTRA_PROPERTIES_HEADER)); } }
// Verifies a response missing the Content-MD5 header is rejected with an IOException naming the header.
@Test public void shouldFailIfMD5HeadersAreMissing() { ServerBinaryDownloader downloader = new ServerBinaryDownloader(new GoAgentServerHttpClientBuilder(null, SslVerificationMode.NONE, null, null, null), ServerUrlGeneratorMother.generatorWithoutSubPathFor("https://localhost:" + server.getSecurePort() + "/go/hello")); assertThatThrownBy(() -> downloader.fetchUpdateCheckHeaders(DownloadableFile.AGENT)) .isInstanceOf(IOException.class) .hasMessageContaining("Missing required headers 'Content-MD5' in response."); }
// Validates that existing internal topics match the expected configs: describes topics and their configs via the
// admin client, checks partition counts and cleanup policies, and retries unresolved topics until the retry
// deadline, sleeping 100ms between admin-result polls. Throws a timeout error when the deadline passes with
// topics still unvalidated.
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) { log.info("Starting to validate internal topics {}.", topicConfigs.keySet()); final long now = time.milliseconds(); final long deadline = now + retryTimeoutMs; final ValidationResult validationResult = new ValidationResult(); final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet()); final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet()); while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) { Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap(); if (!topicDescriptionsStillToValidate.isEmpty()) { final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate); descriptionsForTopic = describeTopicsResult.topicNameValues(); } Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap(); if (!topicConfigsStillToValidate.isEmpty()) { final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs( topicConfigsStillToValidate.stream() .map(topic -> new ConfigResource(Type.TOPIC, topic)) .collect(Collectors.toSet()) ); configsForTopic = describeConfigsResult.values().entrySet().stream() .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue)); } while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) { if (!descriptionsForTopic.isEmpty()) { doValidateTopic( validationResult, descriptionsForTopic, topicConfigs, topicDescriptionsStillToValidate, (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide) ); } if (!configsForTopic.isEmpty()) { doValidateTopic( validationResult, configsForTopic, topicConfigs, topicConfigsStillToValidate, (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide) ); } maybeThrowTimeoutException( Arrays.asList(topicDescriptionsStillToValidate, 
topicConfigsStillToValidate), deadline, String.format("Could not validate internal topics within %d milliseconds. " + "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs) ); if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) { Utils.sleep(100); } } maybeSleep( Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate), deadline, "validated" ); } log.info("Completed validation of internal topics {}.", topicConfigs.keySet()); return validationResult; }
// Verifies cleanup-policy validation for versioned changelog topics: too-short min compaction lag (topic2) and
// any cleanup policy containing "delete" (topic3, topic4) are reported as misconfigurations; a fully
// correct topic (topic1) is not flagged.
@Test public void shouldReportMisconfigurationsOfCleanupPolicyForVersionedChangelogTopics() { final long compactionLagMs = 1000; final long shorterCompactionLagMs = 900; setupTopicInMockAdminClient(topic1, versionedChangelogConfig(compactionLagMs)); setupTopicInMockAdminClient(topic2, versionedChangelogConfig(shorterCompactionLagMs)); final Map<String, String> versionedChangelogConfigCleanupPolicyDelete = versionedChangelogConfig(compactionLagMs); versionedChangelogConfigCleanupPolicyDelete.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE); setupTopicInMockAdminClient(topic3, versionedChangelogConfigCleanupPolicyDelete); final Map<String, String> versionedChangelogConfigCleanupPolicyCompactAndDelete = versionedChangelogConfig(compactionLagMs); versionedChangelogConfigCleanupPolicyCompactAndDelete.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + TopicConfig.CLEANUP_POLICY_DELETE); setupTopicInMockAdminClient(topic4, versionedChangelogConfigCleanupPolicyCompactAndDelete); final InternalTopicConfig internalTopicConfig1 = setupVersionedChangelogTopicConfig(topic1, 1, compactionLagMs); final InternalTopicConfig internalTopicConfig2 = setupVersionedChangelogTopicConfig(topic2, 1, compactionLagMs); final InternalTopicConfig internalTopicConfig3 = setupVersionedChangelogTopicConfig(topic3, 1, compactionLagMs); final InternalTopicConfig internalTopicConfig4 = setupVersionedChangelogTopicConfig(topic4, 1, compactionLagMs); final ValidationResult validationResult = internalTopicManager.validate(mkMap( mkEntry(topic1, internalTopicConfig1), mkEntry(topic2, internalTopicConfig2), mkEntry(topic3, internalTopicConfig3), mkEntry(topic4, internalTopicConfig4) )); final Map<String, List<String>> misconfigurationsForTopics = validationResult.misconfigurationsForTopics(); assertThat(validationResult.missingTopics(), empty()); assertThat(misconfigurationsForTopics.size(), is(3)); assertThat(misconfigurationsForTopics, hasKey(topic2)); 
assertThat(misconfigurationsForTopics.get(topic2).size(), is(1)); assertThat( misconfigurationsForTopics.get(topic2).get(0), is("Min compaction lag (" + TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG + ") of existing internal topic " + topic2 + " is " + shorterCompactionLagMs + " but should be " + compactionLagMs + " or larger.") ); assertThat(misconfigurationsForTopics, hasKey(topic3)); assertThat(misconfigurationsForTopics.get(topic3).size(), is(1)); assertThat( misconfigurationsForTopics.get(topic3).get(0), is("Cleanup policy (" + TopicConfig.CLEANUP_POLICY_CONFIG + ") of existing internal topic " + topic3 + " should not contain \"" + TopicConfig.CLEANUP_POLICY_DELETE + "\".") ); assertThat(misconfigurationsForTopics, hasKey(topic4)); assertThat(misconfigurationsForTopics.get(topic4).size(), is(1)); assertThat( misconfigurationsForTopics.get(topic4).get(0), is("Cleanup policy (" + TopicConfig.CLEANUP_POLICY_CONFIG + ") of existing internal topic " + topic4 + " should not contain \"" + TopicConfig.CLEANUP_POLICY_DELETE + "\".") ); assertThat(misconfigurationsForTopics, not(hasKey(topic1))); }
/**
 * Returns whether this session window overlaps the given window.
 * Two windows overlap iff each one starts no later than the other one ends (boundaries inclusive).
 *
 * @throws IllegalArgumentException if {@code other} is not a {@code SessionWindow}
 */
public boolean overlap(final Window other) throws IllegalArgumentException {
    if (getClass() != other.getClass()) {
        throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type " + other.getClass() + ".");
    }
    final SessionWindow that = (SessionWindow) other;
    // De Morgan of: !(that.endMs < startMs || endMs < that.startMs)
    return that.endMs >= startMs && endMs >= that.startMs;
}
// Verifies overlap() is true whenever the other session window fully contains this window's [start, end] range.
@Test public void shouldOverlapIfOtherWindowContainsThisWindow() { /* * This: [-------] * Other: [------------------] */ assertTrue(window.overlap(new SessionWindow(0, end))); assertTrue(window.overlap(new SessionWindow(0, end + 1))); assertTrue(window.overlap(new SessionWindow(0, 150))); assertTrue(window.overlap(new SessionWindow(start - 1, end))); assertTrue(window.overlap(new SessionWindow(start - 1, end + 1))); assertTrue(window.overlap(new SessionWindow(start - 1, 150))); assertTrue(window.overlap(new SessionWindow(start, end))); assertTrue(window.overlap(new SessionWindow(start, end + 1))); assertTrue(window.overlap(new SessionWindow(start, 150))); }
// Translates a protobuf ParsedSchema into a Connect Schema after normalizing its full name.
@Override public Schema toConnectSchema(final ParsedSchema schema) { return protobufData.toConnectSchema(withSchemaFullName((ProtobufSchema) schema)); }
// Verifies that with primitive-wrapping enabled, each primitive field translates to a STRUCT with a typed "value" field.
@Test public void shouldWrapPrimitives() { // Given: givenWrapPrimitives(); // When: final Schema schema = schemaTranslator.toConnectSchema(SCHEMA_WITH_WRAPPED_PRIMITIVES); // Then: assertThat(schema.field("c1").schema().type(), is(Type.STRUCT)); assertThat(schema.field("c1").schema().field("value").schema().type(), is(Type.BOOLEAN)); assertThat(schema.field("c2").schema().type(), is(Type.STRUCT)); assertThat(schema.field("c2").schema().field("value").schema().type(), is(Type.INT32)); assertThat(schema.field("c3").schema().type(), is(Type.STRUCT)); assertThat(schema.field("c3").schema().field("value").schema().type(), is(Type.INT64)); assertThat(schema.field("c4").schema().type(), is(Type.STRUCT)); assertThat(schema.field("c4").schema().field("value").schema().type(), is(Type.FLOAT64)); assertThat(schema.field("c5").schema().type(), is(Type.STRUCT)); assertThat(schema.field("c5").schema().field("value").schema().type(), is(Type.STRING)); }
// Appends the step's output field to the row metadata: a Boolean value named by the (variable-substituted)
// result field name, when one is configured; otherwise the row is left unchanged.
public void getFields( RowMetaInterface row, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { if ( !Utils.isEmpty( resultfieldname ) ) { ValueMetaInterface v = new ValueMetaBoolean( space.environmentSubstitute( resultfieldname ) ); v.setOrigin( name ); row.addValueMeta( v ); } }
// Verifies getFields adds exactly one Boolean value meta named after the configured result field.
@Test public void testGetFields() throws KettleStepException { DetectLastRowMeta meta = new DetectLastRowMeta(); meta.setDefault(); meta.setResultFieldName( "The Result" ); RowMeta rowMeta = new RowMeta(); meta.getFields( rowMeta, "this step", null, null, new Variables(), null, null ); assertEquals( 1, rowMeta.size() ); assertEquals( "The Result", rowMeta.getValueMeta( 0 ).getName() ); assertEquals( ValueMetaInterface.TYPE_BOOLEAN, rowMeta.getValueMeta( 0 ).getType() ); }
// Enables the named auth provider: loads it, adds its name to the persisted enabled set, and emits the provider.
@Override public Mono<AuthProvider> enable(String name) { return client.get(AuthProvider.class, name) .flatMap(authProvider -> updateAuthProviderEnabled(enabled -> enabled.add(name)) .thenReturn(authProvider) ); }
@Test
void testEnable() {
    // Stub the provider lookup that enable() performs first.
    AuthProvider authProvider = createAuthProvider("github");
    when(client.get(eq(AuthProvider.class), eq("github"))).thenReturn(Mono.just(authProvider));

    // Capture the ConfigMap written back so the enabled set can be inspected.
    ArgumentCaptor<ConfigMap> captor = ArgumentCaptor.forClass(ConfigMap.class);
    when(client.update(captor.capture())).thenReturn(Mono.empty());

    // System config starts empty: no providers enabled yet.
    ConfigMap configMap = new ConfigMap();
    configMap.setData(new HashMap<>());
    when(client.fetch(eq(ConfigMap.class), eq(SystemSetting.SYSTEM_CONFIG)))
        .thenReturn(Mono.just(configMap));

    // A privileged provider exists but must NOT appear in the enabled set below.
    AuthProvider local = createAuthProvider("local");
    local.getMetadata().getLabels().put(AuthProvider.PRIVILEGED_LABEL, "true");
    when(client.list(eq(AuthProvider.class), any(), any())).thenReturn(Flux.just(local));

    // Call the method being tested.
    Mono<AuthProvider> result = authProviderService.enable("github");
    assertEquals(authProvider, result.block());

    // The persisted config must list exactly the newly enabled provider.
    ConfigMap value = captor.getValue();
    String providerSettingStr = value.getData().get(SystemSetting.AuthProvider.GROUP);
    Set<String> enabled = JsonUtils.jsonToObject(providerSettingStr, SystemSetting.AuthProvider.class)
        .getEnabled();
    assertThat(enabled).containsExactly("github");

    // Verify the collaborators were exercised.
    verify(client).get(AuthProvider.class, "github");
    verify(client).fetch(eq(ConfigMap.class), eq(SystemSetting.SYSTEM_CONFIG));
}
/**
 * Writes a SETTINGS ACK, subject to the outstanding-control-frame limit.
 * When the limiter consumes the frame (returns null) the original promise is
 * returned unchanged; otherwise the write proceeds with the wrapped promise.
 */
@Override
public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) {
    final ChannelPromise wrapped = handleOutstandingControlFrames(ctx, promise);
    return wrapped == null ? promise : super.writeSettingsAck(ctx, wrapped);
}
@Test
public void testLimitSettingsAck() {
    // First ACK is queued by the limiter, so its promise stays pending.
    assertFalse(encoder.writeSettingsAck(ctx, newPromise()).isDone());
    // The second write is always marked as success by our mock, which means it will also not be queued and so
    // not count to the number of queued frames.
    assertTrue(encoder.writeSettingsAck(ctx, newPromise()).isSuccess());
    assertFalse(encoder.writeSettingsAck(ctx, newPromise()).isDone());
    // Still within the limit: no flush or close expected yet.
    verifyFlushAndClose(0, false);
    assertFalse(encoder.writeSettingsAck(ctx, newPromise()).isDone());
    assertFalse(encoder.writeSettingsAck(ctx, newPromise()).isDone());
    // Limit exceeded: the connection is flushed and closed.
    verifyFlushAndClose(1, true);
}
/**
 * Looks up a registered connection by its identifier.
 *
 * @param connectionId the connection identifier
 * @return the matching {@code Connection}, or {@code null} if none is registered
 */
public Connection getConnection(String connectionId) {
    return connections.get(connectionId);
}
@Test
void testGetConnection() {
    // The connection registered in setup must be retrievable by its id.
    assertEquals(connection, connectionManager.getConnection(connectId));
}
/**
 * Returns the variance of Student's t-distribution, {@code nu / (nu - 2)}.
 * <p>
 * The variance is only defined for {@code nu > 2}: it is infinite for
 * {@code 1 < nu <= 2} and undefined otherwise. The previous implementation
 * silently returned a negative or divide-by-zero result in those cases.
 *
 * @throws UnsupportedOperationException if the degrees of freedom is &le; 2
 */
@Override
public double variance() {
    if (nu <= 2.0) {
        throw new UnsupportedOperationException("Variance is undefined for degrees of freedom <= 2");
    }
    return nu / (nu - 2.0);
}
@Test
public void testVariance() {
    System.out.println("variance");
    TDistribution t20 = new TDistribution(20);
    t20.rand();
    // variance = nu / (nu - 2) = 20 / 18 = 10 / 9
    assertEquals(10/9.0, t20.variance(), 1E-7);
}
/**
 * Computes the HMAC-MD5 digest of {@code valueToDigest} keyed by {@code key}
 * and returns it as a lowercase hexadecimal string.
 *
 * @param key the secret key
 * @param valueToDigest the message to authenticate
 * @return the HMAC-MD5 digest as a hex string
 */
public static String hmacMd5Hex(final String key, final String valueToDigest) {
    return getHmacHex(HmacAlgorithms.HMAC_MD5, key, valueToDigest);
}
@Test
public void testHmacMd5Hex() {
    // JUnit's assertEquals takes (expected, actual); the original call had them
    // swapped, which produces misleading failure messages.
    assertEquals("3024ffb5567372102ca6775cf8140cb1", HmacHexUtils.hmacMd5Hex("testKey", "testValue"));
}
/**
 * Converts the table into a list of maps, one map per row, keyed by the
 * header row. Cell keys and values are kept as raw strings.
 *
 * @return one map per data row, with header cells as keys
 */
public List<Map<String, String>> asMaps() {
    return asMaps(String.class, String.class);
}
@Test
void asMaps_returns_maps_of_raw() {
    DataTable table = createSimpleNumberTable();
    // Build the expected map with plain puts instead of double-brace
    // initialization, which creates a throwaway anonymous HashMap subclass.
    Map<String, String> expected = new HashMap<>();
    expected.put("1", "2");
    expected.put("100", "1000");
    assertEquals(singletonList(expected), table.asMaps());
}
/**
 * Computes cluster health from the Elasticsearch cluster health response,
 * reporting RED when the cluster cannot be reached.
 */
@Override
public Health check() {
    ClusterHealthResponse response = getEsClusterHealth();
    if (response == null) {
        // ES is unreachable: report RED with the "unavailable" cause.
        return RED_HEALTH_UNAVAILABLE;
    }
    return extractStatusHealth(response);
}
@Test
public void check_returns_GREEN_without_cause_if_ES_cluster_status_is_GREEN() {
    // Simulate a healthy cluster response.
    when(esClient.clusterHealth(any()).getStatus()).thenReturn(ClusterHealthStatus.GREEN);

    Health result = underTest.check();

    assertThat(result).isEqualTo(Health.GREEN);
}
/**
 * Returns a composite reverse-range iterator over all underlying stores for this
 * store name. Each underlying iterator is opened lazily as the composite advances.
 */
@Override
public KeyValueIterator<K, V> reverseRange(final K from, final K to) {
    // Adapter that opens a reverse-range iterator on one underlying store,
    // translating migration failures into a caller-actionable message.
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
        @Override
        public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
            try {
                return store.reverseRange(from, to);
            } catch (final InvalidStateStoreException e) {
                // NOTE(review): the original exception is deliberately replaced (not
                // chained); the new message tells callers to re-discover the store.
                throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
            }
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    // The peeking wrapper is needed so hasNext() works across the composite.
    return new DelegatingPeekingKeyValueIterator<>(
        storeName,
        new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
@Test
public void shouldThrowUnsupportedOperationExceptionWhileReverseRange() {
    stubOneUnderlying.put("a", "1");
    stubOneUnderlying.put("b", "1");
    // The composite iterator is read-only: remove() must be unsupported.
    try (final KeyValueIterator<String, String> iterator = theStore.reverseRange("a", "b")) {
        assertThrows(UnsupportedOperationException.class, iterator::remove);
    }
}
/**
 * Checks whether {@code host:port} falls within the IP pattern {@code pattern}.
 * Supported pattern features: full wildcard ("*" / "*.*.*.*"), per-segment
 * wildcard ("192.168.*.1"), per-segment ranges ("192.168.1.1-63"), an optional
 * ":port" suffix, and both IPv4 and IPv6 addresses.
 *
 * @param pattern the IP pattern, optionally with a port suffix
 * @param host the host name or address to test
 * @param port the port to test against the pattern's port, if present
 * @return true when the host/port matches the pattern
 * @throws UnknownHostException if the host or a literal pattern cannot be resolved
 * @throws IllegalArgumentException if pattern/host is null or the pattern is malformed
 */
public static boolean matchIpRange(String pattern, String host, int port) throws UnknownHostException {
    if (pattern == null || host == null) {
        throw new IllegalArgumentException(
                "Illegal Argument pattern or hostName. Pattern:" + pattern + ", Host:" + host);
    }

    pattern = pattern.trim();
    // Full wildcard matches everything regardless of port.
    if ("*.*.*.*".equals(pattern) || "*".equals(pattern)) {
        return true;
    }

    InetAddress inetAddress = InetAddress.getByName(host);
    boolean isIpv4 = isValidV4Address(inetAddress);
    // Split an optional ":port" suffix off the pattern; index 0 is the host part.
    String[] hostAndPort = getPatternHostAndPort(pattern, isIpv4);
    if (hostAndPort[1] != null && !hostAndPort[1].equals(String.valueOf(port))) {
        return false;
    }
    pattern = hostAndPort[0];

    // Choose the segment separator: "." for IPv4, ":" for IPv6.
    String splitCharacter = SPLIT_IPV4_CHARACTER;
    if (!isIpv4) {
        splitCharacter = SPLIT_IPV6_CHARACTER;
    }
    String[] mask = pattern.split(splitCharacter);
    // check format of pattern
    checkHostPattern(pattern, mask, isIpv4);

    // Compare against the resolved (canonical) address, not the raw host string.
    host = inetAddress.getHostAddress();
    if (pattern.equals(host)) {
        return true;
    }
    // short name condition: no wildcard/range in the pattern, so resolve it
    // and compare canonical addresses (handles IPv6 abbreviations like "::").
    if (!ipPatternContainExpression(pattern)) {
        InetAddress patternAddress = InetAddress.getByName(pattern);
        return patternAddress.getHostAddress().equals(host);
    }

    // Segment-by-segment comparison of the pattern against the host address.
    String[] ipAddress = host.split(splitCharacter);
    for (int i = 0; i < mask.length; i++) {
        if ("*".equals(mask[i]) || mask[i].equals(ipAddress[i])) {
            continue;
        } else if (mask[i].contains("-")) {
            // Range segment such as "0-ff": the host segment must fall inside it.
            String[] rangeNumStrs = StringUtils.split(mask[i], '-');
            if (rangeNumStrs.length != 2) {
                throw new IllegalArgumentException("There is wrong format of ip Address: " + mask[i]);
            }
            Integer min = getNumOfIpSegment(rangeNumStrs[0], isIpv4);
            Integer max = getNumOfIpSegment(rangeNumStrs[1], isIpv4);
            Integer ip = getNumOfIpSegment(ipAddress[i], isIpv4);
            if (ip < min || ip > max) {
                return false;
            }
        } else if ("0".equals(ipAddress[i]) && ("0".equals(mask[i]) || "00".equals(mask[i])
                || "000".equals(mask[i]) || "0000".equals(mask[i]))) {
            // Zero segments with different zero-padding still match (e.g. "0" vs "0000").
            continue;
        } else if (!mask[i].equals(ipAddress[i])) {
            // NOTE(review): always true here (equal segments continued above),
            // so any non-range, non-zero mismatch falls through to false.
            return false;
        }
    }
    return true;
}
@Test
void testMatchIpRangeMatchWhenIpv6() throws UnknownHostException {
    // Full wildcard matches any address, even IPv4.
    assertTrue(NetUtils.matchIpRange("*.*.*.*", "192.168.1.63", 90));
    // Last-segment wildcard; the abbreviated "::" host expands to the full pattern.
    assertTrue(NetUtils.matchIpRange("234e:0:4567:0:0:0:3d:*", "234e:0:4567::3d:ff", 90));
    // Exact matches, full form and abbreviated form.
    assertTrue(NetUtils.matchIpRange("234e:0:4567:0:0:0:3d:ee", "234e:0:4567::3d:ee", 90));
    assertTrue(NetUtils.matchIpRange("234e:0:4567::3d:ee", "234e:0:4567::3d:ee", 90));
    // Last-segment ranges: 0xee lies within 0-ff and 0-ee (inclusive).
    assertTrue(NetUtils.matchIpRange("234e:0:4567:0:0:0:3d:0-ff", "234e:0:4567::3d:ee", 90));
    assertTrue(NetUtils.matchIpRange("234e:0:4567:0:0:0:3d:0-ee", "234e:0:4567::3d:ee", 90));
    // Non-matching last segment and out-of-range segment both fail.
    assertFalse(NetUtils.matchIpRange("234e:0:4567:0:0:0:3d:ff", "234e:0:4567::3d:ee", 90));
    assertFalse(NetUtils.matchIpRange("234e:0:4567:0:0:0:3d:0-ea", "234e:0:4567::3d:ee", 90));
}
// Constant-folding implementation for adddate/date_add/days_add(datetime, int):
// adds the given number of days to the datetime. Registered as monotonic so the
// optimizer can use it for partition pruning.
@ConstantFunction.List(list = {
        @ConstantFunction(name = "adddate", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true),
        @ConstantFunction(name = "date_add", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true),
        @ConstantFunction(name = "days_add", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true)
})
public static ConstantOperator daysAdd(ConstantOperator date, ConstantOperator day) {
    // createDatetimeOrNull yields a NULL constant when the result is out of range.
    return ConstantOperator.createDatetimeOrNull(date.getDatetime().plusDays(day.getInt()));
}
@Test
public void daysAdd() {
    // 2015-03-23 09:23:55 plus 10 days is 2015-04-02 09:23:55.
    assertEquals("2015-04-02T09:23:55",
            ScalarOperatorFunctions.daysAdd(O_DT_20150323_092355, O_INT_10).getDatetime().toString());
}
/**
 * Prepares fetch requests for all fetchable partitions and hands them to the
 * internal poll loop with the success/failure handlers of this fetcher.
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
            prepareFetchRequests(),
            this::handleFetchSuccess,
            this::handleFetchFailure
    );
}
@Test
public void testFetchError() {
    buildFetcher();

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // One fetch request is in flight; nothing completed yet.
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // Respond with a partition-level error instead of data.
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NOT_LEADER_OR_FOLLOWER, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    // The errored partition must not surface any records to the caller.
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
    assertFalse(partitionRecords.containsKey(tp0));
}
/**
 * Tries hard to create an instance of the given type, falling back through a
 * series of strategies instead of failing:
 * <ol>
 *   <li>primitives → their default value (0, false, ...)</li>
 *   <li>common collection interfaces → a default implementation</li>
 *   <li>the no-arg constructor</li>
 *   <li>enums → the first constant; arrays → an empty array</li>
 *   <li>any other constructor, invoked with default values per parameter</li>
 * </ol>
 *
 * @param type the class to instantiate
 * @return a new instance, or {@code null} if every strategy failed
 */
@SuppressWarnings("unchecked")
public static <T> T newInstanceIfPossible(Class<T> type) {
    Assert.notNull(type);

    // Primitive types: return the primitive default value.
    if (type.isPrimitive()) {
        return (T) ClassUtil.getPrimitiveDefaultValue(type);
    }

    // Certain well-known interfaces are instantiated via a default implementation.
    // Note the isAssignableFrom direction: it also maps supertypes such as
    // Map/Collection (and even Object, which resolves to HashMap) to concrete classes.
    if (type.isAssignableFrom(AbstractMap.class)) {
        type = (Class<T>) HashMap.class;
    } else if (type.isAssignableFrom(List.class)) {
        type = (Class<T>) ArrayList.class;
    } else if (type.isAssignableFrom(Set.class)) {
        type = (Class<T>) HashSet.class;
    }

    try {
        return newInstance(type);
    } catch (Exception e) {
        // ignore
        // No usable no-arg constructor: fall through to the other strategies below.
    }

    // Enums: return the first declared constant.
    if (type.isEnum()) {
        return type.getEnumConstants()[0];
    }

    // Arrays: return an empty array of the component type.
    if (type.isArray()) {
        return (T) Array.newInstance(type.getComponentType(), 0);
    }

    // Last resort: try every constructor with default values for its parameters.
    final Constructor<T>[] constructors = getConstructors(type);
    Class<?>[] parameterTypes;
    for (Constructor<T> constructor : constructors) {
        parameterTypes = constructor.getParameterTypes();
        if (0 == parameterTypes.length) {
            // The no-arg constructor was already attempted above.
            continue;
        }
        setAccessible(constructor);
        try {
            return constructor.newInstance(ClassUtil.getDefaultValues(parameterTypes));
        } catch (Exception ignore) {
            // Constructor failed: keep trying the remaining constructors.
        }
    }
    return null;
}
@Test
public void newInstanceIfPossibleTest(){
    // Primitive int falls back to its default value 0.
    //noinspection ConstantConditions
    final int intValue = ReflectUtil.newInstanceIfPossible(int.class);
    assertEquals(0, intValue);

    // Integer is constructed via a constructor with default parameter values.
    final Integer integer = ReflectUtil.newInstanceIfPossible(Integer.class);
    assertEquals(new Integer(0), integer);

    // Interfaces map to default implementations (Map -> HashMap, Collection -> list/set).
    final Map<?, ?> map = ReflectUtil.newInstanceIfPossible(Map.class);
    assertNotNull(map);
    final Collection<?> collection = ReflectUtil.newInstanceIfPossible(Collection.class);
    assertNotNull(collection);

    // Enums resolve to their first constant.
    final Week week = ReflectUtil.newInstanceIfPossible(Week.class);
    assertEquals(Week.SUNDAY, week);

    // Array types resolve to an empty array.
    final int[] intArray = ReflectUtil.newInstanceIfPossible(int[].class);
    assertArrayEquals(new int[0], intArray);
}
/**
 * Converts a short "A:B" duration string into milliseconds.
 * When {@code durationIsInHours} is true the string is hours:minutes,
 * otherwise minutes:seconds.
 * <p>
 * Returns 0 for malformed input (wrong number of parts or non-numeric parts)
 * instead of throwing, making the error behavior consistent: the original
 * returned 0 for "1:2:3" but threw NumberFormatException for "a:b".
 *
 * @param input duration string of the form "A:B"
 * @param durationIsInHours true for hours:minutes, false for minutes:seconds
 * @return duration in milliseconds, or 0 if the input is malformed
 */
public static int durationStringShortToMs(String input, boolean durationIsInHours) {
    String[] parts = input.split(":");
    if (parts.length != 2) {
        return 0;
    }
    // Hours mode scales both parts by an extra factor of 60.
    int modifier = durationIsInHours ? 60 : 1;
    try {
        return Integer.parseInt(parts[0]) * 60 * 1000 * modifier
                + Integer.parseInt(parts[1]) * 1000 * modifier;
    } catch (NumberFormatException e) {
        // Non-numeric parts are treated like any other malformed input.
        return 0;
    }
}
@Test
public void testDurationStringShortToMs() {
    final String eightThirty = "8:30";
    // 8 h 30 min = 30,600,000 ms; 8 min 30 s = 510,000 ms.
    assertEquals(30600000, Converter.durationStringShortToMs(eightThirty, true));
    assertEquals(510000, Converter.durationStringShortToMs(eightThirty, false));
}
/**
 * Returns the shared converter that maps a CSV record to a {@code List<String>}
 * of its cell values.
 *
 * @return the singleton list converter
 */
public static CsvRecordConverter<List<String>> listConverter() {
    return ListCsvRecordConverter.SINGLETON;
}
@Test
void shouldConvertAsList() {
    List<String> values = CsvRecordConverters.listConverter().convertRecord(record);

    // The record's cells come back as a list in column order.
    assertNotNull(values);
    assertEquals(3, values.size());
    String[] expected = {"1", "2", "3"};
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], values.get(i));
    }
}
/**
 * Returns the file client for the master file configuration, loading and
 * caching it under the dedicated master cache key on first access.
 */
@Override
public FileClient getMasterFileClient() {
    return clientCache.getUnchecked(CACHE_MASTER_ID);
}
@Test
public void testGetMasterFileClient() {
    // mock data: insert a config flagged as master.
    FileConfigDO fileConfig = randomFileConfigDO().setMaster(true);
    fileConfigMapper.insert(fileConfig);
    // prepare arguments
    Long id = fileConfig.getId();
    // mock obtaining the client from the factory.
    FileClient fileClient = new LocalFileClient(id, new LocalFileClientConfig());
    when(fileClientFactory.getFileClient(eq(fileConfig.getId()))).thenReturn(fileClient);

    // invoke and assert: the master client resolves to the mocked client.
    assertSame(fileClient, fileConfigService.getMasterFileClient());
    // assert the cache populated the client via the factory.
    verify(fileClientFactory).createOrUpdateFileClient(eq(fileConfig.getId()),
            eq(fileConfig.getStorage()), eq(fileConfig.getConfig()));
}
/**
 * Event-bus subscriber that records the most recent cluster debug event in the
 * shared {@code DebugEventHolder}.
 *
 * @param event the cluster debug event delivered by the event bus
 */
@Subscribe
public void handleDebugEvent(DebugEvent event) {
    LOG.debug("Received cluster debug event: {}", event);
    DebugEventHolder.setClusterDebugEvent(event);
}
@Test
public void testHandleDebugEvent() throws Exception {
    DebugEvent posted = DebugEvent.create("Node ID", "Test");

    // No event has been recorded before posting.
    assertThat(DebugEventHolder.getClusterDebugEvent()).isNull();

    clusterEventBus.post(posted);

    // The subscriber stores the very same event instance.
    assertThat(DebugEventHolder.getClusterDebugEvent()).isSameAs(posted);
}
/**
 * Computes the MySQL server status flag bitmask for the given session:
 * AUTOCOMMIT when auto-commit is on, IN_TRANS when a transaction is active.
 */
public static int calculateFor(final ConnectionSession connectionSession) {
    int statusFlags = 0;
    if (connectionSession.isAutoCommit()) {
        statusFlags |= MySQLStatusFlag.SERVER_STATUS_AUTOCOMMIT.getValue();
    }
    if (connectionSession.getTransactionStatus().isInTransaction()) {
        statusFlags |= MySQLStatusFlag.SERVER_STATUS_IN_TRANS.getValue();
    }
    return statusFlags;
}
@Test
void assertNotAutoCommitInTransaction() {
    // With auto-commit off and an active transaction, only IN_TRANS is set.
    when(connectionSession.getTransactionStatus().isInTransaction()).thenReturn(true);
    assertThat(ServerStatusFlagCalculator.calculateFor(connectionSession),
            is(MySQLStatusFlag.SERVER_STATUS_IN_TRANS.getValue()));
}
/**
 * Creates a fresh {@link Builder} for a custom configuration.
 *
 * @return a new builder instance
 */
public static Builder custom() {
    return new Builder();
}
// A negative half-open wait duration must be rejected by the builder.
@Test(expected = IllegalArgumentException.class)
public void maxWaitDurationInHalfOpenStateLessThanSecondShouldFail() {
    custom().maxWaitDurationInHalfOpenState(Duration.ofMillis(-1)).build();
}
/**
 * Scans every source file, counting records that match the (optionally
 * column-filtered) schema, and reports per-file and total timings.
 *
 * @return 0 on success
 * @throws IOException if a file cannot be opened or closed
 * @throws IllegalArgumentException if no source files were given
 * @throws RuntimeException wrapping any failure while reading a record
 */
@Override
public int run() throws IOException {
    Preconditions.checkArgument(sourceFiles != null && !sourceFiles.isEmpty(),
            "Missing file name");

    // Ensure all source files have the columns specified first
    // (fail fast before any scanning starts).
    Map<String, Schema> schemas = new HashMap<>();
    for (String sourceFile : sourceFiles) {
        Schema schema = getAvroSchema(sourceFile);
        schemas.put(sourceFile, Expressions.filterSchema(schema, columns));
    }

    long totalStartTime = System.currentTimeMillis();
    long totalCount = 0;
    for (String sourceFile : sourceFiles) {
        long startTime = System.currentTimeMillis();
        Iterable<Object> reader = openDataFile(sourceFile, schemas.get(sourceFile));
        // Track whether the scan threw so close() failures don't mask the real error.
        boolean threw = true;
        long count = 0;
        try {
            // Iterate purely to count; record contents are not inspected.
            for (Object record : reader) {
                count += 1;
            }
            threw = false;
        } catch (RuntimeException e) {
            throw new RuntimeException("Failed on record " + count + " in " + sourceFile, e);
        } finally {
            if (reader instanceof Closeable) {
                Closeables.close((Closeable) reader, threw);
            }
        }
        totalCount += count;
        // Per-file timing is only useful when scanning more than one file.
        if (1 < sourceFiles.size()) {
            long endTime = System.currentTimeMillis();
            console.info("Scanned " + count + " records from " + sourceFile + " in "
                    + (endTime - startTime) / 1000.0 + " s");
        }
    }
    long totalEndTime = System.currentTimeMillis();
    console.info("Scanned " + totalCount + " records from " + sourceFiles.size() + " file(s)");
    console.info("Time: " + (totalEndTime - totalStartTime) / 1000.0 + " s");
    return 0;
}
// Requesting a column that does not exist in the file's schema must fail
// during the up-front schema filtering, before any records are scanned.
@Test(expected = IllegalArgumentException.class)
public void testScanCommandWithInvalidColumnName() throws IOException {
    File file = parquetFile();
    ScanCommand command = new ScanCommand(createLogger());
    command.sourceFiles = Arrays.asList(file.getAbsolutePath());
    command.columns = Arrays.asList("invalid_field");
    command.setConf(new Configuration());
    command.run();
}
/**
 * Lists the partition expressions (columns) of a MySQL table that is
 * RANGE- or RANGE COLUMNS-partitioned, with backtick quoting stripped.
 *
 * @param connection an open JDBC connection
 * @param databaseName the schema to inspect
 * @param tableName the table to inspect
 * @return the distinct partition expressions, or an empty list if none
 * @throws StarRocksConnectorException wrapping any SQL failure
 */
@Override
public List<String> listPartitionColumns(Connection connection, String databaseName, String tableName) {
    String partitionColumnsQuery = "SELECT DISTINCT PARTITION_EXPRESSION FROM INFORMATION_SCHEMA.PARTITIONS "
            + "WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND PARTITION_NAME IS NOT NULL "
            + "AND ( PARTITION_METHOD = 'RANGE' or PARTITION_METHOD = 'RANGE COLUMNS') "
            + "AND PARTITION_EXPRESSION IS NOT NULL";
    try (PreparedStatement ps = connection.prepareStatement(partitionColumnsQuery)) {
        ps.setString(1, databaseName);
        ps.setString(2, tableName);
        // try-with-resources closes the ResultSet (leaked in the previous version);
        // a null resource is legal and simply skipped on close.
        try (ResultSet rs = ps.executeQuery()) {
            if (rs == null) {
                // Defensive: JDBC guarantees a non-null ResultSet, but mocked
                // drivers in tests may return null.
                return Lists.newArrayList();
            }
            ImmutableList.Builder<String> list = ImmutableList.builder();
            while (rs.next()) {
                // Strip MySQL backtick quoting from the partition expression.
                String partitionColumn = rs.getString("PARTITION_EXPRESSION")
                        .replace("`", "");
                list.add(partitionColumn);
            }
            return list.build();
        }
    } catch (SQLException | NullPointerException e) {
        throw new StarRocksConnectorException(e.getMessage(), e);
    }
}
@Test
public void testListPartitionColumnsRsNull() {
    try {
        // Force the mocked driver to return a null ResultSet; the metadata layer
        // must treat this as "no partition columns" rather than fail.
        new Expectations() {
            {
                preparedStatement.executeQuery();
                result = null;
                minTimes = 0;
            }
        };
        JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
        Integer size = jdbcMetadata.listPartitionColumns("test", "tbl1",
                Arrays.asList(new Column("d", Type.VARCHAR))).size();
        assertTrue(size == 0);
    } catch (Exception e) {
        // Any exception here means the null ResultSet was not tolerated.
        System.out.println(e.getMessage());
        Assert.fail();
    }
}