focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// REST endpoint: activates the user account identified by userName.
// Requires the session user from the request attribute; delegates to usersService.
@Operation(summary = "activateUser", description = "ACTIVATE_USER_NOTES")
@Parameters({
        @Parameter(name = "userName", description = "USER_NAME", schema = @Schema(implementation = String.class)),
})
@PostMapping("/activate")
@ResponseStatus(HttpStatus.OK)
@ApiException(UPDATE_USER_ERROR)
public Result<Object> activateUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                   @RequestParam(value = "userName") String userName) {
    // Sanitize the raw parameter to neutralize escape characters before use.
    userName = ParameterUtils.handleEscapes(userName);
    Map<String, Object> result = usersService.activateUser(loginUser, userName);
    return returnDataList(result);
}
// Verifies POST /users/activate returns SUCCESS for a logged-in session.
// NOTE(review): currently @Disabled — reason not visible here; confirm before re-enabling.
@Test
@Disabled
public void testActivateUser() throws Exception {
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("userName", "user_test");
    MvcResult mvcResult = mockMvc.perform(post("/users/activate")
            .header(SESSION_ID, sessionId)
            .params(paramsMap))
            .andExpect(status().isOk())
            .andExpect(content().contentType(MediaType.APPLICATION_JSON))
            .andReturn();
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
}
// UDAF factory producing a sample-standard-deviation aggregator for Long columns.
// Intermediate aggregate schema: STRUCT<SUM bigint, COUNT bigint, M2 double>.
@UdafFactory(description = "Compute sample standard deviation of column with type Long.",
        aggregateSchema = "STRUCT<SUM bigint, COUNT bigint, M2 double>")
public static TableUdaf<Long, Struct, Double> stdDevLong() {
    return getStdDevImplementation(
            0L,
            STRUCT_LONG,
            // new running SUM after adding a value
            (agg, newValue) -> newValue + agg.getInt64(SUM),
            // term fed into the M2 update when a value is added
            // NOTE(review): presumably part of a Welford-style variance update — confirm in getStdDevImplementation
            (agg, newValue) -> Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt64(SUM) + newValue)),
            // difference of the two partial means, used when merging aggregates
            (agg1, agg2) -> Double.valueOf(
                    agg1.getInt64(SUM) / agg1.getInt64(COUNT) - agg2.getInt64(SUM) / agg2.getInt64(COUNT)),
            // combined SUM when merging two aggregates
            (agg1, agg2) -> agg1.getInt64(SUM) + agg2.getInt64(SUM),
            // new running SUM after retracting a value
            (agg, valueToRemove) -> agg.getInt64(SUM) - valueToRemove);
}
// Aggregates 1..5 and checks the intermediate struct (COUNT, SUM, M2)
// and the mapped sample variance/deviation result.
@Test
public void shouldCalculateStdDevLongs() {
    final TableUdaf<Long, Struct, Double> udaf = stdDevLong();
    Struct agg = udaf.initialize();
    final Long[] values = new Long[] {1L, 2L, 3L, 4L, 5L};
    for (final Long thisValue : values) {
        agg = udaf.aggregate(thisValue, agg);
    }
    assertThat(agg.getInt64(COUNT), equalTo(5L));
    assertThat(agg.getInt64(SUM), equalTo(15L));
    assertThat(agg.getFloat64(M2), equalTo(10.0));
    final double standardDev = udaf.map(agg);
    assertThat(standardDev, equalTo(2.5));
}
/**
 * Clears any pending size callbacks, notifies the subclass that the resource
 * was cleared, and — when the clear did not originate from this target —
 * detaches the attach-state listener.
 */
@Override
public final void onLoadCleared(@Nullable Drawable placeholder) {
    sizeDeterminer.clearCallbacksAndListener();
    onResourceCleared(placeholder);
    // A clear triggered by this target itself keeps the listener registered.
    if (isClearedByUs) {
        return;
    }
    maybeRemoveAttachStateListener();
}
// With clearOnDetach disabled, onLoadCleared must leave attach-state listeners
// registered: the listener added here should still fire once when the activity
// becomes visible after the clear.
@Test
public void onLoadCleared_withoutClearOnDetach_doesNotRemoveListeners() {
    final AtomicInteger count = new AtomicInteger();
    OnAttachStateChangeListener expected =
            new OnAttachStateChangeListener() {
                @Override
                public void onViewAttachedToWindow(View v) {
                    count.incrementAndGet();
                }

                @Override
                public void onViewDetachedFromWindow(View v) {
                    // Intentionally Empty.
                }
            };
    view.addOnAttachStateChangeListener(expected);
    attachStateTarget.onLoadCleared(/* placeholder= */ null);
    activity.visible();
    Truth.assertThat(count.get()).isEqualTo(1);
}
// Decides a job vertex's parallelism from the byte sizes of its consumed
// non-broadcast blocking results, then clamps the value to
// [minParallelism, maxParallelism] and to the floor imposed by the maximum
// number of subpartitions a single task may consume.
int decideParallelism(
        JobVertexID jobVertexId,
        List<BlockingResultInfo> consumedResults,
        int minParallelism,
        int maxParallelism) {
    checkArgument(!consumedResults.isEmpty());

    // Considering that the sizes of broadcast results are usually very small, we compute the
    // parallelism only based on sizes of non-broadcast results
    final List<BlockingResultInfo> nonBroadcastResults =
            getNonBroadcastResultInfos(consumedResults);
    if (nonBroadcastResults.isEmpty()) {
        return minParallelism;
    }

    long totalBytes =
            nonBroadcastResults.stream()
                    .mapToLong(BlockingResultInfo::getNumBytesProduced)
                    .sum();
    int parallelism = (int) Math.ceil((double) totalBytes / dataVolumePerTask);
    // Each task can only consume a bounded number of subpartitions, which sets a
    // lower bound on parallelism regardless of the data volume.
    int minParallelismLimitedByMaxSubpartitions =
            (int) Math.ceil(
                    (double) getMaxNumSubpartitions(nonBroadcastResults)
                            / MAX_NUM_SUBPARTITIONS_PER_TASK_CONSUME);
    parallelism = Math.max(parallelism, minParallelismLimitedByMaxSubpartitions);

    LOG.debug(
            "The total size of non-broadcast data is {}, the initially decided parallelism of job vertex {} is {}.",
            new MemorySize(totalBytes),
            jobVertexId,
            parallelism);

    if (parallelism < minParallelism) {
        LOG.info(
                "The initially decided parallelism {} is smaller than the minimum parallelism {}. "
                        + "Use {} as the finally decided parallelism of job vertex {}.",
                parallelism,
                minParallelism,
                minParallelism,
                jobVertexId);
        parallelism = minParallelism;
    } else if (parallelism > maxParallelism) {
        LOG.info(
                "The initially decided parallelism {} is larger than the maximum parallelism {}. "
                        + "Use {} as the finally decided parallelism of job vertex {}.",
                parallelism,
                maxParallelism,
                maxParallelism,
                jobVertexId);
        parallelism = maxParallelism;
    }

    return parallelism;
}
// Broadcast results must be ignored: only the 8.25 GB non-broadcast result
// should drive the decided parallelism (expected 9 tasks).
@Test
void testDecideParallelism() {
    BlockingResultInfo resultInfo1 = createFromBroadcastResult(BYTE_256_MB);
    BlockingResultInfo resultInfo2 = createFromNonBroadcastResult(BYTE_256_MB + BYTE_8_GB);

    int parallelism =
            createDeciderAndDecideParallelism(Arrays.asList(resultInfo1, resultInfo2));

    assertThat(parallelism).isEqualTo(9);
}
/** Supplies a fresh {@code UnbindChangeProcessor} for each task instance. */
@Override
public Processor<K, Change<V>, KO, SubscriptionWrapper<K>> get() {
    final UnbindChangeProcessor processor = new UnbindChangeProcessor();
    return processor;
}
// A record whose foreign key is unchanged (old value == new value) must still be
// forwarded as a PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE subscription.
@Test
public void leftJoinShouldPropagateNewRecordOfUnchangedFK() {
    final MockInternalNewProcessorContext<String, SubscriptionWrapper<String>> context = new MockInternalNewProcessorContext<>();
    leftJoinProcessor.init(context);
    context.setRecordMetadata("topic", 0, 0);

    final LeftValue leftRecordValue = new LeftValue(fk1);

    // old and new Change values are identical — the FK did not change
    leftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, leftRecordValue), 0));

    assertThat(context.forwarded().size(), is(1));
    assertThat(
        context.forwarded().get(0).record(),
        is(new Record<>(fk1, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0))
    );
}
@Override public String getFileName(String baseRepositoryURL, Map<String, List<String>> headers) { if (headers != null) { for (String key : headers.keySet()) { if (key != null && key.equalsIgnoreCase(GITLAB_FILE_NAME_HEADER)) { return headers.get(key).get(0); } } } // If not present, extract from raw URL. String lastPath = baseRepositoryURL.substring(baseRepositoryURL.lastIndexOf("/") + 1); if (lastPath.startsWith("raw?ref=") || lastPath.indexOf('.') == -1) { String[] pathElements = baseRepositoryURL.split("/"); int i = pathElements.length; while (i > 0) { String path = pathElements[i - 1]; if (path.contains(".") && path.contains(ENCODED_FILE_SEPARATOR)) { return path.substring(path.indexOf(ENCODED_FILE_SEPARATOR) + ENCODED_FILE_SEPARATOR.length()); } i--; } } return null; }
// Covers all three resolution paths: no headers (URL fallback), exact-case
// header, and lower-case header (case-insensitive match).
@Test
void testGetFileName() {
    GitLabReferenceURLBuilder builder = new GitLabReferenceURLBuilder();
    assertEquals("API_Pastry_1.0.0-openapi.yaml", builder.getFileName(BASE_URL, null));

    Map<String, List<String>> headers = Map.of("X-Gitlab-File-Name", List.of("FooBar.yaml"));
    assertEquals("FooBar.yaml", builder.getFileName(BASE_URL, headers));

    headers = Map.of("x-gitlab-file-name", List.of("FooBar.yaml"));
    assertEquals("FooBar.yaml", builder.getFileName(BASE_URL, headers));
}
/**
 * Asynchronously removes the high-availability data belonging to the given job.
 * Checked exceptions from the cleanup are rethrown as {@link CompletionException}
 * so they surface through the returned future.
 */
@Override
public CompletableFuture<Void> globalCleanupAsync(JobID jobID, Executor executor) {
    final Runnable cleanupTask = () -> {
        logger.info("Clean up the high availability data for job {}.", jobID);
        try {
            internalCleanupJobData(jobID);
        } catch (Exception e) {
            throw new CompletionException(e);
        }
        logger.info(
                "Finished cleaning up the high availability data for job {}.", jobID);
    };
    return CompletableFuture.runAsync(cleanupTask, executor);
}
// globalCleanupAsync must invoke the job-data cleanup callback with the job's id.
@Test
void testCleanupJobData() throws Exception {
    final Queue<CloseOperations> closeOperations = new ArrayDeque<>(3);
    final TestingBlobStoreService testingBlobStoreService =
            new TestingBlobStoreService(closeOperations);

    JobID jobID = new JobID();
    CompletableFuture<JobID> jobCleanupFuture = new CompletableFuture<>();

    final TestingHaServices haServices =
            new TestingHaServices(
                    new Configuration(),
                    Executors.directExecutor(),
                    testingBlobStoreService,
                    closeOperations,
                    () -> {},
                    jobCleanupFuture::complete);

    haServices.globalCleanupAsync(jobID, Executors.directExecutor()).join();
    JobID jobIDCleaned = jobCleanupFuture.get();
    assertThat(jobIDCleaned).isEqualTo(jobID);
}
/**
 * Truncates this log segment to {@code targetSize} bytes.
 *
 * @param targetSize the desired size; must be in [0, current size]
 * @return the number of bytes removed
 * @throws KafkaException if targetSize is negative or larger than the current size
 * @throws IOException    if the underlying channel operation fails
 */
public int truncateTo(int targetSize) throws IOException {
    int originalSize = sizeInBytes();
    if (targetSize > originalSize || targetSize < 0)
        // Fixed: the two concatenated literals previously produced a doubled space
        // ("failed,  size") in the exception message.
        throw new KafkaException("Attempt to truncate log segment " + file + " to " + targetSize + " bytes failed, " +
                "size of this log segment is " + originalSize + " bytes.");
    // Only touch the channel when an actual truncation is needed.
    if (targetSize < (int) channel.size()) {
        channel.truncate(targetSize);
        size.set(targetSize);
    }
    return originalSize - targetSize;
}
// When the target size equals the channel size, truncateTo must query the size
// but never call FileChannel.truncate (no-op path).
@Test
public void testTruncateNotCalledIfSizeIsSameAsTargetSize() throws IOException {
    FileChannel channelMock = mock(FileChannel.class);

    when(channelMock.size()).thenReturn(42L);
    when(channelMock.position(42L)).thenReturn(null);

    FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false);
    fileRecords.truncateTo(42);

    verify(channelMock, atLeastOnce()).size();
    verify(channelMock, times(0)).truncate(anyLong());
}
// Validates a readwrite-splitting rule configuration:
// data source groups first, then the load-balancer settings.
@Override
public void check(final String databaseName, final ReadwriteSplittingRuleConfiguration ruleConfig,
                  final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
    checkDataSources(databaseName, ruleConfig.getDataSourceGroups(), dataSourceMap, builtRules);
    checkLoadBalancer(databaseName, ruleConfig);
}
// A data source provided by another rule's mapper must satisfy the checker —
// the check should pass without throwing.
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertCheckWhenConfigOtherRulesDatasource() {
    ReadwriteSplittingRuleConfiguration config = createContainsOtherRulesDatasourceConfiguration();
    RuleConfigurationChecker checker = OrderedSPILoader.getServicesByClass(RuleConfigurationChecker.class, Collections.singleton(config.getClass())).get(config.getClass());
    ShardingSphereRule rule = mock(ShardingSphereRule.class);
    DataSourceMapperRuleAttribute ruleAttribute = mock(DataSourceMapperRuleAttribute.class, RETURNS_DEEP_STUBS);
    when(ruleAttribute.getDataSourceMapper().containsKey("otherDatasourceName")).thenReturn(true);
    when(rule.getAttributes()).thenReturn(new RuleAttributes(ruleAttribute));
    checker.check("test", config, mockDataSources(), Collections.singleton(rule));
}
/**
 * Validates {@code options} against {@code klass}, not requiring every
 * member of each required group to be set.
 */
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) {
    final boolean strictGroupValidation = false;
    return validate(klass, options, strictGroupValidation);
}
// Setting either member of a required group (foo XOR bar) must be accepted.
@Test
public void testWhenOneOfRequiredGroupIsSetIsValid() {
    GroupRequired groupRequired = PipelineOptionsFactory.as(GroupRequired.class);
    groupRequired.setFoo("foo");
    groupRequired.setBar(null);
    groupRequired.setRunner(CrashingRunner.class);

    PipelineOptionsValidator.validate(GroupRequired.class, groupRequired);

    // Symmetric
    groupRequired.setFoo(null);
    groupRequired.setBar("bar");

    PipelineOptionsValidator.validate(GroupRequired.class, groupRequired);
}
@Override public void deleteTenant(Long id) { // 校验存在 validateUpdateTenant(id); // 删除 tenantMapper.deleteById(id); }
// Deleting a non-existent tenant must raise TENANT_NOT_EXISTS.
@Test
public void testDeleteTenant_notExists() {
    // Prepare parameters
    Long id = randomLongId();

    // Invoke and assert the expected service exception
    assertServiceException(() -> tenantService.deleteTenant(id), TENANT_NOT_EXISTS);
}
// Loads configuration from the given path, rejecting empty documents and
// wrapping JSON syntax errors in a ConfigurationParsingException that carries
// the parse location and detail message.
@Override
public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException {
    try (InputStream input = provider.open(requireNonNull(path))) {
        final JsonNode node = mapper.readTree(createParser(input));

        if (node == null) {
            // An empty file parses to null rather than throwing.
            throw ConfigurationParsingException
                .builder("Configuration at " + path + " must not be empty")
                .build(path);
        }

        return build(node, path);
    } catch (JsonParseException e) {
        throw ConfigurationParsingException
            .builder("Malformed " + formatName)
            .setCause(e)
            .setLocation(e.getLocation())
            .setDetail(e.getMessage())
            .build(path);
    }
}
// Building without a source file must fall back to the class's default
// CaffeineSpec value.
@Test
void usesDefaultedCacheBuilderSpec() throws Exception {
    final ExampleWithDefaults example =
        new YamlConfigurationFactory<>(ExampleWithDefaults.class, validator, Jackson.newObjectMapper(), "dw")
            .build();
    assertThat(example.cacheBuilderSpec)
        .isNotNull()
        .isEqualTo(CaffeineSpec.parse("initialCapacity=0,maximumSize=0"));
}
// Service loop: blocks on the request queue and dispatches each request by its
// mode (POP vs PULL) until the service is stopped.
// NOTE(review): InterruptedException is swallowed so an interrupt acts as a
// wake-up; the loop exits only via isStopped() — confirm this is intentional.
@Override
public void run() {
    logger.info(this.getServiceName() + " service started");

    while (!this.isStopped()) {
        try {
            MessageRequest messageRequest = this.messageRequestQueue.take();
            if (messageRequest.getMessageRequestMode() == MessageRequestMode.POP) {
                this.popMessage((PopRequest) messageRequest);
            } else {
                this.pullMessage((PullRequest) messageRequest);
            }
        } catch (InterruptedException ignored) {
        } catch (Exception e) {
            logger.error("Pull Message Service Run Method exception", e);
        }
    }

    logger.info(this.getServiceName() + " service end");
}
// A POP request for a group with no registered consumer must still cause the
// service loop to look up the consumer exactly once.
@Test
public void testRunWithNullConsumer() throws InterruptedException, IllegalAccessException {
    LinkedBlockingQueue<MessageRequest> messageRequestQueue = new LinkedBlockingQueue<>();
    PopRequest popRequest = mock(PopRequest.class);
    when(popRequest.getMessageRequestMode()).thenReturn(MessageRequestMode.POP);
    when(popRequest.getConsumerGroup()).thenReturn(defaultGroup);
    messageRequestQueue.put(popRequest);
    // Inject the prepared queue into the service via reflection.
    FieldUtils.writeDeclaredField(pullMessageService, "messageRequestQueue", messageRequestQueue, true);
    new Thread(() -> pullMessageService.run()).start();
    // Give the service thread time to consume the request before stopping it.
    TimeUnit.SECONDS.sleep(1);
    pullMessageService.makeStop();
    verify(mQClientFactory, times(1)).selectConsumer(eq(defaultGroup));
}
/** Returns a copy of this location with only the X coordinate replaced. */
public Location setX(double x) {
    final Vector3 movedPosition = position.withX(x);
    return new Location(extent, movedPosition, yaw, pitch);
}
// setX must be non-mutating: the original keeps X=0 while the copy carries the
// new X and unchanged Y/Z.
@Test
public void testSetX() throws Exception {
    World world = mock(World.class);
    Location location1 = new Location(world, Vector3.ZERO);
    Location location2 = location1.setX(TEST_VALUE);
    assertEquals(0, location1.getX(), EPSILON);
    assertEquals(TEST_VALUE, location2.getX(), EPSILON);
    assertEquals(0, location2.getY(), EPSILON);
    assertEquals(0, location2.getZ(), EPSILON);
}
@Override protected Map<String, ByteBuffer> onLeaderElected(String leaderId, String assignmentStrategy, List<JoinGroupResponseData.JoinGroupResponseMember> allSubscriptions, boolean skipAssignment) { ConsumerPartitionAssignor assignor = lookupAssignor(assignmentStrategy); if (assignor == null) throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy); String assignorName = assignor.name(); Set<String> allSubscribedTopics = new HashSet<>(); Map<String, Subscription> subscriptions = new HashMap<>(); // collect all the owned partitions Map<String, List<TopicPartition>> ownedPartitions = new HashMap<>(); for (JoinGroupResponseData.JoinGroupResponseMember memberSubscription : allSubscriptions) { Subscription subscription = ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberSubscription.metadata())); subscription.setGroupInstanceId(Optional.ofNullable(memberSubscription.groupInstanceId())); subscriptions.put(memberSubscription.memberId(), subscription); allSubscribedTopics.addAll(subscription.topics()); ownedPartitions.put(memberSubscription.memberId(), subscription.ownedPartitions()); } // the leader will begin watching for changes to any of the topics the group is interested in, // which ensures that all metadata changes will eventually be seen updateGroupSubscription(allSubscribedTopics); isLeader = true; if (skipAssignment) { log.info("Skipped assignment for returning static leader at generation {}. 
The static leader " + "will continue with its existing assignment.", generation().generationId); assignmentSnapshot = metadataSnapshot; return Collections.emptyMap(); } log.debug("Performing assignment using strategy {} with subscriptions {}", assignorName, subscriptions); Map<String, Assignment> assignments = assignor.assign(metadata.fetch(), new GroupSubscription(subscriptions)).groupAssignment(); // skip the validation for built-in cooperative sticky assignor since we've considered // the "generation" of ownedPartition inside the assignor if (protocol == RebalanceProtocol.COOPERATIVE && !assignorName.equals(COOPERATIVE_STICKY_ASSIGNOR_NAME)) { validateCooperativeAssignment(ownedPartitions, assignments); } maybeUpdateGroupSubscription(assignorName, assignments, allSubscribedTopics); // metadataSnapshot could be updated when the subscription is updated therefore // we must take the assignment snapshot after. assignmentSnapshot = metadataSnapshot; log.info("Finished assignment for group at generation {}: {}", generation().generationId, assignments); Map<String, ByteBuffer> groupAssignment = new HashMap<>(); for (Map.Entry<String, Assignment> assignmentEntry : assignments.entrySet()) { ByteBuffer buffer = ConsumerProtocol.serializeAssignment(assignmentEntry.getValue()); groupAssignment.put(assignmentEntry.getKey(), buffer); } return groupAssignment; }
// Under the COOPERATIVE protocol, an assignor that reassigns an owned partition
// without a revocation round must be rejected; under EAGER no validation runs.
@Test
public void testPerformAssignmentShouldValidateCooperativeAssignment() {
    SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
    List<JoinGroupResponseData.JoinGroupResponseMember> metadata = validateCooperativeAssignmentTestSetup();

    // simulate the custom cooperative assignor didn't revoke the partition first before assign to other consumer
    Map<String, List<TopicPartition>> assignment = new HashMap<>();
    assignment.put(consumerId, singletonList(t1p));
    assignment.put(consumerId2, singletonList(t2p));
    partitionAssignor.prepare(assignment);

    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
        if (protocol == COOPERATIVE) {
            // in cooperative protocol, we should throw exception when validating cooperative assignment
            Exception e = assertThrows(IllegalStateException.class,
                () -> coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false));
            assertTrue(e.getMessage().contains("Assignor supporting the COOPERATIVE protocol violates its requirements"));
        } else {
            // in eager protocol, we should not validate assignment
            coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
        }
    }
}
// Creates a ZkClient, optionally adding digest auth ("user:password") when a
// two-element authInfo is provided, ensures the root path exists, and installs
// a state listener that triggers recovery on a new session.
ZkClient buildZkClient(String address, int sessionTimeout, int connectTimeout, String... authInfo) {
    ZkClient zkClient = new ZkClient(address, sessionTimeout, connectTimeout);
    if (authInfo != null && authInfo.length == 2) {
        if (!StringUtils.isBlank(authInfo[0]) && !StringUtils.isBlank(authInfo[1])) {
            // digest auth expects "username:password" bytes
            StringBuilder auth = new StringBuilder(authInfo[0]).append(":").append(authInfo[1]);
            zkClient.addAuthInfo("digest", auth.toString().getBytes());
        }
    }
    if (!zkClient.exists(ROOT_PATH_WITHOUT_SUFFIX)) {
        zkClient.createPersistent(ROOT_PATH_WITHOUT_SUFFIX, true);
    }
    zkClient.subscribeStateChanges(new IZkStateListener() {

        @Override
        public void handleStateChanged(Watcher.Event.KeeperState keeperState) throws Exception {
            //ignore
        }

        @Override
        public void handleNewSession() throws Exception {
            // A new session means ephemeral state was lost; rebuild it.
            recover();
        }

        @Override
        public void handleSessionEstablishmentError(Throwable throwable) throws Exception {
            //ignore
        }
    });
    return zkClient;
}
/** Smoke test: a freshly built client can see ZooKeeper's built-in root node. */
@Test
public void buildZkTest() {
    final ZkClient zkClient = service.buildZkClient("127.0.0.1:2181", 5000, 5000);
    Assertions.assertTrue(zkClient.exists("/zookeeper"));
}
/** A topic is serializable iff a schema is registered under its subject. */
@Override
public boolean canSerialize(String topic, Target type) {
    return getSchemaBySubject(schemaSubject(topic, type)).isPresent();
}
// A random topic has no registered subject, so both key and value targets
// must report not serializable.
@Test
void canSerializeReturnsFalseIfSubjectDoesNotExist() {
    String topic = RandomString.make(10);
    assertThat(serde.canSerialize(topic, Serde.Target.KEY)).isFalse();
    assertThat(serde.canSerialize(topic, Serde.Target.VALUE)).isFalse();
}
// Fires a user timer: validates the output timestamp is present, then invokes
// the DoFn's @OnTimer method through the argument provider.
@Override
public <KeyT> void onTimer(
    String timerId,
    String timerFamilyId,
    KeyT key,
    BoundedWindow window,
    Instant timestamp,
    Instant outputTimestamp,
    TimeDomain timeDomain) {

  Preconditions.checkNotNull(outputTimestamp, "outputTimestamp");

  OnTimerArgumentProvider<KeyT> argumentProvider =
      new OnTimerArgumentProvider<>(timerId, key, window, timestamp, outputTimestamp, timeDomain);
  invoker.invokeOnTimer(timerId, timerFamilyId, argumentProvider);
}
// With zero allowed skew, a timer that outputs 5ms before the firing timestamp
// must fail with an IllegalArgumentException wrapped in UserCodeException,
// and the message must name the timestamp and the allowed skew.
@Test
public void testOnTimerNoSkew() {
    TimerOutputSkewingDoFn fn = new TimerOutputSkewingDoFn(Duration.ZERO, Duration.millis(5));
    DoFnRunner<KV<String, Duration>, Duration> runner =
        new SimpleDoFnRunner<>(
            null,
            fn,
            NullSideInputReader.empty(),
            null,
            null,
            Collections.emptyList(),
            mockStepContext,
            null,
            Collections.emptyMap(),
            WindowingStrategy.of(new GlobalWindows()),
            DoFnSchemaInformation.create(),
            Collections.emptyMap());
    Exception exception =
        assertThrows(
            UserCodeException.class,
            () -> {
                runner.onTimer(
                    TimerDeclaration.PREFIX + TimerOutputSkewingDoFn.TIMER_ID,
                    "",
                    null,
                    GlobalWindow.INSTANCE,
                    new Instant(0),
                    new Instant(0),
                    TimeDomain.EVENT_TIME);
            });
    assertThat(exception.getCause(), isA(IllegalArgumentException.class));
    assertThat(
        exception.getMessage(),
        allOf(
            containsString("must be no earlier"),
            containsString(
                String.format(
                    "timestamp of the current input or timer (%s)", new Instant(0).toString())),
            containsString(
                String.format(
                    "the allowed skew (%s)",
                    PeriodFormat.getDefault().print(Duration.ZERO.toPeriod())))));
}
/**
 * Splits the given Java source, converting any internal failure into a
 * RuntimeException asking the user to file a bug report.
 */
public static String split(String code, int maxMethodLength, int maxClassMemberCount) {
    try {
        return splitImpl(code, maxMethodLength, maxClassMemberCount);
    } catch (Throwable cause) {
        final String bugReportMessage = "JavaCodeSplitter failed. This is a bug. Please file an issue.";
        throw new RuntimeException(bugReportMessage, cause);
    }
}
// Invalid source must surface as the wrapped "this is a bug" RuntimeException.
// Disabled pending FLINK-27702.
@Test
@Disabled("Disabled in because of https://issues.apache.org/jira/browse/FLINK-27702")
void testInvalidJavaCode() {
    try {
        JavaCodeSplitter.split("public class InvalidClass { return 1; }", 4000, 10000);
    } catch (Exception e) {
        assertThat(e)
            .satisfies(
                matching(
                    FlinkMatchers.containsMessage(
                        "JavaCodeSplitter failed. This is a bug. Please file an issue.")));
    }
}
// Converts an internal Message into a GELF message: reuses the message's
// timestamp when it is a DateTime (else "now" UTC), maps level and full
// message when present, tags the forwarder, and copies all fields through.
protected GelfMessage toGELFMessage(final Message message) {
    final DateTime timestamp;
    final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
    if (fieldTimeStamp instanceof DateTime) {
        timestamp = (DateTime) fieldTimeStamp;
    } else {
        timestamp = Tools.nowUTC();
    }

    final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
    final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
    final String forwarder = GelfOutput.class.getCanonicalName();
    final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
            // GELF timestamps are seconds with fractional millis
            .timestamp(timestamp.getMillis() / 1000.0d)
            .additionalField("_forwarder", forwarder)
            .additionalFields(message.getFields());

    if (messageLevel != null) {
        builder.level(messageLevel);
    }

    if (fullMessage != null) {
        builder.fullMessage(fullMessage);
    }
    return builder.build();
}
// A non-String "facility" field must pass through unchanged as an additional field.
@Test
public void testToGELFMessageWithNonStringFacility() throws Exception {
    final GelfTransport transport = mock(GelfTransport.class);
    final GelfOutput gelfOutput = new GelfOutput(transport);
    final DateTime now = DateTime.now(DateTimeZone.UTC);
    final Message message = messageFactory.createMessage("Test", "Source", now);
    message.addField("facility", 42L);

    final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);

    assertEquals(42L, gelfMessage.getAdditionalFields().get("facility"));
}
@Override public void submit(VplsOperation vplsOperation) { if (isLeader) { // Only leader can execute operation addVplsOperation(vplsOperation); } }
// Submitting a REMOVE operation must eventually leave the VPLS store empty.
@Test
public void testSubmitRemoveOperation() {
    VplsData vplsData = VplsData.of(VPLS1);
    vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
    vplsData.state(VplsData.VplsState.REMOVING);
    VplsOperation vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.REMOVE);
    vplsOperationManager.submit(vplsOperation);
    // Poll until the async operation completes within the allowed window.
    assertAfter(OPERATION_DELAY, OPERATION_DURATION, () -> {
        Collection<VplsData> vplss = vplsOperationManager.vplsStore.getAllVpls();
        assertEquals(0, vplss.size());
    });
}
// Hosted-only validation: every container cluster that uses a secret store
// must also declare an Athenz identity provider in deployment.xml.
@Override
public void validate(Context context) {
    if (! context.deployState().isHosted()) return;
    // Only the default application type is subject to this rule.
    if (context.model().getAdmin().getApplicationType() != ApplicationType.DEFAULT) return;

    for (ContainerCluster<?> cluster : context.model().getContainerClusters().values()) {
        if (cluster.getSecretStore().isPresent() && ! hasIdentityProvider(cluster))
            context.illegal(String.format(
                    "Container cluster '%s' uses a secret store, so an Athenz domain and an Athenz service" +
                    " must be declared in deployment.xml.", cluster.getName()));
    }
}
// No secret store configured → the validator must pass even without an
// Athenz declaration in deployment.xml.
@Test
void app_without_secret_store_passes_validation_without_athenz_in_deployment() throws Exception {
    String servicesXml = joinLines("<services version='1.0'>",
            "  <container id='default' version='1.0' />",
            "</services>");
    DeployState deployState = deployState(servicesXml, deploymentXml(false));
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);

    ValidationTester.validate(new SecretStoreValidator(), model, deployState);
}
/**
 * Retrieves, but does not remove, the head of this queue.
 *
 * @throws NoSuchElementException if the queue is empty
 */
@Override
public E element() {
    final E head = peek();
    if (head != null) {
        return head;
    }
    throw new NoSuchElementException("Queue is empty");
}
// element() on an empty queue must throw NoSuchElementException.
@Test(expected = NoSuchElementException.class)
public void testElement_whenEmpty() {
    queue.element();
}
// Registers metadata for every declared method of the class: builds a
// MetaDataRegisterDTO per method, publishes it, and caches it per method.
@Override
protected void handleClass(final Class<?> clazz,
                           final Object bean,
                           @NonNull final ShenyuSpringWebSocketClient beanShenyuClient,
                           final String superPath) {
    Method[] methods = ReflectionUtils.getDeclaredMethods(clazz);
    for (Method method : methods) {
        final MetaDataRegisterDTO metaData = buildMetaDataDTO(bean, beanShenyuClient,
                pathJoin(getContextPath(), superPath), clazz, method);
        getPublisher().publishEvent(metaData);
        getMetaDataMap().put(method, metaData);
    }
}
// Smoke test: handleClass must process a mock class without throwing.
@Test
public void testHandleClass() {
    Class<MockClass> clazz = MockClass.class;
    eventListener.handleClass(clazz, mockClass, annotation, SUPER_PATH);
}
/** Delegates partition lookup for {@code topic} to the wrapped producer. */
List<PartitionInfo> partitionsFor(final String topic) {
    final List<PartitionInfo> partitions = producer.partitionsFor(topic);
    return partitions;
}
// partitionsFor must forward to the wrapped producer and return the exact
// same list instance.
@Test
public void shouldForwardCallToPartitionsFor() {
    final List<PartitionInfo> expectedPartitionInfo = Collections.emptyList();
    when(mockedProducer.partitionsFor(topic)).thenReturn(expectedPartitionInfo);

    final List<PartitionInfo> partitionInfo = streamsProducerWithMock.partitionsFor(topic);

    assertThat(partitionInfo, sameInstance(expectedPartitionInfo));
}
// Fetches the field mappings of an index and returns a name → FieldMapping map.
// Fields of type "alias" are resolved to the type of their target path.
public Map<String, FieldMapping> fieldTypes(final String index) {
    final JsonNode result = client.executeRequest(request(index),
            "Unable to retrieve field types of index " + index);
    final JsonNode fields = result.path(index).path("mappings").path("properties");
    //noinspection UnstableApiUsage
    return Streams.stream(fields.fields())
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
                final JsonNode entryValue = entry.getValue();
                String type = entryValue.path("type").asText();
                // Aliases carry no type of their own; follow the "path" reference.
                if ("alias".equals(type)) {
                    String aliasPath = entryValue.path("path").asText();
                    type = fields.path(aliasPath).path("type").asText();
                }
                return FieldMapping.create(
                        type,
                        entryValue.path("fielddata").asBoolean()
                );
            }));
}
// An "alias" field must resolve to the type of the field it points at
// ("action_alias" → "action" → keyword).
@Test
void testAliasTypeIsProperlyResolved() throws Exception {
    String mappingResponse = """
            {
              "graylog_42": {
                "mappings": {
                  "properties": {
                    "action_alias": {
                      "type": "alias",
                      "path": "action"
                    },
                    "action": {
                      "type": "keyword"
                    }
                  }
                }
              }
            }
            """;
    doReturn(objectMapper.readTree(mappingResponse))
            .when(client)
            .executeRequest(eq(new Request("GET", "/graylog_42/_mapping")), anyString());
    final Map<String, FieldMappingApi.FieldMapping> expectedResult = Map.of(
            "action_alias", FieldMappingApi.FieldMapping.create("keyword", false),
            "action", FieldMappingApi.FieldMapping.create("keyword", false)
    );
    final Map<String, FieldMappingApi.FieldMapping> result = toTest.fieldTypes("graylog_42");
    assertEquals(expectedResult, result);
}
// Evaluates the switch script asynchronously for the incoming message and
// routes the message to the resulting relation names; script failures are
// reported back to the context as message failures.
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    ctx.logJsEvalRequest();
    Futures.addCallback(scriptEngine.executeSwitchAsync(msg), new FutureCallback<>() {
        @Override
        public void onSuccess(@Nullable Set<String> result) {
            ctx.logJsEvalResponse();
            processSwitch(ctx, msg, result);
        }

        @Override
        public void onFailure(Throwable t) {
            ctx.logJsEvalFailure();
            ctx.tellFailure(msg, t);
        }
    }, MoreExecutors.directExecutor()); //usually runs in a callbackExecutor
}
// A switch script returning multiple relation names must route the message to
// all of them in a single tellNext call.
@Test
public void multipleRoutesAreAllowed() throws TbNodeException {
    initWithScript();
    TbMsgMetaData metaData = new TbMsgMetaData();
    metaData.putValue("temp", "10");
    metaData.putValue("humidity", "99");
    String rawJson = "{\"name\": \"Vit\", \"passed\": 5}";

    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, null, metaData, TbMsgDataType.JSON, rawJson, ruleChainId, ruleNodeId);
    when(scriptEngine.executeSwitchAsync(msg)).thenReturn(Futures.immediateFuture(Sets.newHashSet("one", "three")));

    node.onMsg(ctx, msg);
    verify(ctx).tellNext(msg, Sets.newHashSet("one", "three"));
}
/** Maps a TransactionRuleConfiguration onto its YAML representation field by field. */
@Override
public YamlTransactionRuleConfiguration swapToYamlConfiguration(final TransactionRuleConfiguration data) {
    YamlTransactionRuleConfiguration yamlConfig = new YamlTransactionRuleConfiguration();
    yamlConfig.setDefaultType(data.getDefaultType());
    yamlConfig.setProviderType(data.getProviderType());
    yamlConfig.setProps(data.getProps());
    return yamlConfig;
}
// Every field of the rule configuration must be carried over into the YAML form.
@Test
void assertSwapToYamlConfiguration() {
    YamlTransactionRuleConfiguration actual = new YamlTransactionRuleConfigurationSwapper()
            .swapToYamlConfiguration(new TransactionRuleConfiguration("default", "provider", new Properties()));
    assertThat(actual.getDefaultType(), is("default"));
    assertThat(actual.getProviderType(), is("provider"));
    assertThat(actual.getProps(), is(new Properties()));
}
/** Builds an AND compound predicate tree over the given operators. */
public static ScalarOperator compoundAnd(Collection<ScalarOperator> nodes) {
    final CompoundPredicateOperator.CompoundType andType =
            CompoundPredicateOperator.CompoundType.AND;
    return createCompound(andType, nodes);
}
// Five operands should produce a balanced AND tree: ((1 AND 2) AND (3 AND 4)) AND 5.
@Test
public void compoundAnd1() {
    ScalarOperator tree1 =
            Utils.compoundAnd(ConstantOperator.createInt(1), ConstantOperator.createInt(2),
                    ConstantOperator.createInt(3),
                    ConstantOperator.createInt(4), ConstantOperator.createInt(5));

    assertEquals(CompoundPredicateOperator.CompoundType.AND, ((CompoundPredicateOperator) tree1).getCompoundType());
    assertEquals(CompoundPredicateOperator.CompoundType.AND,
            ((CompoundPredicateOperator) tree1.getChild(0)).getCompoundType());
    assertEquals(5, ((ConstantOperator) tree1.getChild(1)).getInt());

    assertEquals(CompoundPredicateOperator.CompoundType.AND,
            ((CompoundPredicateOperator) tree1.getChild(0).getChild(0)).getCompoundType());
    assertEquals(CompoundPredicateOperator.CompoundType.AND,
            ((CompoundPredicateOperator) tree1.getChild(0).getChild(1)).getCompoundType());

    assertEquals(1, ((ConstantOperator) tree1.getChild(0).getChild(0).getChild(0)).getInt());
    assertEquals(2, ((ConstantOperator) tree1.getChild(0).getChild(0).getChild(1)).getInt());
    assertEquals(3, ((ConstantOperator) tree1.getChild(0).getChild(1).getChild(0)).getInt());
    assertEquals(4, ((ConstantOperator) tree1.getChild(0).getChild(1).getChild(1)).getInt());
}
// Creates a key serde for the given format and schema, delegating all
// construction to createInner with unchanged arguments.
@Override
public Serde<GenericKey> create(
    final FormatInfo format,
    final PersistenceSchema schema,
    final KsqlConfig ksqlConfig,
    final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
    final String loggerNamePrefix,
    final ProcessingLogContext processingLogContext,
    final Optional<TrackedCallback> tracker
) {
    return createInner(
        format,
        schema,
        ksqlConfig,
        schemaRegistryClientFactory,
        loggerNamePrefix,
        processingLogContext,
        tracker
    );
}
// The factory must return the logging-wrapped serde instance for a
// non-windowed key.
@Test
public void shouldReturnLoggingSerdeNonWindowed() {
    // When:
    final Serde<GenericKey> result = factory
        .create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt, Optional.empty());

    // Then:
    assertThat(result, is(sameInstance(loggingSerde)));
}
// Deserializes JSON bytes from a topic into the target type: null bytes map to
// null, schema-registry framing is stripped when applicable, and the parsed
// tree is coerced to the declared schema. Parse errors are wrapped in
// SerializationException with their location cleared to avoid leaking payload data.
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }

        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
            ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
            : MAPPER.readTree(bytes);

        final Object coerced = enforceFieldType(
            "$",
            new JsonValueContext(value, schema)
        );

        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }

        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
            "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
// Null payloads (Kafka tombstones) must round-trip to null, not throw.
@Test
public void shouldDeserializeNullAsNull() {
    assertThat(deserializer.deserialize(SOME_TOPIC, null), is(nullValue()));
}
/**
 * Convenience overload: resolves a logger keyed by the class's fully-qualified name.
 */
public static InternalLogger getInstance(Class<?> clazz) {
    return getInstance(clazz.getName());
}
// Verifies that trace(message, throwable) is forwarded verbatim to the backing logger.
@Test
public void testTraceWithException() {
    final InternalLogger logger = InternalLoggerFactory.getInstance("mock");
    logger.trace("a", e);
    verify(mockLogger).trace("a", e);
}
@Override public V get(Object key) { //noinspection SuspiciousMethodCalls final int index = keys.indexOf(key); if (index > -1) { return values.get(index); } return null; }
// Exercises put/get and the reverse getKey lookup of TableMap.
// Fix: the deprecated, allocation-forcing `new Integer(int)` boxing constructor is
// replaced with `Integer.valueOf(int)` (removed for deprecation in recent JDKs).
@Test
public void getTest(){
    final TableMap<String, Integer> tableMap = new TableMap<>(16);
    tableMap.put("aaa", 111);
    tableMap.put("bbb", 222);

    // Forward lookups: key -> value.
    assertEquals(Integer.valueOf(111), tableMap.get("aaa"));
    assertEquals(Integer.valueOf(222), tableMap.get("bbb"));

    // Reverse lookups: value -> key.
    assertEquals("aaa", tableMap.getKey(111));
    assertEquals("bbb", tableMap.getKey(222));
}
/**
 * Validates a Pubsub message against Google Cloud Pub/Sub resource limits and returns
 * its estimated encoded size in bytes (payload + attribute keys/values + per-attribute
 * encoding overhead).
 *
 * <p>Checks, in order: payload size, attribute count, each attribute key size, each
 * attribute value size, and finally the combined total against {@code maxPublishBatchSize}.
 *
 * @throws SizeLimitExceededException if any individual limit or the total limit is exceeded
 */
static int validatePubsubMessageSize(PubsubMessage message, int maxPublishBatchSize)
    throws SizeLimitExceededException {
  int payloadSize = message.getPayload().length;
  if (payloadSize > PUBSUB_MESSAGE_DATA_MAX_BYTES) {
    throw new SizeLimitExceededException(
        "Pubsub message data field of length "
            + payloadSize
            + " exceeds maximum of "
            + PUBSUB_MESSAGE_DATA_MAX_BYTES
            + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
  }
  int totalSize = payloadSize;
  @Nullable Map<String, String> attributes = message.getAttributeMap();
  if (attributes != null) {
    if (attributes.size() > PUBSUB_MESSAGE_MAX_ATTRIBUTES) {
      throw new SizeLimitExceededException(
          "Pubsub message contains "
              + attributes.size()
              + " attributes which exceeds the maximum of "
              + PUBSUB_MESSAGE_MAX_ATTRIBUTES
              + ". See https://cloud.google.com/pubsub/quotas#resource_limits");
    }
    // Consider attribute encoding overhead, so it doesn't go over the request limits
    totalSize += attributes.size() * PUBSUB_MESSAGE_ATTRIBUTE_ENCODE_ADDITIONAL_BYTES;
    for (Map.Entry<String, String> attribute : attributes.entrySet()) {
      String key = attribute.getKey();
      // Limits are defined in bytes, so sizes are measured on the UTF-8 encoding.
      int keySize = key.getBytes(StandardCharsets.UTF_8).length;
      if (keySize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES) {
        throw new SizeLimitExceededException(
            "Pubsub message attribute key '"
                + key
                + "' exceeds the maximum of "
                + PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES
                + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
      }
      totalSize += keySize;
      String value = attribute.getValue();
      int valueSize = value.getBytes(StandardCharsets.UTF_8).length;
      if (valueSize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES) {
        // Only a 256-char prefix of the offending value is echoed in the error message.
        throw new SizeLimitExceededException(
            "Pubsub message attribute value for key '"
                + key
                + "' starting with '"
                + value.substring(0, Math.min(256, value.length()))
                + "' exceeds the maximum of "
                + PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES
                + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
      }
      totalSize += valueSize;
    }
  }
  if (totalSize > maxPublishBatchSize) {
    throw new SizeLimitExceededException(
        "Pubsub message of length "
            + totalSize
            + " exceeds maximum of "
            + maxPublishBatchSize
            + " bytes, when considering the payload and attributes. "
            + "See https://cloud.google.com/pubsub/quotas#resource_limits");
  }
  return totalSize;
}
// The computed size must be payload bytes + per-attribute encoding overhead (6 bytes)
// + UTF-8 byte lengths of the attribute key and value.
@Test
public void testValidatePubsubMessageSizePayloadAndAttributes() throws SizeLimitExceededException {
    byte[] data = new byte[1024];
    String attributeKey = "key";
    String attributeValue = "value";
    Map<String, String> attributes = ImmutableMap.of(attributeKey, attributeValue);
    PubsubMessage message = new PubsubMessage(data, attributes);

    int messageSize = PreparePubsubWriteDoFn.validatePubsubMessageSize(message, PUBSUB_MESSAGE_MAX_TOTAL_SIZE);

    assertEquals(
        data.length
            + 6 // PUBSUB_MESSAGE_ATTRIBUTE_ENCODE_ADDITIONAL_BYTES
            + attributeKey.getBytes(StandardCharsets.UTF_8).length
            + attributeValue.getBytes(StandardCharsets.UTF_8).length,
        messageSize);
}
/**
 * Renders the alarm message by interleaving the pre-split literal segments with values
 * pulled from the entity. After every segment except the last, the placeholder recorded
 * at the same position in {@code valueFroms} decides which entity field is appended
 * (ID -> getId0(), NAME -> getName()).
 */
public String format(AlarmEntity alarmEntity) {
    final StringBuilder rendered = new StringBuilder();
    final int lastSegment = formatSegments.size() - 1;
    for (int segment = 0; segment <= lastSegment; segment++) {
        rendered.append(formatSegments.get(segment));
        if (segment == lastSegment) {
            // No placeholder follows the final literal segment.
            continue;
        }
        switch (valueFroms.get(segment)) {
            case ID:
                rendered.append(alarmEntity.getId0());
                break;
            case NAME:
                rendered.append(alarmEntity.getName());
        }
    }
    return rendered.toString();
}
// Templates may contain stray unmatched braces; only well-formed {name}/{id}
// placeholders are substituted, everything else passes through verbatim.
@Test
public void testStringFormatWithArg() {
    AlarmMessageFormatter formatter = new AlarmMessageFormatter("abc} words {name} - {id} .. {");
    String message = formatter.format(new AlarmEntity("SERVICE", -1, "service", "1290", ""));
    Assertions.assertEquals("abc} words service - 1290 .. {", message);
}
/**
 * Starts leader election for the given contender by registering it with the parent
 * service under this election's component id.
 *
 * @throws NullPointerException if {@code contender} is null
 * @throws Exception whatever the parent service's registration propagates
 */
@Override
public void startLeaderElection(LeaderContender contender) throws Exception {
    Preconditions.checkNotNull(contender);
    parentService.register(componentId, contender);
}
// An exception thrown by the parent service during contender registration must
// propagate unwrapped out of startLeaderElection.
@Test
void testContenderRegistrationFailure() throws Exception {
    final Exception expectedException =
            new Exception("Expected exception during contender registration.");
    final DefaultLeaderElection.ParentService parentService =
            TestingAbstractLeaderElectionService.newBuilder()
                    .setRegisterConsumer(
                            (actualComponentId, actualContender) -> {
                                throw expectedException;
                            })
                    .build();
    try (final DefaultLeaderElection testInstance =
            new DefaultLeaderElection(parentService, DEFAULT_TEST_COMPONENT_ID)) {
        assertThatThrownBy(
                        () ->
                                testInstance.startLeaderElection(
                                        TestingGenericLeaderContender.newBuilder().build()))
                .isEqualTo(expectedException);
    }
}
/**
 * Fetches the value schema by delegating to the shared {@code getSchema} lookup with
 * {@code isKey = false}.
 */
@Override
public SchemaResult getValueSchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
  return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false);
}
// A 403 from the schema registry must surface as a failure result (no schema, a
// populated failure reason) rather than an exception.
@Test
public void shouldReturnErrorFromGetValueIfForbidden() throws Exception {
    // Given:
    when(srClient.getSchemaBySubjectAndId(any(), anyInt()))
        .thenThrow(forbiddenException());

    // When:
    final SchemaResult result =
        supplier.getValueSchema(Optional.of(TOPIC_NAME), Optional.of(42), expectedFormat, SerdeFeatures.of());

    // Then:
    assertThat(result.schemaAndId, is(Optional.empty()));
    assertThat(result.failureReason, is(not(Optional.empty())));
    verifyFailureMessageForValue(result, Optional.of(42));
}
/**
 * Pre-checks whether a folder named {@code filename} may be created beneath
 * {@code workdir}: the name must pass validation, and the role granting directory
 * creation must be assumable.
 *
 * @throws InvalidFilenameException if the name fails validation
 * @throws BackgroundException if assuming the create-directories role fails
 */
@Override
public void preflight(final Path workdir, final String filename) throws BackgroundException {
    if(validate(filename)) {
        assumeRole(workdir, filename, CREATEDIRECTORIESPERMISSION);
        return;
    }
    final String failure = MessageFormat.format(
            LocaleFactory.localizedString("Cannot create folder {0}", "Error"), filename);
    throw new InvalidFilenameException(failure);
}
// A workdir whose ACL grants only read access must cause preflight to reject folder
// creation with AccessDeniedException.
@Test
public void testPreflightFileAccessDeniedCustomProps() throws Exception {
    final Path workdir = new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    workdir.setAttributes(workdir.attributes().withAcl(new Acl(new Acl.CanonicalUser(), READPERMISSION)));
    assertThrows(AccessDeniedException.class,
            () -> new CteraDirectoryFeature(session).preflight(workdir, new AlphanumericRandomStringService().random()));
}
/**
 * Removes every configuration property that is both blank and error-free.
 *
 * <p>A property is removable when its effective value is blank AND neither its plain
 * nor its encrypted value object carries validation errors. Candidates are collected
 * first and removed in one pass to avoid mutating the collection while iterating.
 */
public void clearEmptyConfigurations() {
    List<ConfigurationProperty> removable = new ArrayList<>();
    for (ConfigurationProperty property : this) {
        ConfigurationValue plainValue = property.getConfigurationValue();
        EncryptedConfigurationValue encryptedValue = property.getEncryptedConfigurationValue();
        boolean blank = StringUtils.isBlank(property.getValue());
        boolean plainClean = plainValue == null || plainValue.errors().isEmpty();
        boolean encryptedClean = encryptedValue == null || encryptedValue.errors().isEmpty();
        if (blank && plainClean && encryptedClean) {
            removable.add(property);
        }
    }
    this.removeAll(removable);
}
// Three empty, error-free properties are purged; the one carrying a validation error
// on its value must survive the sweep.
@Test
void shouldClearConfigurationsWhichAreEmptyAndNoErrors() throws Exception {
    Configuration configuration = new Configuration();
    configuration.add(new ConfigurationProperty(new ConfigurationKey("name-one"), new ConfigurationValue()));
    configuration.add(new ConfigurationProperty(new ConfigurationKey("name-two"), new EncryptedConfigurationValue()));
    configuration.add(new ConfigurationProperty(new ConfigurationKey("name-three"), null, new EncryptedConfigurationValue(), null));
    ConfigurationProperty configurationProperty =
            new ConfigurationProperty(new ConfigurationKey("name-four"), null, new EncryptedConfigurationValue(), null);
    configurationProperty.addErrorAgainstConfigurationValue("error");
    configuration.add(configurationProperty);

    configuration.clearEmptyConfigurations();

    assertThat(configuration.size()).isEqualTo(1);
    assertThat(configuration.get(0).getConfigurationKey().getName()).isEqualTo("name-four");
}
/**
 * Asks the SCM plugin for its configuration by submitting an extension request and
 * deserializing the response with the message handler matching the resolved
 * extension version.
 */
public SCMPropertyConfiguration getSCMConfiguration(String pluginId) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_SCM_CONFIGURATION, new DefaultPluginInteractionCallback<>() {
        @Override
        public SCMPropertyConfiguration onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            return messageHandlerMap.get(resolvedExtensionVersion).responseMessageForSCMConfiguration(responseBody);
        }
    });
}
// Verifies the request sent to the plugin and that the handler's deserialized
// configuration object is returned as-is (same instance).
@Test
public void shouldTalkToPluginToGetSCMConfiguration() throws Exception {
    SCMPropertyConfiguration deserializedResponse = new SCMPropertyConfiguration();
    when(jsonMessageHandler.responseMessageForSCMConfiguration(responseBody)).thenReturn(deserializedResponse);

    SCMPropertyConfiguration response = scmExtension.getSCMConfiguration(PLUGIN_ID);

    assertRequest(requestArgumentCaptor.getValue(), SCM_EXTENSION, "1.0", SCMExtension.REQUEST_SCM_CONFIGURATION, null);
    verify(jsonMessageHandler).responseMessageForSCMConfiguration(responseBody);
    assertSame(response, deserializedResponse);
}
public static void main(String[] args) { // Simple lazy loader - not thread safe var holderNaive = new HolderNaive(); var heavy = holderNaive.getHeavy(); LOGGER.info("heavy={}", heavy); // Thread safe lazy loader, but with heavy synchronization on each access var holderThreadSafe = new HolderThreadSafe(); var another = holderThreadSafe.getHeavy(); LOGGER.info("another={}", another); // The most efficient lazy loader utilizing Java 8 features var java8Holder = new Java8Holder(); var next = java8Holder.getHeavy(); LOGGER.info("next={}", next); }
// Smoke test: the demo application's main must complete without throwing.
@Test
void shouldExecuteApplicationWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Builds the group name for a GitHub team by delegating to the
 * (organizationId, teamId) overload.
 */
public static String toGroupName(GsonTeam team) {
    return toGroupName(team.getOrganizationId(), team.getId());
}
// Group names are composed as "<organization>/<team>".
@Test
public void toGroupName_withGroupAndName_returnsCorrectGroupName() {
    assertThat(GithubTeamConverter.toGroupName("Org1", "team-1")).isEqualTo("Org1/team-1");
}
/**
 * Packs a semantic version into a single {@code int} laid out as {@code 0xMMmmpp}
 * (major in bits 16-23, minor in bits 8-15, patch in bits 0-7).
 *
 * @param major major component, 0-255
 * @param minor minor component, 0-255
 * @param patch patch component, 0-255
 * @return the packed version, never 0
 * @throws IllegalArgumentException if any component is outside 0-255, or all three are zero
 */
public static int compose(final int major, final int minor, final int patch) {
    // The three identical range checks are shared via a helper; messages are unchanged.
    checkComponentRange("major", major);
    checkComponentRange("minor", minor);
    checkComponentRange("patch", patch);
    if (major + minor + patch == 0) {
        // 0 is reserved as an invalid/unset sentinel, so 0.0.0 is rejected.
        throw new IllegalArgumentException("all parts cannot be zero");
    }
    return (major << 16) | (minor << 8) | patch;
}

/** Rejects a version component outside the single-byte range 0-255. */
private static void checkComponentRange(final String name, final int value) {
    if (value < 0 || value > 255) {
        throw new IllegalArgumentException(name + " must be 0-255: " + value);
    }
}
// A minor component above 255 (one byte) must be rejected.
@Test
void shouldDetectExcessiveMinor() {
    assertThrows(IllegalArgumentException.class, () -> SemanticVersion.compose(1, 256, 1));
}
/**
 * Iterable-accepting overload of {@code and}: converts to an array and delegates to
 * the varargs variant.
 */
public static <P> Matcher<P> and(Iterable<? extends Matcher<P>> matchers) {
    return and(toArray(matchers));
}
// Conjunction of three always-true matchers must itself match.
@Test
void and_multiple_matched() {
    Matcher<Void> one = b -> true;
    Matcher<Void> two = b -> true;
    Matcher<Void> three = b -> true;
    assertThat(and(one, two, three).matches(null)).isTrue();
}
/**
 * Converts a SARIF result into a SonarQube external issue: fixed type and clean-code
 * attribute, engine id from the driver name, severity/impact mapped from the rule
 * levels, rule id taken from the result (required), plus mapped locations.
 *
 * @throws NullPointerException if the result carries no ruleId
 */
NewExternalIssue mapResult(String driverName, @Nullable Result.Level ruleSeverity,
    @Nullable Result.Level ruleSeverityForNewTaxonomy, Result result) {
    NewExternalIssue newExternalIssue = sensorContext.newExternalIssue();
    newExternalIssue.type(DEFAULT_TYPE);
    newExternalIssue.engineId(driverName);
    newExternalIssue.severity(toSonarQubeSeverity(ruleSeverity));
    // ruleId is mandatory; fail fast with a message naming the offending driver.
    newExternalIssue.ruleId(requireNonNull(result.getRuleId(),
        "No ruleId found for issue thrown by driver " + driverName));
    newExternalIssue.cleanCodeAttribute(DEFAULT_CLEAN_CODE_ATTRIBUTE);
    newExternalIssue.addImpact(DEFAULT_SOFTWARE_QUALITY, toSonarQubeImpactSeverity(ruleSeverityForNewTaxonomy));
    mapLocations(result, newExternalIssue);
    return newExternalIssue;
}
// Every mapped issue must carry the default clean-code attribute.
@Test
public void mapResult_mapsCorrectlyCleanCodeAttribute() {
    NewExternalIssue newExternalIssue = resultMapper.mapResult(DRIVER_NAME, WARNING, WARNING, result);
    verify(newExternalIssue).cleanCodeAttribute(ResultMapper.DEFAULT_CLEAN_CODE_ATTRIBUTE);
}
/**
 * Resolves the WeChat MP service for the given user type: prefer a DB-configured,
 * enabled client; otherwise fall back to the yaml-configured default service.
 */
@VisibleForTesting
WxMpService getWxMpService(Integer userType) {
    // Step 1: look up the client config in the DB and build/reuse the matching WxMpService.
    SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
            SocialTypeEnum.WECHAT_MP.getType(), userType);
    if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
        // Cache key combines client id and secret so credential changes get a fresh instance.
        return wxMpServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
    }
    // Step 2: no enabled DB config, so use the WxMpService built from application-*.yaml.
    return wxMpService;
}
// When an ENABLED client row exists in the DB, the DB-backed service (not the
// yaml-backed default) must be returned, configured with that row's credentials.
@Test
public void testGetWxMpService_clientEnable() {
    // Prepare parameters
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    // Mock data
    SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())
            .setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MP.getType()));
    socialClientMapper.insert(client);
    // Mock collaborators
    WxMpProperties.ConfigStorage configStorage = mock(WxMpProperties.ConfigStorage.class);
    when(wxMpProperties.getConfigStorage()).thenReturn(configStorage);
    // Invoke
    WxMpService result = socialClientService.getWxMpService(userType);
    // Assert
    assertNotSame(wxMpService, result);
    assertEquals(client.getClientId(), result.getWxMpConfigStorage().getAppId());
    assertEquals(client.getClientSecret(), result.getWxMpConfigStorage().getSecret());
}
/**
 * Returns whether the given epoch-millis string denotes a future instant.
 * Null or empty input yields {@code false}; the guard is evaluated first so parsing
 * is never attempted on a missing value.
 *
 * <p>Note: a non-numeric, non-empty string still throws {@link NumberFormatException},
 * matching the original behavior.
 */
public static boolean isFuture(String timestamp) {
    if (timestamp == null || timestamp.isEmpty()) {
        return false;
    }
    return isFuture(Long.parseLong(timestamp));
}
// A timestamp one minute ahead is future; one minute behind is not.
@Test
public void isFuture() {
    String nextMinute = String.valueOf(Calendar.getInstance().getTimeInMillis() + 60000);
    String previousMinute = String.valueOf(Calendar.getInstance().getTimeInMillis() - 60000);
    assertTrue(DateUtils.isFuture(nextMinute));
    assertFalse(DateUtils.isFuture(previousMinute));
}
/**
 * Translates a DeleteRecords response into per-partition outcomes: successes (low
 * watermark wrapped as DeletedRecords), failures, partitions whose leader must be
 * re-looked-up (unmapped), and retriable errors. Finally sanity-checks that the broker
 * answered for every requested partition and fails any it silently omitted.
 */
@Override
public ApiResult<TopicPartition, DeletedRecords> handleResponse(
    Node broker,
    Set<TopicPartition> keys,
    AbstractResponse abstractResponse
) {
    DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
    Map<TopicPartition, DeletedRecords> completed = new HashMap<>();
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    List<TopicPartition> unmapped = new ArrayList<>();
    Set<TopicPartition> retriable = new HashSet<>();

    for (DeleteRecordsResponseData.DeleteRecordsTopicResult topicResult: response.data().topics()) {
        for (DeleteRecordsResponseData.DeleteRecordsPartitionResult partitionResult : topicResult.partitions()) {
            Errors error = Errors.forCode(partitionResult.errorCode());
            TopicPartition topicPartition = new TopicPartition(topicResult.name(), partitionResult.partitionIndex());
            if (error == Errors.NONE) {
                completed.put(topicPartition, new DeletedRecords(partitionResult.lowWatermark()));
            } else {
                // Classifies the error into failed/unmapped/retriable buckets.
                handlePartitionError(topicPartition, error, failed, unmapped, retriable);
            }
        }
    }

    // Sanity-check if the current leader for these partitions returned results for all of them.
    // Skipped entirely once any partition is unmapped, since a leader change explains gaps.
    for (TopicPartition topicPartition : keys) {
        if (unmapped.isEmpty()
            && !completed.containsKey(topicPartition)
            && !failed.containsKey(topicPartition)
            && !retriable.contains(topicPartition)
        ) {
            ApiException sanityCheckException = new ApiException(
                "The response from broker " + broker.id() +
                    " did not contain a result for topic partition " + topicPartition);
            log.error(
                "DeleteRecords request for topic partition {} failed sanity check",
                topicPartition,
                sanityCheckException);
            failed.put(topicPartition, sanityCheckException);
        }
    }

    return new ApiResult<>(completed, failed, unmapped);
}
// A partition missing from the broker's response must fail the sanity check with a
// descriptive ApiException while all answered partitions complete normally.
@Test
public void testHandleResponseSanityCheck() {
    TopicPartition errorPartition = t0p0;
    Map<TopicPartition, RecordsToDelete> recordsToDeleteMap = new HashMap<>(recordsToDelete);
    recordsToDeleteMap.remove(errorPartition);

    AdminApiHandler.ApiResult<TopicPartition, DeletedRecords> result =
        handleResponse(createResponse(emptyMap(), recordsToDeleteMap.keySet()));

    assertEquals(recordsToDelete.size() - 1, result.completedKeys.size());
    assertEquals(1, result.failedKeys.size());
    assertEquals(errorPartition, result.failedKeys.keySet().iterator().next());
    String sanityCheckMessage = result.failedKeys.get(errorPartition).getMessage();
    assertTrue(sanityCheckMessage.contains("did not contain a result for topic partition"));
    assertTrue(result.unmappedKeys.isEmpty());
}
/**
 * Returns the MetricsContainer bound to the current thread, or null when none is set.
 *
 * <p>When absent, logs at most once per process (guarded by the
 * REPORTED_MISSING_CONTAINER compare-and-set): an error if metrics are supported but
 * no container was attached to this thread, otherwise a warning that the environment
 * does not support metrics reporting.
 */
public static @Nullable MetricsContainer getCurrentContainer() {
    MetricsContainer container = CONTAINER_FOR_THREAD.get().container;
    if (container == null && REPORTED_MISSING_CONTAINER.compareAndSet(false, true)) {
        if (isMetricsSupported()) {
            LOG.error(
                "Unable to update metrics on the current thread. Most likely caused by using metrics "
                    + "outside the managed work-execution thread:\n {}",
                StringUtils.arrayToNewlines(Thread.currentThread().getStackTrace(), 10));
        } else {
            // rate limiting this log as it can be emitted each time metrics incremented
            LOG.warn(
                "Reporting metrics are not supported in the current execution environment:\n {}",
                StringUtils.arrayToNewlines(Thread.currentThread().getStackTrace(), 10));
        }
    }
    return container;
}
// With no container attached to the thread, the getter must return null (not throw).
@Test
public void testBehavesWithoutMetricsContainer() {
    assertNull(MetricsEnvironment.getCurrentContainer());
}
/**
 * Writes the wrapped Avro datum to the underlying encoder and flushes immediately.
 */
@Override
public void serialize(AvroWrapper<T> avroWrapper) throws IOException {
    mAvroDatumWriter.write(avroWrapper.datum(), mAvroEncoder);
    // This would be a lot faster if the Serializer interface had a flush() method
    // and the Hadoop framework called it when needed. For now, we'll have to flush
    // on every record.
    mAvroEncoder.flush();
}
// Round-trip: two string records serialized by AvroSerializer must decode back in
// order via a plain Avro binary decoder with the same schema.
@Test
void serialize() throws IOException {
    // Create a serializer.
    Schema writerSchema = Schema.create(Schema.Type.STRING);
    AvroSerializer<CharSequence> serializer = new AvroSerializer<>(writerSchema);

    // Check the writer schema.
    assertEquals(writerSchema, serializer.getWriterSchema());

    // Serialize two records, 'record1' and 'record2'.
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    serializer.open(outputStream);
    serializer.serialize(new AvroKey<>("record1"));
    serializer.serialize(new AvroKey<>("record2"));
    serializer.close();

    // Make sure the records were serialized correctly.
    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
    Schema readerSchema = Schema.create(Schema.Type.STRING);
    DatumReader<CharSequence> datumReader = new GenericDatumReader<>(readerSchema);
    Decoder decoder = DecoderFactory.get().binaryDecoder(inputStream, null);
    CharSequence record = null;
    record = datumReader.read(record, decoder);
    assertEquals("record1", record.toString());
    record = datumReader.read(record, decoder);
    assertEquals("record2", record.toString());
    inputStream.close();
}
/**
 * Checks whether the given ksqlDB server version string is at or above the minimum
 * the migrations tool supports (6.0.x).
 *
 * @throws MigrationException if the version string cannot be parsed
 */
public static boolean isSupportedVersion(final String ksqlServerVersion) {
    final KsqlVersion serverVersion;
    try {
        serverVersion = new KsqlVersion(ksqlServerVersion);
    } catch (final IllegalArgumentException e) {
        throw new MigrationException("Could not parse ksqlDB server version to "
            + "verify compatibility. Version: " + ksqlServerVersion);
    }
    final KsqlVersion minimumSupported = new KsqlVersion("6.0.");
    return serverVersion.isAtLeast(minimumSupported);
}
// Version strings at or above 6.0 (with or without the "v" prefix, rc suffixes, or
// ksqldb suffixes, including the 0.x community line) must all be accepted.
@Test
public void shouldReturnSupportedVersion() {
    assertThat(isSupportedVersion("v6.0.0"), is(true));
    assertThat(isSupportedVersion("v6.1.0"), is(true));
    assertThat(isSupportedVersion("v6.2.1"), is(true));
    assertThat(isSupportedVersion("v0.10.0"), is(true));
    assertThat(isSupportedVersion("v0.10.1"), is(true));
    assertThat(isSupportedVersion("v0.11.0"), is(true));
    assertThat(isSupportedVersion("v0.14.0"), is(true));
    assertThat(isSupportedVersion("v0.14.0-rc899"), is(true));
    assertThat(isSupportedVersion("v0.14.0-rc899-ksqldb"), is(true));
    assertThat(isSupportedVersion("6.0.0"), is(true));
    assertThat(isSupportedVersion("6.1.0"), is(true));
    assertThat(isSupportedVersion("6.2.1"), is(true));
    assertThat(isSupportedVersion("0.10.0"), is(true));
    assertThat(isSupportedVersion("0.10.1"), is(true));
    assertThat(isSupportedVersion("0.11.0"), is(true));
    assertThat(isSupportedVersion("0.14.0"), is(true));
    assertThat(isSupportedVersion("0.14.0-rc899"), is(true));
    assertThat(isSupportedVersion("0.14.0-rc899-ksqldb"), is(true));
}
/**
 * Deletes a file configuration by id. The master configuration may never be deleted.
 */
@Override
public void deleteFileConfig(Long id) {
    // Validate that the config exists
    FileConfigDO config = validateFileConfigExists(id);
    if (Boolean.TRUE.equals(config.getMaster())) {
        // The master config must not be removed
        throw exception(FILE_CONFIG_DELETE_FAIL_MASTER);
    }
    // Delete the record
    fileConfigMapper.deleteById(id);
    // Evict the cache entry
    clearCache(id, null);
}
// Deleting a non-master config removes the DB row and evicts its cache entry.
@Test
public void testDeleteFileConfig_success() {
    // Mock data: a non-master config so deletion is allowed
    FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
    fileConfigMapper.insert(dbFileConfig);// @Sql: pre-insert an existing row
    // Prepare parameters
    Long id = dbFileConfig.getId();

    // Invoke
    fileConfigService.deleteFileConfig(id);
    // Verify the row is gone
    assertNull(fileConfigMapper.selectById(id));
    // Verify the cache entry was evicted
    assertNull(fileConfigService.getClientCache().getIfPresent(id));
}
// Constant-folds bitShiftRight for BIGINT operands using Java's `>>`
// (arithmetic, sign-extending right shift).
@ConstantFunction(name = "bitShiftRight", argTypes = {BIGINT, BIGINT}, returnType = BIGINT)
public static ConstantOperator bitShiftRightBigint(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createBigint(first.getBigint() >> second.getBigint());
}
// 100 >> 3 == 12.
@Test
public void bitShiftRightBigint() {
    assertEquals(12, ScalarOperatorFunctions.bitShiftRightBigint(O_BI_100, O_BI_3).getBigint());
}
@Override public void close() throws IOException { try { queue.offer(IdStatusPair.END_SEMAPHORE, 60, TimeUnit.SECONDS); } catch (InterruptedException e) { return; } try { reportWorkerFuture.get(60, TimeUnit.SECONDS); } catch (ExecutionException e) { LOG.error("problem closing", e); throw new RuntimeException(e); } catch (TimeoutException e) { LOG.error("timeout closing", e); } catch (InterruptedException e) { // } finally { reportWorkerFuture.cancel(true); } }
// With the "includes" reporter config, only PARSE_SUCCESS and
// PARSE_SUCCESS_WITH_EXCEPTION statuses may be persisted to the H2 table; every other
// status must be filtered out, while the total produced count is unchanged.
@Test
public void testIncludes(@TempDir Path tmpDir) throws Exception {
    Files.createDirectories(tmpDir.resolve("db"));
    Path dbDir = tmpDir.resolve("db/h2");
    Path config = tmpDir.resolve("tika-config.xml");
    String connectionString = "jdbc:h2:file:" + dbDir.toAbsolutePath();
    writeConfig("/configs/tika-config-includes.xml", connectionString, config);
    AsyncConfig asyncConfig = AsyncConfig.load(config);
    PipesReporter reporter = asyncConfig.getPipesReporter();
    int numThreads = 10;
    int numIterations = 200;
    Map<PipesResult.STATUS, Long> expected = runBatch(reporter, numThreads, numIterations);
    reporter.close();
    Map<PipesResult.STATUS, Long> total = countReported(connectionString);
    // Only the two included statuses should appear in the DB.
    assertEquals(2, total.size());
    long sum = 0;
    for (Map.Entry<PipesResult.STATUS, Long> e : expected.entrySet()) {
        if (e.getKey() == PARSE_SUCCESS || e.getKey() == PARSE_SUCCESS_WITH_EXCEPTION) {
            assertTrue(total.containsKey(e.getKey()), e.getKey().toString());
            assertEquals(e.getValue(), total.get(e.getKey()), e.getKey().toString());
        } else {
            assertFalse(total.containsKey(e.getKey()), e.getKey().toString());
        }
        sum += e.getValue();
    }
    // Every emitted result is accounted for, reported or filtered.
    assertEquals(numThreads * numIterations, sum);
}
public static Configuration getTimelineServiceHBaseConf(Configuration conf) throws IOException { if (conf == null) { throw new NullPointerException(); } Configuration hbaseConf; String timelineServiceHBaseConfFilePath = conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE); if (timelineServiceHBaseConfFilePath != null && timelineServiceHBaseConfFilePath.length() > 0) { LOG.info("Using hbase configuration at " + timelineServiceHBaseConfFilePath); // create a clone so that we don't mess with out input one hbaseConf = new Configuration(conf); Configuration plainHBaseConf = new Configuration(false); Path hbaseConfigPath = new Path(timelineServiceHBaseConfFilePath); try (FileSystem fs = FileSystem.newInstance(hbaseConfigPath.toUri(), conf); FSDataInputStream in = fs.open(hbaseConfigPath)) { plainHBaseConf.addResource(in); HBaseConfiguration.merge(hbaseConf, plainHBaseConf); } } else { // default to what is on the classpath hbaseConf = HBaseConfiguration.create(conf); } return hbaseConf; }
// A null configuration argument must be rejected with NullPointerException.
@Test
void testGetTimelineServiceHBaseConfNullArgument() throws Exception {
    assertThrows(NullPointerException.class, () -> {
        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
    });
}
/**
 * Removes all ZooKeeper-related resources as one sequential chain: the JMX secret step
 * (via jmxSecret(); presumably its deletion — confirm against the helper), then network
 * policy, service account, client and headless services, certificate secret,
 * logging/metrics config map, pod disruption budget, pod set, and finally the
 * persistent volume claims.
 *
 * @return a Future completed once every step in the chain has finished
 */
public Future<Void> reconcile() {
    LOGGER.infoCr(reconciliation, "Deleting all the ZooKeeper related resources");
    // compose() serializes the steps: each runs only after the previous succeeds.
    return jmxSecret()
            .compose(i -> deleteNetworkPolicy())
            .compose(i -> deleteServiceAccount())
            .compose(i -> deleteService())
            .compose(i -> deleteHeadlessService())
            .compose(i -> deleteCertificateSecret())
            .compose(i -> deleteLoggingAndMetricsConfigMap())
            .compose(i -> deletePodDisruptionBudget())
            .compose(i -> deletePodSet())
            .compose(i -> deletePersistentClaims());
}
// With deleteClaim=false on the ZooKeeper storage, the eraser must delete every
// ZooKeeper resource (services, secrets, network policy, config map, PDB, pod set,
// service account) but must NOT reconcile/delete any PVCs.
@Test
public void testZookeeperEraserReconcilePVCDeletionWithDeleteClaimFalse(VertxTestContext context) {
    // Kafka CR with 3 ZooKeeper replicas and persistent storage that keeps its claims.
    Kafka patchedKafka = new KafkaBuilder(KAFKA)
            .editOrNewSpec()
                .withNewZookeeper()
                    .withReplicas(3)
                    .withNewPersistentClaimStorage()
                        .withSize("123")
                        .withStorageClass("foo")
                        .withDeleteClaim(false)
                    .endPersistentClaimStorage()
                .endZookeeper()
            .endSpec()
            .build();
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations;
    ServiceOperator mockServiceOps = supplier.serviceOperations;
    NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    SecretOperator mockSecretOps = supplier.secretOperations;
    PvcOperator mockPvcOps = supplier.pvcOperations;
    SharedEnvironmentProvider sharedEnvironmentProvider = supplier.sharedEnvironmentProvider;
    ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(RECONCILIATION, patchedKafka, VERSIONS, sharedEnvironmentProvider);
    // Capture the names passed to each delete operation so they can be asserted below.
    ArgumentCaptor<String> podSetDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPodSetOps.deleteAsync(any(), anyString(), podSetDeletionCaptor.capture(), anyBoolean())).thenAnswer(i -> Future.succeededFuture());
    ArgumentCaptor<String> secretDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockSecretOps.deleteAsync(any(), anyString(), secretDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> saDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockSaOps.deleteAsync(any(), anyString(), saDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> serviceDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockServiceOps.deleteAsync(any(), anyString(), serviceDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> netPolicyDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockNetPolicyOps.deleteAsync(any(), anyString(), netPolicyDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockCmOps.deleteAsync(any(), anyString(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> pdbDeletionCaptor = ArgumentCaptor.forClass(String.class);
    when(mockPdbOps.deleteAsync(any(), anyString(), pdbDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture());
    // Mock the PVC Operator
    Map<String, PersistentVolumeClaim> zkPvcs = createZooPvcs(NAMESPACE, zkCluster.getStorage(), zkCluster.nodes(),
            (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(patchedKafka.getMetadata().getName(), replica),
            deleteClaim(patchedKafka.getSpec().getZookeeper().getStorage()));
    ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(mockPvcOps.getAsync(anyString(), ArgumentMatchers.startsWith("data-")))
            .thenAnswer(invocation -> {
                String pvcName = invocation.getArgument(1);
                if (pvcName.contains(zkCluster.getComponentName())) {
                    return Future.succeededFuture(zkPvcs.get(pvcName));
                }
                return Future.succeededFuture(null);
            });
    when(mockPvcOps.listAsync(anyString(), ArgumentMatchers.any(Labels.class)))
            .thenAnswer(invocation -> Future.succeededFuture(zkPvcs.values().stream().toList()));
    // test reconcile
    ZooKeeperEraser zkEraser = new ZooKeeperEraser(
            RECONCILIATION,
            supplier
    );
    Checkpoint async = context.checkpoint();
    zkEraser.reconcile()
            .onComplete(context.succeeding(v -> context.verify(() -> {
                verify(mockCmOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockSaOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockServiceOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockSecretOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockNetPolicyOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockPodSetOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                verify(mockPdbOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean());
                assertThat(netPolicyDeletionCaptor.getAllValues(), is(List.of("my-cluster-network-policy-zookeeper")));
                assertThat(serviceDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-client", "my-cluster-zookeeper-nodes")));
                assertThat(saDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper")));
                assertThat(secretDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-jmx", "my-cluster-zookeeper-nodes")));
                assertThat(podSetDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper")));
                assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-config")));
                assertThat(pdbDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper")));
                // Check PVCs
                verify(mockPvcOps, times(3)).getAsync(any(), any());
                verify(mockPvcOps, times(1)).listAsync(any(), ArgumentMatchers.any(Labels.class));
                // no reconcile since there was no PVC deletion
                verify(mockPvcOps, never()).reconcile(any(), any(), any(), any());
                assertThat(pvcCaptor.getAllValues().size(), is(0));
                async.flag();
            })));
}
/**
 * Merges two runtime metrics into a fresh instance, leaving both inputs untouched.
 * A null operand means "nothing to merge" and the other side is returned as-is.
 *
 * @throws IllegalStateException if the two metrics have different unit types
 */
public static RuntimeMetric merge(RuntimeMetric metric1, RuntimeMetric metric2) {
    if (metric1 == null) {
        return metric2;
    }
    if (metric2 == null) {
        return metric1;
    }
    checkState(metric1.getUnit() == metric2.getUnit(), "Two metrics to be merged must have the same unit type.");
    // Copy first so neither input is mutated by the merge.
    RuntimeMetric combined = copyOf(metric1);
    combined.mergeWith(metric2);
    return combined;
}
// merge() must combine sum/count/max/min into a new metric and leave both inputs
// unmodified.
@Test
public void testMerge() {
    RuntimeMetric metric1 = new RuntimeMetric(TEST_METRIC_NAME, NONE, 5, 2, 4, 1);
    RuntimeMetric metric2 = new RuntimeMetric(TEST_METRIC_NAME, NONE, 20, 2, 11, 9);
    assertRuntimeMetricEquals(RuntimeMetric.merge(metric1, metric2),
            new RuntimeMetric(TEST_METRIC_NAME, NONE, 25, 4, 11, 1));
    // Inputs untouched.
    assertRuntimeMetricEquals(metric1, new RuntimeMetric(TEST_METRIC_NAME, NONE, 5, 2, 4, 1));
    assertRuntimeMetricEquals(metric2, new RuntimeMetric(TEST_METRIC_NAME, NONE, 20, 2, 11, 9));
}
/**
 * Computes the row-count threshold at which the new consuming segment should be flushed,
 * aiming for a segment size close to the stream config's desired segment size.
 *
 * Cases handled, in order:
 *  1. No committing segment metadata (first segment of the partition): reuse the learned
 *     rows-to-size ratio if available, otherwise fall back to the autotune initial row count.
 *  2. Committing segment size unavailable (repair) or force-commit: carry over the previous
 *     segment's threshold unchanged, since its size is not a meaningful signal.
 *  3. Normal commit: update the exponentially-weighted rows-to-size ratio, then either bump
 *     the threshold slightly (time limit hit) or resize toward the optimal segment size band.
 *
 * @param streamConfig                  stream-level flush configuration
 * @param committingSegmentDescriptor   descriptor of the segment being committed
 * @param committingSegmentZKMetadata   ZK metadata of the committing segment, or null for the
 *                                      first segment of a partition
 * @param newSegmentName                name of the new segment (used for logging only)
 * @return the row threshold to set on the new consuming segment
 */
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
    @Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
  long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
  if (desiredSegmentSizeBytes <= 0) {
    // Unset or invalid config: fall back to the stream default.
    desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
  }
  // Acceptable size band: [desired/2, desired*1.5]. Segments inside this band keep the
  // ratio-based sizing; segments outside it get a corrective adjustment below.
  long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
  double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;
  if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
    if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
      // Another partition already taught us a rows-per-byte ratio; reuse it.
      long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
      targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
          _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
      return (int) targetSegmentNumRows;
    } else {
      // No history at all: start from the configured autotune seed value.
      final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, setting threshold for {} as {}", newSegmentName,
          autotuneInitialRows);
      return autotuneInitialRows;
    }
  }
  final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
  if (committingSegmentSizeBytes <= 0 // repair segment case
      || SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
      committingSegmentDescriptor.getStopReason())) {
    // Size is either unknown (repair) or artificially small (force-commit), so it cannot be
    // used to tune the ratio; keep the previous segment's threshold.
    String reason = committingSegmentSizeBytes <= 0 //
        ? "Committing segment size is not available" //
        : "Committing segment is due to force-commit";
    final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
        reason, newSegmentName, targetNumRows);
    return targetNumRows;
  }
  final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
  final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
  final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}", newSegmentName,
      TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold, committingSegmentSizeBytes);
  double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
  if (_latestSegmentRowsToSizeRatio > 0) {
    // Exponentially weighted moving average: blend the fresh observation with history.
    _latestSegmentRowsToSizeRatio =
        CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
  } else {
    _latestSegmentRowsToSizeRatio = currentRatio;
  }
  // If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
  // We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
  // to hit the row limit next time around.
  //
  // If the size of the committing segment is higher than the desired segment size, then the administrator has
  // set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
  // limit.
  //
  // TODO: add feature to adjust time threshold as well
  // If we set new threshold to be numRowsConsumed, we might keep oscillating back and forth between doubling limit
  // and time threshold being hit If we set new threshold to be committingSegmentZKMetadata
  // .getSizeThresholdToFlushSegment(),
  // we might end up using a lot more memory than required for the segment Using a minor bump strategy, until
  // we add feature to adjust time We will only slightly bump the threshold based on numRowsConsumed
  if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
    final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
    long currentNumRows = numRowsConsumed;
    StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
    if (timeThresholdMillis < timeConsumed) {
      // The administrator has reduced the time threshold. Adjust the
      // number of rows to match the average consumption rate on the partition.
      currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
      logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
          .append(". ");
    }
    long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
    targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
    logStringBuilder.append("Setting segment size for {} as {}");
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(), newSegmentName,
        targetSegmentNumRows);
    return (int) targetSegmentNumRows;
  }
  // Row limit was hit: steer the next segment's size toward the optimal band.
  long targetSegmentNumRows;
  if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
    // Too small: grow the threshold by 50%.
    targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
  } else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
    // Too large: halve the threshold.
    targetSegmentNumRows = numRowsConsumed / 2;
  } else {
    // Within the band: size from the learned (or current) rows-to-size ratio.
    if (_latestSegmentRowsToSizeRatio > 0) {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
    } else {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
    }
  }
  targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "Committing segment size {}, current ratio {}, setting threshold for {} as {}", committingSegmentSizeBytes,
      _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
  return (int) targetSegmentNumRows;
}
@Test
public void testUseAutoTuneInitialRowsIfFirstSegmentInPartition() {
    // Given: no committing segment metadata (null), i.e. the first segment of the partition,
    // and no previously learned rows-to-size ratio.
    int autoTuneInitialRows = 1_000;
    SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer();
    StreamConfig streamConfig = mock(StreamConfig.class);
    when(streamConfig.getFlushAutotuneInitialRows()).thenReturn(autoTuneInitialRows);
    CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
    // When/Then: the computer falls back to the configured autotune initial row count.
    int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, null, "newSegmentName");
    assertEquals(threshold, autoTuneInitialRows);
}
/**
 * Sets the id of the SCM configuration this pluggable material refers to.
 *
 * @param scmId id of the SCM config; validated elsewhere against the known SCM configs
 */
public void setScmId(String scmId) {
    this.scmId = scmId;
}
@Test
public void shouldAddErrorWhenMatchingScmConfigDoesNotExist() {
    // Given: a validation context that cannot resolve any SCM by id.
    PipelineConfigSaveValidationContext validationContext = mock(PipelineConfigSaveValidationContext.class);
    when(validationContext.findScmById(anyString())).thenReturn(null);
    SCM scmConfig = mock(SCM.class);
    when(scmConfig.doesPluginExist()).thenReturn(true);
    PluggableSCMMaterialConfig pluggableSCMMaterialConfig = new PluggableSCMMaterialConfig(null, scmConfig, "usr/home", null, false);
    pluggableSCMMaterialConfig.setScmId("scm-id");
    // When validated, Then: exactly one error keyed on SCM_ID with a helpful message.
    pluggableSCMMaterialConfig.validateTree(validationContext);
    assertThat(pluggableSCMMaterialConfig.errors().getAll().size(), is(1));
    assertThat(pluggableSCMMaterialConfig.errors().on(PluggableSCMMaterialConfig.SCM_ID), is("Could not find SCM for given scm-id: [scm-id]."));
}
/**
 * Builds a self-submitting HTML form that re-POSTs the current request's parameters
 * to the current request URL (used to turn a redirect into a POST).
 *
 * <p>All interpolated request data (URL, parameter names and values) is HTML-escaped,
 * since request parameters are attacker-controlled.</p>
 *
 * @param context the web context providing the request URL and parameters
 * @return the HTML page containing the auto-submitting form
 */
public static String buildFormPostContent(final WebContext context) {
    val requestedUrl = context.getFullRequestURL();
    val parameters = context.getRequestParameters();
    val buffer = new StringBuilder();
    buffer.append("<html>\n");
    buffer.append("<body>\n");
    buffer.append("<form action=\"" + escapeHtml(requestedUrl) + "\" name=\"f\" method=\"post\">\n");
    if (parameters != null) {
        for (val entry : parameters.entrySet()) {
            val values = entry.getValue();
            // Only the first value of a multi-valued parameter is forwarded.
            if (values != null && values.length > 0) {
                // FIX: escape the value as well as the key. Previously the raw value was
                // embedded in the attribute, which is a reflected-XSS vector.
                buffer.append("<input type='hidden' name=\"" + escapeHtml(entry.getKey()) + "\" value=\""
                    + escapeHtml(values[0]) + "\" />\n");
            }
        }
    }
    buffer.append("<input value='POST' type='submit' />\n");
    buffer.append("</form>\n");
    buffer.append("<script type='text/javascript'>document.forms['f'].submit();</script>\n");
    buffer.append("</body>\n");
    buffer.append("</html>\n");
    return buffer.toString();
}
@Test
public void testBuildFormPostContent() {
    // With no request parameters, the generated page is just the bare auto-submitting form
    // targeting the request URL.
    val content = HttpActionHelper.buildFormPostContent(MockWebContext.create().setFullRequestURL(CALLBACK_URL));
    assertEquals("<html>\n<body>\n<form action=\"" + CALLBACK_URL + "\" name=\"f\" method=\"post\">\n"
        + "<input value='POST' type='submit' />\n</form>\n"
        + "<script type='text/javascript'>document.forms['f'].submit();</script>\n"
        + "</body>\n</html>\n", content);
}
/**
 * Delegates to the wrapped plugin manager's development-mode flag.
 */
@Override
public boolean isDevelopment() {
    return original.isDevelopment();
}
@Test
public void isDevelopment() {
    // The wrapper must report exactly the same development-mode flag as the wrapped manager.
    assertEquals(pluginManager.isDevelopment(), wrappedPluginManager.isDevelopment());
}
/**
 * Returns the current server status as last computed by the status updater.
 */
public ServerStatus getServerStatus() {
    return serverStatus;
}
@Test
void testUpdaterFromConsistency3() {
    ServerStatusManager serverStatusManager = new ServerStatusManager(protocolManager, switchDomain);
    ServerStatusManager.ServerStatusUpdater updater = serverStatusManager.new ServerStatusUpdater();
    // when: one update cycle runs
    updater.run();
    // then: the updater is expected to leave the server status as DOWN in this setup
    assertEquals(ServerStatus.DOWN, serverStatusManager.getServerStatus());
}
/**
 * Converts a logging event into its MDC representation.
 *
 * <p>If no MDC map is attached, the configured default value is returned. With no key
 * configured, all MDC entries are rendered; otherwise the value for the configured key
 * is returned, falling back to the default value when the key is absent.</p>
 */
@Override
public String convert(ILoggingEvent event) {
    final Map<String, String> mdcMap = event.getMDCPropertyMap();
    // No MDC attached to this event at all.
    if (mdcMap == null) {
        return defaultValue;
    }
    // No specific key configured: render every entry.
    if (key == null) {
        return outputMDCForAllKeys(mdcMap);
    }
    final String mdcValue = mdcMap.get(key);
    return mdcValue != null ? mdcValue : defaultValue;
}
@Test
public void testConvertWithOneEntry() {
    // With exactly one MDC entry and no key configured, the converter renders "k=v".
    String k = "MDCConverterTest_k" + diff;
    String v = "MDCConverterTest_v" + diff;
    logbackMDCAdapter.put(k, v);
    ILoggingEvent le = createLoggingEvent();
    String result = converter.convert(le);
    assertEquals(k + "=" + v, result);
}
@PutMapping @Secured(resource = AuthConstants.UPDATE_PASSWORD_ENTRY_POINT, action = ActionTypes.WRITE) public Object updateUser(@RequestParam String username, @RequestParam String newPassword, HttpServletResponse response, HttpServletRequest request) throws IOException { // admin or same user try { if (!hasPermission(username, request)) { response.sendError(HttpServletResponse.SC_FORBIDDEN, "authorization failed!"); return null; } } catch (HttpSessionRequiredException e) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "session expired!"); return null; } catch (AccessException exception) { response.sendError(HttpServletResponse.SC_FORBIDDEN, "authorization failed!"); return null; } User user = userDetailsService.getUserFromDatabase(username); if (user == null) { throw new IllegalArgumentException("user " + username + " not exist!"); } userDetailsService.updateUserPassword(username, PasswordEncoderUtil.encode(newPassword)); return RestResultUtils.success("update user ok!"); }
@Test
void testUpdateUser1() throws IOException {
    // Given: auth disabled and an existing user in the database.
    when(authConfigs.isAuthEnabled()).thenReturn(false);
    when(userDetailsService.getUserFromDatabase(anyString())).thenReturn(new User());
    MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest();
    MockHttpServletResponse mockHttpServletResponse = new MockHttpServletResponse();
    // When/Then: the update succeeds with HTTP-style code 200 in the rest result.
    RestResult<String> result = (RestResult<String>) userController.updateUser("nacos", "test", mockHttpServletResponse, mockHttpServletRequest);
    assertEquals(200, result.getCode());
}
/**
 * Appends the given raw consumer records to the tail of the queue, refreshes the
 * head record, and returns the resulting queue size.
 *
 * @param rawRecords records to enqueue, in iteration order
 * @return the queue size after the records have been added
 */
int addRawRecords(final Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) {
    rawRecords.forEach(fifoQueue::addLast);
    updateHead();
    return size();
}
@Test
public void shouldThrowStreamsExceptionWhenKeyDeserializationFails() {
    // Given: a record whose key bytes were produced with a Long serde, while the queue's
    // key deserializer expects a different type -- deserialization must fail.
    final byte[] key = Serdes.Long().serializer().serialize("foo", 1L);
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, key, recordValue,
            new RecordHeaders(), Optional.empty()));
    // When/Then: the failure surfaces as a StreamsException caused by a SerializationException.
    final StreamsException exception = assertThrows(
        StreamsException.class,
        () -> queue.addRawRecords(records)
    );
    assertThat(exception.getCause(), instanceOf(SerializationException.class));
}
/**
 * Derives a Beam {@link Schema} for the given protobuf message by loading its
 * descriptor set from the supplied file descriptor path.
 *
 * @param fileDescriptorPath path to the compiled file descriptor set
 * @param messageName        fully qualified name of the proto message
 * @return the Beam schema equivalent of the proto message
 */
public static Schema getBeamSchemaFromProto(String fileDescriptorPath, String messageName) {
    final ProtoSchemaInfo schemaInfo = getProtoDomain(fileDescriptorPath, messageName);
    return ProtoDynamicMessageSchema.forDescriptor(schemaInfo.getProtoDomain(), messageName).getSchema();
}
@Test
public void testProtoSchemaToBeamSchema() {
    // The schema derived from the proto descriptor must expose the same field names
    // as the hand-built reference schema.
    Schema schema = ProtoByteUtils.getBeamSchemaFromProto(DESCRIPTOR_PATH, MESSAGE_NAME);
    Assert.assertEquals(schema.getFieldNames(), SCHEMA.getFieldNames());
}
/**
 * Validates that an imported job carries a description of at least the configured
 * minimum length.
 *
 * @param subject the object under validation; only {@code JobMeta} instances are checked
 * @return one approval or one error feedback item, or an empty list when the rule is
 *         disabled or the subject is not a job
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
  List<ImportValidationFeedback> results = new ArrayList<>();
  // Disabled rules and non-job subjects produce no feedback at all.
  if ( !isEnabled() || !( subject instanceof JobMeta ) ) {
    return results;
  }
  String description = ( (JobMeta) subject ).getDescription();
  boolean longEnough = description != null && description.length() >= minLength;
  if ( longEnough ) {
    results.add( new ImportValidationFeedback( this, ImportValidationResultType.APPROVAL, "A description is present" ) );
  } else {
    results.add(
      new ImportValidationFeedback( this, ImportValidationResultType.ERROR, "A description is not present or too short" ) );
  }
  return results;
}
@Test
public void testVerifyRule_ShortDescription_EnabledRule() {
  // Given: an enabled rule requiring at least 10 characters and a 5-character description.
  JobHasDescriptionImportRule importRule = getImportRule( 10, true );
  JobMeta jobMeta = new JobMeta();
  jobMeta.setDescription( "short" );
  // When/Then: the rule reports exactly one ERROR feedback item.
  List<ImportValidationFeedback> feedbackList = importRule.verifyRule( jobMeta );
  assertNotNull( feedbackList );
  assertFalse( feedbackList.isEmpty() );
  ImportValidationFeedback feedback = feedbackList.get( 0 );
  assertNotNull( feedback );
  assertEquals( ImportValidationResultType.ERROR, feedback.getResultType() );
  assertTrue( feedback.isError() );
}
public boolean isSet(PropertyKey key) { if (mUserProps.containsKey(key)) { Optional<Object> val = mUserProps.get(key); if (val.isPresent()) { return true; } } // In case key is not the reference to the original key return PropertyKey.fromString(key.toString()).getDefaultValue() != null; }
@Test
public void isSet() {
    // A key with a user value is set; a key without value or default is not.
    assertTrue(mProperties.isSet(mKeyWithValue));
    assertFalse(mProperties.isSet(mKeyWithoutValue));
    // After removal, the key with a default value still reports as set;
    // the other key becomes set once a runtime value is put.
    mProperties.remove(mKeyWithValue);
    mProperties.put(mKeyWithoutValue, "value", Source.RUNTIME);
    assertTrue(mProperties.isSet(mKeyWithValue));
    assertTrue(mProperties.isSet(mKeyWithoutValue));
}
/**
 * Builds a FEEL 1.1 ANTLR parser over the given source expression, pre-configured with
 * the event listeners, additional functions, input variables/types and type registry.
 *
 * @param eventsManager       dispatches parse events/errors to registered listeners
 * @param source              the FEEL expression text to parse
 * @param inputVariableTypes  variable name to FEEL type, pre-loaded into the symbol table
 * @param inputVariables      variable name to value, pre-loaded into the symbol table
 * @param additionalFunctions extra FEEL functions registered in the built-in scope
 * @param profiles            FEEL profiles (currently unused here -- TODO confirm callers)
 * @param typeRegistry        optional registry of custom types; may be null
 * @return a parser ready to produce a parse tree for {@code source}
 */
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
    CommonTokenStream tokens = new CommonTokenStream( lexer );
    FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
    ParserHelper parserHelper = new ParserHelper(eventsManager);
    // Extra functions become symbols in the built-in scope before parsing starts.
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler( new FEELErrorHandler() );
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
    // pre-loads the parser with symbols
    defineVariables( inputVariableTypes, inputVariables, parser );
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
@Test
void contextWithMultipleEntries() {
    // Parses a context with three entries whose keys exercise the three naming forms:
    // a quoted string key, a multi-token name, and a name containing "odd" characters.
    String inputExpression = "{ \"a string key\" : 10,"
                       + " a non-string key : foo+bar,"
                       + " a key.with + /' odd chars : [10..50] }";
    BaseNode ctxbase = parse( inputExpression, mapOf(entry("foo", BuiltInType.NUMBER), entry("bar", BuiltInType.NUMBER)));
    assertThat( ctxbase).isInstanceOf(ContextNode.class);
    assertThat( ctxbase.getText()).isEqualTo(inputExpression);
    ContextNode ctx = (ContextNode) ctxbase;
    assertThat( ctx.getEntries()).hasSize(3);
    // Entry 0: quoted string key mapping to a number literal.
    ContextEntryNode entry = ctx.getEntries().get( 0 );
    assertThat(entry.getName()).isInstanceOf(StringNode.class);
    StringNode nameNode = (StringNode) entry.getName();
    assertThat(nameNode.getText()).isNotNull();
    assertThat(nameNode.getText()).isEqualTo("\"a string key\""); // Reference String literal test, BaseNode#getText() return the FEEL equivalent expression, in this case quoted.
    assertThat( entry.getValue()).isInstanceOf(NumberNode.class);
    assertThat( entry.getResultType()).isEqualTo(BuiltInType.NUMBER);
    assertThat( entry.getValue().getText()).isEqualTo("10");
    // Entry 1: multi-token name key mapping to an infix addition of two known variables.
    entry = ctx.getEntries().get( 1 );
    assertThat( entry.getName()).isInstanceOf(NameDefNode.class);
    NameDefNode name = (NameDefNode) entry.getName();
    assertThat( name.getParts()).isNotNull();
    assertThat( name.getParts()).hasSize(5);
    assertThat( entry.getName().getText()).isEqualTo("a non-string key");
    assertThat( entry.getValue()).isInstanceOf(InfixOpNode.class);
    assertThat( entry.getResultType()).isEqualTo(BuiltInType.NUMBER);
    assertThat( entry.getValue().getText()).isEqualTo( "foo+bar");
    // Entry 2: name with punctuation/odd characters mapping to a range literal.
    entry = ctx.getEntries().get( 2 );
    assertThat( entry.getName()).isInstanceOf(NameDefNode.class);
    name = (NameDefNode) entry.getName();
    assertThat( name.getParts()).isNotNull();
    assertThat( name.getParts()).hasSize(9);
    assertThat( entry.getName().getText()).isEqualTo("a key.with + /' odd chars");
    assertThat( entry.getValue()).isInstanceOf(RangeNode.class);
    assertThat( entry.getResultType()).isEqualTo(BuiltInType.RANGE);
    assertThat( entry.getValue().getText()).isEqualTo( "[10..50]");
}
/**
 * Closes with the default timeout, delegating to {@code close(Duration)}.
 */
@Override
public void close() {
    close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS));
}
@Test
public void testVerifyApplicationEventOnShutdown() {
    // Closing the consumer must emit both shutdown events: a blocking
    // acknowledge-on-close and a fire-and-forget unsubscribe.
    consumer = newConsumer();
    completeShareAcknowledgeOnCloseApplicationEventSuccessfully();
    completeShareUnsubscribeApplicationEventSuccessfully();
    consumer.close();
    verify(applicationEventHandler).addAndGet(any(ShareAcknowledgeOnCloseEvent.class));
    verify(applicationEventHandler).add(any(ShareUnsubscribeEvent.class));
}
/**
 * Handles the ETH_GET_TRANSACTION_RECEIPT operation: fetches the receipt for the
 * transaction hash taken from the message header (falling back to the endpoint
 * configuration) and, on success, places the receipt on the message body.
 */
@InvokeOnHeader(Web3jConstants.ETH_GET_TRANSACTION_RECEIPT)
void ethGetTransactionReceipt(Message message) throws IOException {
    final String txHash =
            message.getHeader(Web3jConstants.TRANSACTION_HASH, configuration::getTransactionHash, String.class);
    final Request<?, EthGetTransactionReceipt> request = web3j.ethGetTransactionReceipt(txHash);
    setRequestId(message, request);
    final EthGetTransactionReceipt response = request.send();
    // checkForError populates the message with error details and returns true on failure.
    if (!checkForError(message, response)) {
        message.setBody(response.getTransactionReceipt());
    }
}
@Test
public void ethGetTransactionReceiptTest() throws Exception {
    // Given: a mocked web3j client returning a receipt response.
    EthGetTransactionReceipt response = Mockito.mock(EthGetTransactionReceipt.class);
    Mockito.when(mockWeb3j.ethGetTransactionReceipt(any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getTransactionReceipt()).thenReturn(Mockito.mock(Optional.class));
    // When the producer handles the operation, Then: the receipt lands on the body.
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_TRANSACTION_RECEIPT);
    template.send(exchange);
    Optional<Transaction> body = exchange.getIn().getBody(Optional.class);
    assertNotNull(body);
}
/**
 * Resolves the state cell addressed by the given namespace and tag from this work
 * item's state table, using a null state context.
 */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
@Test
public void testMultimapModifyAfterReadDoesNotAffectResult() {
    // Verifies snapshot semantics: once entries()/keys()/get(k) results have been read,
    // later mutations of the multimap must not change those already-read results,
    // while mutations made between readLater() and read() ARE reflected.
    final String tag = "multimap";
    StateTag<MultimapState<byte[], Integer>> addr =
        StateTags.multimap(tag, ByteArrayCoder.of(), VarIntCoder.of());
    MultimapState<byte[], Integer> multimapState = underTest.state(NAMESPACE, addr);
    final byte[] key1 = "key1".getBytes(StandardCharsets.UTF_8);
    final byte[] key2 = "key2".getBytes(StandardCharsets.UTF_8);
    final byte[] key3 = "key3".getBytes(StandardCharsets.UTF_8);
    final byte[] key4 = "key4".getBytes(StandardCharsets.UTF_8);
    // Wire up the backing-store futures for the entries/keys scans and per-key fetches.
    SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> entriesFuture =
        SettableFuture.create();
    when(mockReader.multimapFetchAllFuture(
            false, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of()))
        .thenReturn(entriesFuture);
    SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> keysFuture =
        SettableFuture.create();
    when(mockReader.multimapFetchAllFuture(
            true, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of()))
        .thenReturn(keysFuture);
    SettableFuture<Iterable<Integer>> getKey1Future = SettableFuture.create();
    SettableFuture<Iterable<Integer>> getKey2Future = SettableFuture.create();
    SettableFuture<Iterable<Integer>> getKey4Future = SettableFuture.create();
    when(mockReader.multimapFetchSingleEntryFuture(
            encodeWithCoder(key1, ByteArrayCoder.of()),
            key(NAMESPACE, tag),
            STATE_FAMILY,
            VarIntCoder.of()))
        .thenReturn(getKey1Future);
    when(mockReader.multimapFetchSingleEntryFuture(
            encodeWithCoder(key2, ByteArrayCoder.of()),
            key(NAMESPACE, tag),
            STATE_FAMILY,
            VarIntCoder.of()))
        .thenReturn(getKey2Future);
    when(mockReader.multimapFetchSingleEntryFuture(
            encodeWithCoder(key4, ByteArrayCoder.of()),
            key(NAMESPACE, tag),
            STATE_FAMILY,
            VarIntCoder.of()))
        .thenReturn(getKey4Future);
    ReadableState<Iterable<Map.Entry<byte[], Integer>>> entriesResult =
        multimapState.entries().readLater();
    ReadableState<Iterable<byte[]>> keysResult = multimapState.keys().readLater();
    // Backing store knows key1 -> {1,2,3} and key2 -> {2,3,4}.
    waitAndSet(
        entriesFuture,
        Arrays.asList(multimapEntry(key1, 1, 2, 3), multimapEntry(key2, 2, 3, 4)),
        200);
    waitAndSet(keysFuture, Arrays.asList(multimapEntry(key1), multimapEntry(key2)), 200);
    // make key4 to be known nonexistent.
    multimapState.remove(key4);
    ReadableState<Iterable<Integer>> key1Future = multimapState.get(key1).readLater();
    waitAndSet(getKey1Future, Arrays.asList(1, 2, 3), 200);
    ReadableState<Iterable<Integer>> key2Future = multimapState.get(key2).readLater();
    waitAndSet(getKey2Future, Arrays.asList(2, 3, 4), 200);
    ReadableState<Iterable<Integer>> key4Future = multimapState.get(key4).readLater();
    waitAndSet(getKey4Future, Collections.emptyList(), 200);
    // These puts happen before read(), so they ARE part of the snapshot below.
    // dup() exercises structural (not reference) key equality for byte[] keys.
    multimapState.put(key1, 7);
    multimapState.put(dup(key2), 8);
    multimapState.put(dup(key3), 8);
    Iterable<Map.Entry<byte[], Integer>> entries = entriesResult.read();
    Iterable<byte[]> keys = keysResult.read();
    Iterable<Integer> key1Values = key1Future.read();
    Iterable<Integer> key2Values = key2Future.read();
    Iterable<Integer> key4Values = key4Future.read();
    // values added/removed after read should not be reflected in result
    multimapState.remove(key1);
    multimapState.put(key2, 9);
    multimapState.put(key4, 10);
    assertEquals(9, Iterables.size(entries));
    assertThat(
        entries,
        Matchers.containsInAnyOrder(
            multimapEntryMatcher(key1, 1),
            multimapEntryMatcher(key1, 2),
            multimapEntryMatcher(key1, 3),
            multimapEntryMatcher(key1, 7),
            multimapEntryMatcher(key2, 4),
            multimapEntryMatcher(key2, 2),
            multimapEntryMatcher(key2, 3),
            multimapEntryMatcher(key2, 8),
            multimapEntryMatcher(key3, 8)));
    assertEquals(3, Iterables.size(keys));
    assertThat(keys, Matchers.containsInAnyOrder(key1, key2, key3));
    assertEquals(4, Iterables.size(key1Values));
    assertThat(key1Values, Matchers.containsInAnyOrder(1, 2, 3, 7));
    assertEquals(4, Iterables.size(key2Values));
    assertThat(key2Values, Matchers.containsInAnyOrder(2, 3, 4, 8));
    // key4 was removed before its read and only re-added afterwards, so it reads empty.
    assertTrue(Iterables.isEmpty(key4Values));
}
/**
 * Returns this transform function's registered name.
 */
@Override
public String getName() {
    return _name;
}
@Test
public void testIsNullOperator() {
    // "col IS NULL" must compile to IsNullTransformFunction and yield 1 for null rows, else 0.
    ExpressionContext expression =
        RequestContextUtils.getExpression(String.format("%s IS NULL", BIG_DECIMAL_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof IsNullTransformFunction);
    assertEquals(transformFunction.getName(), "is_null");
    int[] expectedValues = new int[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = (_bigDecimalSVValues[i] == null) ? 1 : 0;
    }
    testTransformFunction(transformFunction, expectedValues);
}
/**
 * Acquires a channel, fulfilling the given promise.
 *
 * <p>The actual acquisition ({@code acquire0}) always runs on the pool's executor:
 * directly when already on its event loop, otherwise scheduled onto it -- this keeps
 * the pool's internal state thread-confined. Any throwable raised while dispatching
 * is routed into the promise rather than propagated to the caller.</p>
 *
 * @param promise the promise to complete with the acquired channel (or the failure)
 * @return the same promise, for chaining
 */
@Override
public Future<Channel> acquire(final Promise<Channel> promise) {
    try {
        if (executor.inEventLoop()) {
            acquire0(promise);
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    acquire0(promise);
                }
            });
        }
    } catch (Throwable cause) {
        // Never let a dispatch failure escape; fail the promise instead.
        promise.tryFailure(cause);
    }
    return promise;
}
@Test
public void testAcquire() throws Exception {
    // End-to-end check of FixedChannelPool with max 1 connection: a second acquire
    // must park until the first channel is released, then receive the SAME channel.
    LocalAddress addr = new LocalAddress(getLocalAddrId());
    Bootstrap cb = new Bootstrap();
    cb.remoteAddress(addr);
    cb.group(group)
      .channel(LocalChannel.class);
    ServerBootstrap sb = new ServerBootstrap();
    sb.group(group)
      .channel(LocalServerChannel.class)
      .childHandler(new ChannelInitializer<LocalChannel>() {
          @Override
          public void initChannel(LocalChannel ch) throws Exception {
              ch.pipeline().addLast(new ChannelInboundHandlerAdapter());
          }
      });
    // Start server
    Channel sc = sb.bind(addr).syncUninterruptibly().channel();
    CountingChannelPoolHandler handler = new CountingChannelPoolHandler();
    ChannelPool pool = new FixedChannelPool(cb, handler, 1, Integer.MAX_VALUE);
    Channel channel = pool.acquire().syncUninterruptibly().getNow();
    Future<Channel> future = pool.acquire();
    // Pool is exhausted, so the second acquire is still pending.
    assertFalse(future.isDone());
    pool.release(channel).syncUninterruptibly();
    assertTrue(future.await(1, TimeUnit.SECONDS));
    Channel channel2 = future.getNow();
    // The released channel is reused: one create, two acquires, one release so far.
    assertSame(channel, channel2);
    assertEquals(1, handler.channelCount());
    assertEquals(2, handler.acquiredCount());
    assertEquals(1, handler.releasedCount());
    sc.close().syncUninterruptibly();
    channel2.close().syncUninterruptibly();
    pool.close();
}
/**
 * Executes the SMPP ReplaceSm command: builds a replacement request from the exchange,
 * sends it over the session, and writes the message id back onto the result message.
 *
 * @throws SmppException wrapping any failure reported by the SMPP session
 */
@Override
public void execute(Exchange exchange) throws SmppException {
    byte[] payload = getShortMessage(exchange.getIn());
    ReplaceSm replaceRequest = createReplaceSmTempate(exchange);
    replaceRequest.setShortMessage(payload);
    if (log.isDebugEnabled()) {
        log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'",
                exchange.getExchangeId(), replaceRequest.getMessageId());
    }
    try {
        session.replaceShortMessage(
                replaceRequest.getMessageId(),
                TypeOfNumber.valueOf(replaceRequest.getSourceAddrTon()),
                NumberingPlanIndicator.valueOf(replaceRequest.getSourceAddrNpi()),
                replaceRequest.getSourceAddr(),
                replaceRequest.getScheduleDeliveryTime(),
                replaceRequest.getValidityPeriod(),
                new RegisteredDelivery(replaceRequest.getRegisteredDelivery()),
                replaceRequest.getSmDefaultMsgId(),
                replaceRequest.getShortMessage());
    } catch (Exception e) {
        // Normalize every session failure into the component's exception type.
        throw new SmppException(e);
    }
    if (log.isDebugEnabled()) {
        log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'",
                exchange.getExchangeId(), replaceRequest.getMessageId());
    }
    Message resultMessage = ExchangeHelper.getResultMessage(exchange);
    resultMessage.setHeader(SmppConstants.ID, replaceRequest.getMessageId());
}
@Test
public void executeWithConfigurationData() throws Exception {
    // Given: only the command and id headers; all other values come from configuration.
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm");
    exchange.getIn().setHeader(SmppConstants.ID, "1");
    exchange.getIn().setBody("new short message body");
    command.execute(exchange);
    // Then: the session is invoked with configuration defaults and the message id
    // is echoed back on the out message.
    verify(session).replaceShortMessage(eq("1"), eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
        (String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)), eq((byte) 0),
        eq("new short message body".getBytes()));
    assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
}
/**
 * Loads column metadata for the given table via JDBC database metadata, then runs an
 * empty-result query against the table to determine per-column auto-increment and
 * case-sensitivity flags (information {@code DatabaseMetaData} does not expose).
 *
 * @param connection       open connection to the target database
 * @param tableNamePattern exact table name to load (matched literally, not as a pattern)
 * @param databaseType     dialect used to generate the probing empty-result SQL
 * @return column metadata in the order returned by the driver's getColumns() call
 * @throws SQLException on any JDBC failure
 */
public static Collection<ColumnMetaData> load(final Connection connection, final String tableNamePattern, final DatabaseType databaseType) throws SQLException {
    Collection<ColumnMetaData> result = new LinkedList<>();
    Collection<String> primaryKeys = loadPrimaryKeys(connection, tableNamePattern);
    List<String> columnNames = new ArrayList<>();
    List<Integer> columnTypes = new ArrayList<>();
    List<Boolean> primaryKeyFlags = new ArrayList<>();
    List<Boolean> caseSensitiveFlags = new ArrayList<>();
    List<Boolean> nullableFlags = new ArrayList<>();
    try (ResultSet resultSet = connection.getMetaData().getColumns(connection.getCatalog(), connection.getSchema(), tableNamePattern, "%")) {
        while (resultSet.next()) {
            String tableName = resultSet.getString(TABLE_NAME);
            // getColumns() treats the table name as a pattern; keep only exact matches.
            if (Objects.equals(tableNamePattern, tableName)) {
                String columnName = resultSet.getString(COLUMN_NAME);
                columnTypes.add(resultSet.getInt(DATA_TYPE));
                primaryKeyFlags.add(primaryKeys.contains(columnName));
                nullableFlags.add("YES".equals(resultSet.getString(IS_NULLABLE)));
                columnNames.add(columnName);
            }
        }
    }
    // Probe with an empty result set to read driver-level column properties.
    try (
            Statement statement = connection.createStatement();
            ResultSet resultSet = statement.executeQuery(generateEmptyResultSQL(tableNamePattern, columnNames, databaseType))) {
        for (int i = 0; i < columnNames.size(); i++) {
            // NOTE(review): isAutoIncrement is addressed by ordinal (i + 1), which assumes the
            // generated SQL selects columns in columnNames order -- confirm in generateEmptyResultSQL.
            boolean generated = resultSet.getMetaData().isAutoIncrement(i + 1);
            caseSensitiveFlags.add(resultSet.getMetaData().isCaseSensitive(resultSet.findColumn(columnNames.get(i))));
            result.add(new ColumnMetaData(columnNames.get(i), columnTypes.get(i), primaryKeyFlags.get(i), generated, caseSensitiveFlags.get(i), true, false, nullableFlags.get(i)));
        }
    }
    return result;
}
@Test
void assertLoad() throws SQLException {
    // The loader must return both mocked columns in order, with the primary-key
    // column flagged accordingly.
    Collection<ColumnMetaData> actual = ColumnMetaDataLoader.load(connection, "tbl", databaseType);
    assertThat(actual.size(), is(2));
    Iterator<ColumnMetaData> columnMetaDataIterator = actual.iterator();
    assertColumnMetaData(columnMetaDataIterator.next(), "pk_col", Types.INTEGER, true, true);
    assertColumnMetaData(columnMetaDataIterator.next(), "col", Types.VARCHAR, false, false);
}
/**
 * Returns whether the given string matches the mobile phone number pattern
 * defined by {@code MOBILE_REGEX}.
 */
public static boolean isMobile(String mobile) {
    return isMatch(MOBILE_REGEX, mobile);
}
@Test
public void testMobile() {
    // A valid mobile number matches; an invalid prefix does not.
    // Use assertTrue/assertFalse instead of assertEquals(true/false, ...) -- clearer
    // intent and better failure messages.
    Assert.assertTrue(PatternKit.isMobile("15900234821"));
    Assert.assertFalse(PatternKit.isMobile("11112232111"));
}
/**
 * Reads a private key from a PEM stream.
 *
 * <p>Delegates to {@code readPemKey} and casts; the stream is therefore expected to
 * contain a private key -- a public-key PEM would fail the cast with a
 * {@link ClassCastException}.</p>
 */
public static PrivateKey readPemPrivateKey(InputStream pemStream) {
    return (PrivateKey) readPemKey(pemStream);
}
@Test
public void readPrivateKeyTest() {
    // Reading a valid private-key PEM resource must yield a non-null key.
    final PrivateKey privateKey = PemUtil.readPemPrivateKey(ResourceUtil.getStream("test_private_key.pem"));
    assertNotNull(privateKey);
}
/**
 * Returns how many more elements the queue can accept before it is full.
 *
 * <p>Guarded by the queue lock so the size read and the subtraction are consistent
 * with concurrent producers/consumers.</p>
 */
public int remainingCapacity() {
    lock.lock();
    try {
        return capacity - queue.size();
    } finally {
        lock.unlock();
    }
}
@Test
public void testSpecifiedQueueCapacity() {
    // A freshly constructed queue reports its full configured capacity as remaining.
    final int capacity = 8_000;
    final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>(capacity);
    assertThat(queue.remainingCapacity()).isEqualTo(capacity);
}
/**
 * Evaluates whether the given content file may contain rows matching the expression,
 * based on the file's column metrics (counts/bounds).
 *
 * @return true if the file might contain matching rows and should be read
 */
public boolean eval(ContentFile<?> file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return new MetricsEvalVisitor().eval(file);
}
@Test
public void testMissingStats() {
    // A data file with no column statistics cannot prove anything, so the inclusive
    // evaluator must conservatively decide to read it for every kind of predicate.
    DataFile missingStats = new TestDataFile("file.parquet", Row.of(), 50);
    Expression[] exprs =
        new Expression[] {
            lessThan("no_stats", 5),
            lessThanOrEqual("no_stats", 30),
            equal("no_stats", 70),
            greaterThan("no_stats", 78),
            greaterThanOrEqual("no_stats", 90),
            notEqual("no_stats", 101),
            isNull("no_stats"),
            notNull("no_stats"),
            isNaN("some_nans"),
            notNaN("some_nans")
        };
    for (Expression expr : exprs) {
        boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, expr).eval(missingStats);
        assertThat(shouldRead).as("Should read when missing stats for expr: " + expr).isTrue();
    }
}
/**
 * Windowed stream-stream join overload taking a key-less {@link ValueJoiner};
 * adapts it to the key-aware variant and delegates.
 */
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
                                    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                    final JoinWindows windows) {
    return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
@Test
public void shouldNotAllowNullJoinWindowsOnJoinWithStreamJoined() {
    // A null JoinWindows argument must be rejected eagerly with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.join(
            testStream,
            MockValueJoiner.TOSTRING_JOINER,
            null,
            StreamJoined.as("name")));
    assertThat(exception.getMessage(), equalTo("windows can't be null"));
}
/**
 * Exposes the underlying invocation this wrapper adapts.
 */
@Override
public Invocation unwrap() {
    return invocation;
}
@Test
void unwrap() {
    // unwrap() must return the exact wrapped invocation instance, not a copy.
    assertThat(request.unwrap()).isSameAs(invocation);
}
/**
 * Removes the cached plugin entry keyed by the given plugin's name.
 * A {@code null} argument is silently ignored.
 *
 * @param pluginData the plugin whose cache entry should be evicted; may be null
 */
public void removePluginData(final PluginData pluginData) {
    if (pluginData != null) {
        PLUGIN_MAP.remove(pluginData.getName());
    }
}
@Test
public void testRemovePluginData() throws NoSuchFieldException, IllegalAccessException {
    // Seed the internal map directly via reflection, then verify removePluginData evicts
    // the entry keyed by plugin name.
    PluginData pluginData = PluginData.builder().name(mockName1).build();
    ConcurrentHashMap<String, PluginData> pluginMap = getFieldByName(pluginMapStr);
    pluginMap.put(mockName1, pluginData);
    assertNotNull(pluginMap.get(mockName1));
    BaseDataCache.getInstance().removePluginData(pluginData);
    assertNull(pluginMap.get(mockName1));
}
/**
 * Returns an aggregate operation that computes the arithmetic mean of the long
 * values extracted by {@code getLongValueFn}, as a {@code double}.
 * Accumulates a (count, sum) pair and throws {@link ArithmeticException} on overflow.
 */
@Nonnull
public static <T> AggregateOperation1<T, LongLongAccumulator, Double> averagingLong(
        @Nonnull ToLongFunctionEx<? super T> getLongValueFn
) {
    checkSerializable(getLongValueFn, "getLongValueFn");
    // count == accumulator.value1
    // sum == accumulator.value2
    return AggregateOperation
            .withCreate(LongLongAccumulator::new)
            .andAccumulate((LongLongAccumulator a, T i) -> {
                // a bit faster check than in addExact, specialized for increment
                if (a.get1() == Long.MAX_VALUE) {
                    throw new ArithmeticException("Counter overflow");
                }
                a.set1(a.get1() + 1);
                a.set2(Math.addExact(a.get2(), getLongValueFn.applyAsLong(i)));
            })
            .andCombine((a1, a2) -> {
                a1.set1(Math.addExact(a1.get1(), a2.get1()));
                a1.set2(Math.addExact(a1.get2(), a2.get2()));
            })
            .andDeduct((a1, a2) -> {
                a1.set1(Math.subtractExact(a1.get1(), a2.get1()));
                a1.set2(Math.subtractExact(a1.get2(), a2.get2()));
            })
            // Finish: sum / count as a double. NOTE(review): an empty accumulator
            // (count == 0) would divide by zero, yielding NaN/Infinity — presumably
            // never finished without accumulating; confirm against the framework.
            .andExportFinish(a -> (double) a.get2() / a.get1());
}
@Test
public void when_averagingLong() {
    // Accumulating 1 then 2 should yield intermediate (count, sum) accumulators
    // of (1, 1) and (2, 3), and a final average of 1.5.
    validateOp(averagingLong(Long::longValue), identity(),
            1L, 2L, new LongLongAccumulator(1, 1), new LongLongAccumulator(2, 3), 1.5);
}
/** Returns the response trailers held by this object. */
@Override
public Metadata trailers() {
    return trailers;
}
@Test
void trailers() {
    // trailers() must return the exact Metadata instance (identity, not equality).
    assertThat(response.trailers()).isSameAs(trailers);
}
/**
 * Builds the subject-alternative-name map for this subject.
 * DNS names are keyed {@code "DNS.0"}, {@code "DNS.1"}, … and IP addresses
 * {@code "IP.0"}, {@code "IP.1"}, …, each series indexed independently in the
 * order produced by {@code dnsNames()} and {@code ipAddresses()}.
 *
 * @return a new mutable map of SAN entries
 */
public Map<String, String> subjectAltNames() {
    Map<String, String> san = new HashMap<>();
    int dnsIndex = 0;
    for (String dnsName : dnsNames()) {
        san.put("DNS." + dnsIndex, dnsName);
        dnsIndex++;
    }
    int ipIndex = 0;
    for (String ipAddress : ipAddresses()) {
        san.put("IP." + ipIndex, ipAddress);
        ipIndex++;
    }
    return san;
}
@Test
public void testSubjectAlternativeNames() {
    // DNS and IP entries are indexed in independent "DNS.n" / "IP.n" series.
    Subject subject = new Subject.Builder()
            .withCommonName("joe")
            .withOrganizationName("MyOrg")
            .addDnsName("example.com")
            .addDnsName("example.org")
            .addDnsNames(List.of("example.cz", "example.co.uk"))
            .addIpAddress("123.123.123.123")
            .addIpAddress("127.0.0.1")
            .build();
    // NOTE(review): the expected DNS order differs from insertion order —
    // presumably the builder stores DNS names in a hashed set; verify against Subject.Builder.
    assertEquals(Map.of(
            "IP.0", "123.123.123.123",
            "IP.1", "127.0.0.1",
            "DNS.0", "example.org",
            "DNS.1", "example.co.uk",
            "DNS.2", "example.com",
            "DNS.3", "example.cz"), subject.subjectAltNames());
}
/**
 * Validates a personal access token by issuing a project-list request against
 * the Azure DevOps server. Errors surface through {@code doGet}.
 *
 * @param serverUrl base URL of the Azure DevOps server
 * @param token personal access token to validate
 */
public void checkPAT(String serverUrl, String token) {
    String url = getTrimmedUrl(serverUrl) + "/_apis/projects?" + API_VERSION_3;
    doGet(token, url);
}
@Test
public void check_pat_with_server_error() {
    // A 500 from the server must surface as IllegalArgumentException with a stable message.
    enqueueResponse(500);
    String serverUrl = server.url("").toString();
    assertThatThrownBy(() -> underTest.checkPAT(serverUrl, "token"))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Unable to contact Azure DevOps server");
}
/**
 * Takes the next task to run, blocking until one is available.
 * Interleaves the plain task queue with the scheduled-task queue: when a
 * scheduled task is pending, plain tasks are only polled for up to that
 * task's remaining delay. Returns {@code null} when woken without a task.
 * Must be called from the event loop thread.
 */
protected Runnable takeTask() {
    assert inEventLoop();
    if (!(taskQueue instanceof BlockingQueue)) {
        throw new UnsupportedOperationException();
    }
    BlockingQueue<Runnable> taskQueue = (BlockingQueue<Runnable>) this.taskQueue;
    for (;;) {
        ScheduledFutureTask<?> scheduledTask = peekScheduledTask();
        if (scheduledTask == null) {
            // No scheduled work pending: block indefinitely on the plain queue.
            Runnable task = null;
            try {
                task = taskQueue.take();
                if (task == WAKEUP_TASK) {
                    task = null;
                }
            } catch (InterruptedException e) {
                // Ignore
            }
            return task;
        } else {
            long delayNanos = scheduledTask.delayNanos();
            Runnable task = null;
            if (delayNanos > 0) {
                // Wait at most until the scheduled task becomes due.
                try {
                    task = taskQueue.poll(delayNanos, TimeUnit.NANOSECONDS);
                } catch (InterruptedException e) {
                    // Waken up.
                    return null;
                }
            }
            if (task == null) {
                // We need to fetch the scheduled tasks now as otherwise there may be a chance that
                // scheduled tasks are never executed if there is always one task in the taskQueue.
                // This is for example true for the read task of OIO Transport
                // See https://github.com/netty/netty/issues/1614
                fetchFromScheduledTaskQueue();
                task = taskQueue.poll();
            }
            if (task != null) {
                if (task == WAKEUP_TASK) {
                    return null;
                }
                return task;
            }
            // Nothing available yet — loop and re-examine the scheduled queue.
        }
    }
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testTakeTask() throws Exception {
    // Verifies that takeTask interleaves plain and scheduled tasks: tasks queued
    // before and after a scheduled task all run, and the scheduled task fires.
    final SingleThreadEventExecutor executor =
        new SingleThreadEventExecutor(null, Executors.defaultThreadFactory(), true) {
            @Override
            protected void run() {
                while (!confirmShutdown()) {
                    Runnable task = takeTask();
                    if (task != null) {
                        task.run();
                    }
                }
            }
        };
    //add task
    TestRunnable beforeTask = new TestRunnable();
    executor.execute(beforeTask);
    //add scheduled task
    TestRunnable scheduledTask = new TestRunnable();
    ScheduledFuture<?> f = executor.schedule(scheduledTask , 1500, TimeUnit.MILLISECONDS);
    //add task
    TestRunnable afterTask = new TestRunnable();
    executor.execute(afterTask);
    f.sync();
    assertThat(beforeTask.ran.get(), is(true));
    assertThat(scheduledTask.ran.get(), is(true));
    assertThat(afterTask.ran.get(), is(true));
}
/**
 * Builds a {@link JobGraph} from the given stream graph using the current thread's
 * context classloader, no JobID override, and a direct (synchronous) executor.
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    return new StreamingJobGraphGenerator(
                    Thread.currentThread().getContextClassLoader(), streamGraph, null, Runnable::run)
            .createJobGraph();
}
@Test
void testTransformationSetParallelism() {
    // Verifies that explicitly-set parallelism is flagged as "configured" on both
    // the stream nodes and the resulting job vertices, while inherited parallelism is not.
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // The default parallelism of the environment (that is inherited by the source)
    // and the parallelism of the map operator needs to be different for this test
    env.setParallelism(4);
    env.fromSequence(1L, 3L).map(i -> i).setParallelism(10).print().setParallelism(20);
    StreamGraph streamGraph = env.getStreamGraph();
    // check the streamGraph parallelism configured
    final List<StreamNode> streamNodes =
        streamGraph.getStreamNodes().stream()
            .sorted(Comparator.comparingInt(StreamNode::getId))
            .collect(Collectors.toList());
    assertThat(streamNodes.get(0).isParallelismConfigured()).isFalse();
    assertThat(streamNodes.get(1).isParallelismConfigured()).isTrue();
    assertThat(streamNodes.get(2).isParallelismConfigured()).isTrue();
    // check the jobGraph parallelism configured
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    assertThat(jobGraph.getNumberOfVertices()).isEqualTo(3);
    assertThat(vertices.get(0).isParallelismConfigured()).isFalse();
    assertThat(vertices.get(1).isParallelismConfigured()).isTrue();
    assertThat(vertices.get(2).isParallelismConfigured()).isTrue();
}
/**
 * Creates the consumer id that follows {@code lastId}.
 * Returns the shared {@code DEFAULT} id when there is no predecessor.
 *
 * @param lastId the previous consumer id, or null for the first consumer
 * @return the next consumer id
 */
public static HsConsumerId newId(@Nullable HsConsumerId lastId) {
    if (lastId == null) {
        return DEFAULT;
    }
    return new HsConsumerId(lastId.id + 1);
}
@Test
void testNewIdFromNull() {
    // With no predecessor, newId must return the shared DEFAULT id.
    HsConsumerId consumerId = HsConsumerId.newId(null);
    assertThat(consumerId).isNotNull().isEqualTo(HsConsumerId.DEFAULT);
}
/**
 * Resolves the permissions for the intermediate user done-dir from configuration.
 * Falls back to the default when unset; user and group bits are coerced to rwx
 * (with a warning) because weaker settings are unsupported.
 */
public static FsPermission getConfiguredHistoryIntermediateUserDoneDirPermissions(
    Configuration conf) {
  String userDoneDirPermissions = conf.get(
      JHAdminConfig.MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS);
  if (userDoneDirPermissions == null) {
    return new FsPermission(
        JHAdminConfig.DEFAULT_MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS);
  }
  FsPermission permission = new FsPermission(userDoneDirPermissions);
  // Force user and group to ALL, preserving only the other-bits and sticky bit.
  if (permission.getUserAction() != FsAction.ALL
      || permission.getGroupAction() != FsAction.ALL) {
    permission = new FsPermission(FsAction.ALL, FsAction.ALL,
        permission.getOtherAction(), permission.getStickyBit());
    LOG.warn("Unsupported permission configured in "
        + JHAdminConfig.MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS
        + ", the user and the group permission must be 7 (rwx). "
        + "The permission was set to " + permission.toString());
  }
  return permission;
}
@Test
public void testGetConfiguredHistoryIntermediateUserDoneDirPermissions() {
  // Each configured string must be normalized so user and group end up rwx (7),
  // while other-bits are preserved from the configured value.
  Configuration conf = new Configuration();
  Map<String, FsPermission> parameters = ImmutableMap.of(
      "775", new FsPermission(0775),
      "123", new FsPermission(0773),
      "-rwx", new FsPermission(0770),
      "+rwx", new FsPermission(0777)
  );
  for (Map.Entry<String, FsPermission> entry : parameters.entrySet()) {
    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS,
        entry.getKey());
    assertEquals(entry.getValue(),
        getConfiguredHistoryIntermediateUserDoneDirPermissions(conf));
  }
}
/**
 * Loads and renders a segment-generation job spec.
 * Property resolution order (later wins): property file &lt; environment values
 * (only for keys already present in the property file) &lt; explicit context.
 * The spec template is rendered with Groovy templating, then parsed as YAML
 * (default) or JSON according to the {@code JOB_SPEC_FORMAT} property.
 *
 * @param jobSpecFilePath path to the job spec template file
 * @param propertyFilePath optional path to a properties file; may be null
 * @param context optional overriding values; may be null
 * @param environmentValues optional environment overrides; may be null
 * @return the parsed job spec
 */
public static SegmentGenerationJobSpec getSegmentGenerationJobSpec(String jobSpecFilePath,
    String propertyFilePath, Map<String, Object> context, Map<String, String> environmentValues) {
  Properties properties = new Properties();
  if (propertyFilePath != null) {
    try {
      properties.load(FileUtils.openInputStream(new File(propertyFilePath)));
    } catch (IOException e) {
      throw new RuntimeException(
          String.format("Unable to read property file [%s] into properties.", propertyFilePath), e);
    }
  }
  // Properties extends Hashtable, so this raw cast treats it as a Map directly.
  Map<String, Object> propertiesMap = (Map) properties;
  if (environmentValues != null) {
    // Only keys already present in the property file can be overridden by the environment.
    for (String propertyName: propertiesMap.keySet()) {
      if (environmentValues.get(propertyName) != null) {
        propertiesMap.put(propertyName, environmentValues.get(propertyName));
      }
    }
  }
  if (context != null) {
    propertiesMap.putAll(context);
  }
  String jobSpecTemplate;
  try {
    jobSpecTemplate = IOUtils.toString(new BufferedReader(new FileReader(jobSpecFilePath)));
  } catch (IOException e) {
    throw new RuntimeException(String.format("Unable to read ingestion job spec file [%s].", jobSpecFilePath), e);
  }
  String jobSpecStr;
  try {
    jobSpecStr = GroovyTemplateUtils.renderTemplate(jobSpecTemplate, propertiesMap);
  } catch (Exception e) {
    throw new RuntimeException(String
        .format("Unable to render templates on ingestion job spec template file - [%s] with propertiesMap - [%s].",
            jobSpecFilePath, Arrays.toString(propertiesMap.entrySet().toArray())), e);
  }
  String jobSpecFormat = (String) propertiesMap.getOrDefault(JOB_SPEC_FORMAT, YAML);
  if (jobSpecFormat.equals(JSON)) {
    try {
      return JsonUtils.stringToObject(jobSpecStr, SegmentGenerationJobSpec.class);
    } catch (IOException e) {
      throw new RuntimeException(String
          .format("Unable to parse job spec - [%s] to JSON with propertiesMap - [%s]", jobSpecFilePath,
              Arrays.toString(propertiesMap.entrySet().toArray())), e);
    }
  }
  return new Yaml().loadAs(jobSpecStr, SegmentGenerationJobSpec.class);
}
@Test
public void testIngestionJobLauncherWithTemplateAndPropertyFileAndValueOverride() {
  // Context values (year=2020) should override property-file values when the template is rendered.
  Map<String, Object> context = GroovyTemplateUtils.getTemplateContext(Arrays.asList("year=2020"));
  SegmentGenerationJobSpec spec = IngestionJobLauncher.getSegmentGenerationJobSpec(
      GroovyTemplateUtils.class.getClassLoader().getResource("ingestion_job_spec_template.yaml").getFile(),
      GroovyTemplateUtils.class.getClassLoader().getResource("job.config").getFile(),
      context, null);
  Assert.assertEquals(spec.getInputDirURI(), "file:///path/to/input/2020/06/07");
  Assert.assertEquals(spec.getOutputDirURI(), "file:///path/to/output/2020/06/07");
  Assert.assertEquals(spec.getSegmentCreationJobParallelism(), 100);
}
/**
 * Returns the connector metadata produced by the injected factory.
 * NOTE(review): a new instance may be created on each call — confirm whether the
 * factory caches before relying on identity.
 */
@Override
public ConnectorMetadata getMetadata() {
    return metadataFactory.create();
}
@Test
public void testCreateHiveConnector(@Mocked HiveConnectorInternalMgr internalMgr) {
    // Builds a HiveConnector against a fully mocked internal manager and verifies
    // that the resulting metadata resolves a table with the expected schema/location.
    FeConstants.runningUnitTest = true;
    Map<String, String> properties = ImmutableMap.of("hive.metastore.uris", "thrift://127.0.0.1:9083",
            "type", "hive");
    new Expectations() {
        {
            internalMgr.createHiveMetastore();
            result = cachingHiveMetastore;
            internalMgr.createRemoteFileIO();
            result = cachingRemoteFileIO;
            internalMgr.getHiveMetastoreConf();
            result = new CachingHiveMetastoreConf(properties, "hive");
            internalMgr.getRemoteFileConf();
            result = new CachingRemoteFileConf(properties);
            internalMgr.getPullRemoteFileExecutor();
            result = executorForPullFiles;
            internalMgr.isSearchRecursive();
            result = false;
        }
    };
    HiveConnector hiveConnector = new HiveConnector(new ConnectorContext("hive_catalog", "hive", properties));
    ConnectorMetadata metadata = hiveConnector.getMetadata();
    Assert.assertTrue(metadata instanceof HiveMetadata);
    com.starrocks.catalog.Table table = metadata.getTable("db1", "tbl1");
    HiveTable hiveTable = (HiveTable) table;
    Assert.assertEquals("db1", hiveTable.getDbName());
    Assert.assertEquals("tbl1", hiveTable.getTableName());
    Assert.assertEquals(Lists.newArrayList("col1"), hiveTable.getPartitionColumnNames());
    Assert.assertEquals(Lists.newArrayList("col2"), hiveTable.getDataColumnNames());
    Assert.assertEquals("hdfs://127.0.0.1:10000/hive", hiveTable.getTableLocation());
    Assert.assertEquals(ScalarType.INT, hiveTable.getPartitionColumns().get(0).getType());
    Assert.assertEquals(ScalarType.INT, hiveTable.getBaseSchema().get(0).getType());
    Assert.assertEquals("hive_catalog", hiveTable.getCatalogName());
}
/** Returns a page of member groups matching the paging/filter request; delegates to the mapper. */
@Override
public PageResult<MemberGroupDO> getGroupPage(MemberGroupPageReqVO pageReqVO) {
    return memberGroupMapper.selectPage(pageReqVO);
}
@Test
public void testGetGroupPage() {
    String name = randomString();
    int status = CommonStatusEnum.ENABLE.getStatus();
    // mock data
    MemberGroupDO dbGroup = randomPojo(MemberGroupDO.class, o -> { // will match the query below
        o.setName(name);
        o.setStatus(status);
        o.setCreateTime(buildTime(2023, 2, 18));
    });
    groupMapper.insert(dbGroup);
    // row whose name does not match
    groupMapper.insert(cloneIgnoreId(dbGroup, o -> o.setName("")));
    // row whose status does not match
    groupMapper.insert(cloneIgnoreId(dbGroup, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // row whose createTime does not match
    groupMapper.insert(cloneIgnoreId(dbGroup, o -> o.setCreateTime(null)));
    // prepare parameters
    MemberGroupPageReqVO reqVO = new MemberGroupPageReqVO();
    reqVO.setName(name);
    reqVO.setStatus(status);
    reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
    // invoke
    PageResult<MemberGroupDO> pageResult = groupService.getGroupPage(reqVO);
    // assert: only the fully matching row is returned
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbGroup, pageResult.getList().get(0));
}
/**
 * Converts a REST-layer {@link ReservationDefinitionInfo} into a YARN
 * {@link ReservationDefinition}. Requires at least one reservation request.
 *
 * @param definitionInfo the REST definition to convert
 * @return the equivalent YARN reservation definition
 * @throws RuntimeException if the definition or its requests are null/empty
 */
public static ReservationDefinition convertReservationDefinition(
    ReservationDefinitionInfo definitionInfo) {
  if (definitionInfo == null || definitionInfo.getReservationRequests() == null
      || definitionInfo.getReservationRequests().getReservationRequest() == null
      || definitionInfo.getReservationRequests().getReservationRequest().isEmpty()) {
    throw new RuntimeException("definitionInfo Or ReservationRequests is Null.");
  }
  // basic variable
  long arrival = definitionInfo.getArrival();
  long deadline = definitionInfo.getDeadline();
  // ReservationRequests reservationRequests
  String name = definitionInfo.getReservationName();
  String recurrenceExpression = definitionInfo.getRecurrenceExpression();
  Priority priority = Priority.newInstance(definitionInfo.getPriority());
  // reservation requests info
  List<ReservationRequest> reservationRequestList = new ArrayList<>();
  ReservationRequestsInfo reservationRequestsInfo = definitionInfo.getReservationRequests();
  List<ReservationRequestInfo> reservationRequestInfos =
      reservationRequestsInfo.getReservationRequest();
  for (ReservationRequestInfo resRequestInfo : reservationRequestInfos) {
    ResourceInfo resourceInfo = resRequestInfo.getCapability();
    Resource capability =
        Resource.newInstance(resourceInfo.getMemorySize(), resourceInfo.getvCores());
    ReservationRequest reservationRequest = ReservationRequest.newInstance(capability,
        resRequestInfo.getNumContainers(), resRequestInfo.getMinConcurrency(),
        resRequestInfo.getDuration());
    reservationRequestList.add(reservationRequest);
  }
  // The interpreter is transported as an ordinal index into the enum's values.
  ReservationRequestInterpreter[] values = ReservationRequestInterpreter.values();
  ReservationRequestInterpreter reservationRequestInterpreter =
      values[reservationRequestsInfo.getReservationRequestsInterpreter()];
  ReservationRequests reservationRequests = ReservationRequests.newInstance(
      reservationRequestList, reservationRequestInterpreter);
  ReservationDefinition definition = ReservationDefinition.newInstance(
      arrival, deadline, reservationRequests, name,
      recurrenceExpression, priority);
  return definition;
}
@Test
public void testConvertReservationDefinitionEmpty() throws Exception {
  // All three degenerate inputs (null definition, null requests, empty requests)
  // must fail with the same RuntimeException message.
  // param ReservationDefinitionInfo is Null
  ReservationDefinitionInfo definitionInfo = null;
  // null request1
  LambdaTestUtils.intercept(RuntimeException.class,
      "definitionInfo Or ReservationRequests is Null.",
      () -> RouterServerUtil.convertReservationDefinition(definitionInfo));
  // param ReservationRequests is Null
  ReservationDefinitionInfo definitionInfo2 = new ReservationDefinitionInfo();
  // null request2
  LambdaTestUtils.intercept(RuntimeException.class,
      "definitionInfo Or ReservationRequests is Null.",
      () -> RouterServerUtil.convertReservationDefinition(definitionInfo2));
  // param ReservationRequests is Null
  ReservationDefinitionInfo definitionInfo3 = new ReservationDefinitionInfo();
  ReservationRequestsInfo requestsInfo = new ReservationRequestsInfo();
  definitionInfo3.setReservationRequests(requestsInfo);
  // null request3
  LambdaTestUtils.intercept(RuntimeException.class,
      "definitionInfo Or ReservationRequests is Null.",
      () -> RouterServerUtil.convertReservationDefinition(definitionInfo3));
}
/**
 * Checks for a new config generation, waiting up to {@code timeout} ms.
 * Returns true if a forced reload or a file modification produced a new config
 * state; otherwise sleeps for the timeout and returns false.
 */
@Override
public boolean nextConfig(long timeout) {
    file.validateFile();
    if (checkReloaded()) {
        log.log(FINE, () -> "User forced config reload at " + System.currentTimeMillis());
        // User forced reload
        setConfigIfChanged(updateConfig());
        ConfigState<T> configState = getConfigState();
        log.log(FINE, () -> "Config updated at " + System.currentTimeMillis() + ", changed: " + configState.isConfigChanged());
        log.log(FINE, () -> "Config: " + configState.getConfig().toString());
        return true;
    }
    // File changed on disk since last seen timestamp: reload and bump generation.
    if (file.getLastModified() != ts) {
        setConfigIncGen(updateConfig());
        return true;
    }
    // Nothing new — wait out the timeout before reporting no change.
    try {
        Thread.sleep(timeout);
    } catch (InterruptedException e) {
        throw new ConfigInterruptedException(e);
    }
    return false;
}
@Test
public void require_that_dir_config_id_reference_is_not_changed() {
    // Subscribing with a "dir:" config id must keep that id intact in the resolved config.
    final String cfgDir = "src/test/resources/configs/foo";
    final String cfgId = "dir:" + cfgDir;
    final ConfigKey<TestReferenceConfig> key = new ConfigKey<>(TestReferenceConfig.class, cfgId);
    ConfigSubscription<TestReferenceConfig> sub = ConfigSubscription.get(key, new JrtConfigRequesters(),
            new DirSource(new File(cfgDir)), new TimingValues());
    assertTrue(sub.nextConfig(1000));
    assertEquals(cfgId, sub.getConfigState().getConfig().configId());
}
/**
 * Mojo entry point: validates configured paths, runs the EIP documentation
 * enrichment, and optionally deletes files afterwards. A missing input schema
 * file is a soft skip, not an error.
 *
 * @throws MojoExecutionException on missing config or any failure during the run
 */
@Override
public void execute() throws MojoExecutionException {
    if (pathToModelDir == null) {
        throw new MojoExecutionException("pathToModelDir parameter must not be null");
    }
    // skip if input file does not exist
    if (inputCamelSchemaFile == null || !inputCamelSchemaFile.exists()) {
        getLog().info("Input Camel schema file: " + inputCamelSchemaFile + " does not exist. Skip EIP document enrichment");
        return;
    }
    validateExists(inputCamelSchemaFile, "inputCamelSchemaFile");
    validateIsFile(inputCamelSchemaFile, "inputCamelSchemaFile");
    validateExists(camelCoreXmlDir, "camelCoreXmlDir");
    validateIsDirectory(camelCoreModelDir, "camelCoreModelDir");
    validateIsDirectory(camelCoreXmlDir, "camelCoreXmlDir");
    try {
        runPlugin();
    } catch (Exception e) {
        // Wrap any failure, preserving the cause for Maven's error report.
        throw new MojoExecutionException("Error during plugin execution", e);
    }
    if (deleteFilesAfterRun != null) {
        deleteFilesAfterDone(deleteFilesAfterRun);
    }
}
@Test
public void testExecuteInputCamelSchemaIsNotAFile() {
    // An input schema path that exists but is not a regular file must fail validation.
    when(mockInputSchema.exists()).thenReturn(true);
    when(mockInputSchema.isFile()).thenReturn(false);
    try {
        eipDocumentationEnricherMojo.execute();
        fail("Expected MojoExecutionException");
    } catch (MojoExecutionException e) {
        // Expected.
    }
}
/** Varargs convenience overload; delegates to the ImmutableList-based factory. */
@VisibleForTesting
static UUnionType create(UExpression... typeAlternatives) {
  return create(ImmutableList.copyOf(typeAlternatives));
}
@Test
public void equality() {
  // Union types are order-sensitive: {IAE, ISE} differs from {ISE, IAE}, and
  // adding an extra alternative yields yet another distinct equality group.
  new EqualsTester()
      .addEqualityGroup(
          UUnionType.create(
              UClassIdent.create("java.lang.IllegalArgumentException"),
              UClassIdent.create("java.lang.IllegalStateException")))
      .addEqualityGroup(
          UUnionType.create(
              UClassIdent.create("java.lang.IllegalStateException"),
              UClassIdent.create("java.lang.IllegalArgumentException")))
      .addEqualityGroup(
          UUnionType.create(
              UClassIdent.create("java.lang.IllegalStateException"),
              UClassIdent.create("java.lang.IllegalArgumentException"),
              UClassIdent.create("java.lang.IndexOutOfBoundsException")))
      .testEquals();
}
/**
 * Deletes the federation queue-weight policies for the requested queues.
 * Validates the request, records success/failure metrics with timing, and
 * wraps any store failure in a YarnException.
 *
 * @param request request carrying the queue names; must be non-null and non-empty
 * @return a response whose message lists the deleted queues
 * @throws YarnException on invalid input or store failure
 * @throws IOException declared for interface compatibility
 */
@Override
public DeleteFederationQueuePoliciesResponse deleteFederationPoliciesByQueues(
    DeleteFederationQueuePoliciesRequest request) throws YarnException, IOException {
  // Parameter validation.
  if (request == null) {
    routerMetrics.incrDeleteFederationPoliciesByQueuesRetrieved();
    RouterServerUtil.logAndThrowException(
        "Missing deleteFederationQueuePoliciesByQueues Request.", null);
  }
  List<String> queues = request.getQueues();
  if (CollectionUtils.isEmpty(queues)) {
    routerMetrics.incrDeleteFederationPoliciesByQueuesRetrieved();
    RouterServerUtil.logAndThrowException("queues cannot be null.", null);
  }
  // Try calling deleteApplicationHomeSubCluster to delete the application.
  try {
    long startTime = clock.getTime();
    federationFacade.deletePolicyConfigurations(queues);
    long stopTime = clock.getTime();
    routerMetrics.succeededDeleteFederationPoliciesByQueuesRetrieved(stopTime - startTime);
    return DeleteFederationQueuePoliciesResponse.newInstance(
        "queues = " + StringUtils.join(queues, ",") + " delete success.");
  } catch (Exception e) {
    RouterServerUtil.logAndThrowException(e,
        "Unable to deleteFederationPoliciesByQueues due to exception. " + e.getMessage());
  }
  // Unreachable in practice (logAndThrowException throws), kept for compilation.
  throw new YarnException("Unable to deleteFederationPoliciesByQueues.");
}
@Test
public void testDeleteFederationPoliciesByQueues() throws IOException, YarnException {
  // Saves policies for three queues, then deletes one and checks the response message.
  // subClusters
  List<String> subClusterLists = new ArrayList<>();
  subClusterLists.add("SC-1");
  subClusterLists.add("SC-2");
  // generate queue A, queue B, queue C
  FederationQueueWeight rootA = generateFederationQueueWeight("root.a", subClusterLists);
  FederationQueueWeight rootB = generateFederationQueueWeight("root.b", subClusterLists);
  FederationQueueWeight rootC = generateFederationQueueWeight("root.c", subClusterLists);
  List<FederationQueueWeight> federationQueueWeights = new ArrayList<>();
  federationQueueWeights.add(rootA);
  federationQueueWeights.add(rootB);
  federationQueueWeights.add(rootC);
  // Step1. Save Queue Policies in Batches
  BatchSaveFederationQueuePoliciesRequest request =
      BatchSaveFederationQueuePoliciesRequest.newInstance(federationQueueWeights);
  BatchSaveFederationQueuePoliciesResponse policiesResponse =
      interceptor.batchSaveFederationQueuePolicies(request);
  assertNotNull(policiesResponse);
  assertNotNull(policiesResponse.getMessage());
  assertEquals("batch save policies success.", policiesResponse.getMessage());
  // Step2. Delete the policy of root.c
  List<String> deleteQueues = new ArrayList<>();
  deleteQueues.add("root.c");
  DeleteFederationQueuePoliciesRequest deleteRequest =
      DeleteFederationQueuePoliciesRequest.newInstance(deleteQueues);
  DeleteFederationQueuePoliciesResponse deleteResponse =
      interceptor.deleteFederationPoliciesByQueues(deleteRequest);
  assertNotNull(deleteResponse);
  String message = deleteResponse.getMessage();
  assertEquals("queues = root.c delete success.", message);
}
/**
 * REST endpoint returning the certificate rights for the given request.
 * Delegates to the RDW service, forwarding the caller's IP from the
 * X-FORWARDED-FOR header.
 */
@Operation(summary = "Get the right at certificate")
@PostMapping(value = { Constants.URL_OLD_RDW_GETCERTIFICATE, Constants.URL_RDW_GETCERTIFICATE },
        consumes = "application/json", produces = "application/json")
public GetCertificateResponse getCertificateRestService(@Valid @RequestBody GetCertificateRequest request,
        @RequestHeader(value = "X-FORWARDED-FOR") String clientIp) {
    return rdwService.getCertificateRestService(request, clientIp);
}
@Test
public void getCertificateRestServiceTest() {
    // The controller must pass the request through to the service and return
    // the service's response unchanged.
    GetCertificateResponse expectedResponse = new GetCertificateResponse();
    when(rdwServiceMock.getCertificateRestService(any(GetCertificateRequest.class), anyString()))
        .thenReturn(expectedResponse);
    GetCertificateResponse actualResponse =
        rdwController.getCertificateRestService(new GetCertificateRequest(), "");
    assertEquals(expectedResponse, actualResponse);
}