Dataset schema:
  focal_method — string column, lengths 13 to 60.9k characters
  test_case   — string column, lengths 25 to 109k characters
/**
 * REST endpoint that requests placement of an "afnemersindicatie" for the given citizen.
 *
 * @param bsn the burgerservicenummer (Dutch citizen service number) taken from the path
 */
@PostMapping("/{bsn}")
// Fixed typo in the OpenAPI summary: "afnemersindcatie" -> "afnemersindicatie".
@Operation(summary = "Create a request to place an afnemersindicatie")
public void createAfnemersindicatie(@PathVariable("bsn") String bsn) {
    afnemersindicatieService.createAfnemersindicatie(bsn);
}
/** Verifies the controller delegates afnemersindicatie creation to the service exactly once. */
@Test
public void testCreateAfnemersindicatie() {
    final String bsn = "SSSSSSSSS";

    afnemersindicatieController.createAfnemersindicatie(bsn);

    verify(afnemersindicatieService, times(1)).createAfnemersindicatie(bsn);
}
/**
 * Exports the authorized user's Gmail messages, one page per invocation.
 * Lists up to PAGE_SIZE message ids (resuming from the pagination token in
 * {@code exportInformation} when present), fetches each message in "raw" format,
 * and wraps them in {@link MailMessageModel}s. Any IOException aborts the export
 * with an error {@code ExportResult}. A non-null nextPageToken yields
 * {@code ResultType.CONTINUE} plus a token for the next round; otherwise END.
 *
 * NOTE(review): response.getMessages() is assumed non-null — TODO confirm the Gmail
 * API never returns a null list for an empty mailbox.
 */
@Override public ExportResult<MailContainerResource> export(UUID id, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation) { // Create a new gmail service for the authorized user Gmail gmail = getOrCreateGmail(authData); Messages.List request = null; try { request = gmail.users().messages().list(USER).setMaxResults(PAGE_SIZE); } catch (IOException e) { return new ExportResult<>(e); } if (exportInformation.isPresent() && exportInformation.get().getPaginationData() != null) { request.setPageToken( ((StringPaginationToken) exportInformation.get().getPaginationData()).getToken()); } ListMessagesResponse response = null; try { response = request.execute(); } catch (IOException e) { return new ExportResult<>(e); } List<MailMessageModel> results = new ArrayList<>(response.getMessages().size()); // TODO: this is a good indication we need to swap the interface // as we can't store all the mail messages in memory at once. for (Message listMessage : response.getMessages()) { Message getResponse = null; try { getResponse = gmail.users().messages().get(USER, listMessage.getId()).setFormat("raw").execute(); } catch (IOException e) { return new ExportResult<>(e); } // TODO: note this doesn't transfer things like labels results.add(new MailMessageModel(getResponse.getRaw(), getResponse.getLabelIds())); } PaginationData newPage = null; ResultType resultType = ResultType.END; if (response.getNextPageToken() != null) { newPage = new StringPaginationToken(response.getNextPageToken()); resultType = ResultType.CONTINUE; } MailContainerResource mailContainerResource = new MailContainerResource(null, results); return new ExportResult<>(resultType, mailContainerResource, new ContinuationData(newPage)); }
/**
 * First-page export: verifies the list/get call sequence (no page token is set on the
 * first request), that the continuation token equals the mocked nextPageToken, and that
 * the exported mail carries the expected raw content and labels.
 */
@Test public void exportMessagesFirstSet() throws IOException { setUpSingleMessageResponse(); // Looking at first page, with at least one page after it messageListResponse.setNextPageToken(NEXT_TOKEN); // Run test ExportResult<MailContainerResource> result = googleMailExporter.export(JOB_ID, null, Optional.empty()); // Check results // Verify correct methods were called InOrder inOrder = Mockito.inOrder(messages, messageListRequest, get); // First request inOrder.verify(messages).list(GoogleMailExporter.USER); inOrder.verify(messageListRequest).setMaxResults(GoogleMailExporter.PAGE_SIZE); verify(messageListRequest, never()).setPageToken(anyString()); // Second request inOrder.verify(messages).get(GoogleMailExporter.USER, MESSAGE_ID); inOrder.verify(get).setFormat("raw"); inOrder.verify(get).execute(); // Check pagination token ContinuationData continuationData = (ContinuationData) result.getContinuationData(); StringPaginationToken paginationToken = (StringPaginationToken) continuationData.getPaginationData(); assertThat(paginationToken.getToken()).isEqualTo(NEXT_TOKEN); // Check messages Collection<MailMessageModel> actualMail = result.getExportedData().getMessages(); assertThat(actualMail.stream().map(MailMessageModel::getRawString).collect(Collectors.toList())) .containsExactly(MESSAGE_RAW); assertThat( actualMail.stream().map(MailMessageModel::getContainerIds).collect(Collectors.toList())) .containsExactly(MESSAGE_LABELS); }
public DataTableDiff calculateUnorderedDiffs() { List<SimpleEntry<List<String>, DiffType>> diffTableRows = new ArrayList<>(); // 1. add all "to" row in extra table // 2. iterate over "from", when a common row occurs, remove it from // extraRows // finally, only extra rows are kept and in same order that in "to". ArrayList<List<String>> extraRows = new ArrayList<>(to.cells()); for (List<String> row : from.cells()) { if (!extraRows.remove(row)) { diffTableRows.add( new SimpleEntry<>(row, DiffType.DELETE)); } else { diffTableRows.add( new SimpleEntry<>(row, DiffType.NONE)); } } for (List<String> cells : extraRows) { diffTableRows.add( new SimpleEntry<>(cells, DiffType.INSERT)); } return DataTableDiff.create(diffTableRows); }
/** An unordered diff of a table against a reordered copy of itself must be empty. */
@Test
void unordered_diff_with_itself_in_different_order() {
    TableDiffer differ = new TableDiffer(table(), otherTableWithDifferentOrder());
    assertTrue(differ.calculateUnorderedDiffs().isEmpty());
}
/**
 * Asserts that the subject is equal to any of the given elements.
 * Delegates to {@code isIn} over the varargs collected by {@code accumulate}.
 */
public void isAnyOf(
    @Nullable Object first, @Nullable Object second, @Nullable Object @Nullable ... rest) {
  isIn(accumulate(first, second, rest));
}
/** A subject equal to one of the listed candidates passes isAnyOf. */
@Test
public void isAnyOf() {
  assertThat("b").isAnyOf("a", "b", "c");
}
/** Returns a defensive snapshot of the ids of all currently registered job runners. */
@Override
public Set<JobID> getRunningJobIds() {
    final Set<JobID> snapshot = new HashSet<>();
    snapshot.addAll(jobManagerRunners.keySet());
    return snapshot;
}
/** An empty registry reports no ids; registered runners' job ids are all reported. */
@Test
void testGetRunningJobIds() {
    assertThat(testInstance.getRunningJobIds()).isEmpty();
    final JobID jobId0 = new JobID();
    final JobID jobId1 = new JobID();
    testInstance.register(TestingJobManagerRunner.newBuilder().setJobId(jobId0).build());
    testInstance.register(TestingJobManagerRunner.newBuilder().setJobId(jobId1).build());
    assertThat(testInstance.getRunningJobIds()).containsExactlyInAnyOrder(jobId0, jobId1);
}
/**
 * Builds the list of {@code MappingRule}s from the raw rule description.
 * Each rule is validated, converted into a matcher/action pair, and has its
 * fallback behaviour attached to the action before being collected.
 */
@VisibleForTesting
List<MappingRule> getMappingRules(MappingRulesDescription rules) {
    final List<MappingRule> result = new ArrayList<>();
    for (Rule rule : rules.getRules()) {
        // Throws when a required rule property is missing.
        checkMandatoryParameters(rule);
        final MappingRuleMatcher ruleMatcher = createMatcher(rule);
        final MappingRuleAction ruleAction = createAction(rule);
        setFallbackToAction(rule, ruleAction);
        result.add(new MappingRule(ruleMatcher, ruleAction));
    }
    return result;
}
/** A CUSTOM-policy rule without its custom queue must be rejected at rule-creation time. */
@Test
public void testCustomRuleWithMissingQueue() {
    rule.setPolicy(Policy.CUSTOM);
    expected.expect(IllegalArgumentException.class);
    expected.expectMessage("custom queue is undefined");
    ruleCreator.getMappingRules(description);
}
public static OpenAction getOpenAction(int flag) { // open flags must contain one of O_RDONLY(0), O_WRONLY(1), O_RDWR(2) // O_ACCMODE is mask of read write(3) // Alluxio fuse only supports read-only for completed file // and write-only for file that does not exist or contains open flag O_TRUNC // O_RDWR will be treated as read-only if file exists and no O_TRUNC, // write-only otherwise switch (OpenFlags.valueOf(flag & O_ACCMODE.intValue())) { case O_RDONLY: return OpenAction.READ_ONLY; case O_WRONLY: return OpenAction.WRITE_ONLY; case O_RDWR: return OpenAction.READ_WRITE; default: // Should not fall here return OpenAction.NOT_SUPPORTED; } }
/** Flags carrying the O_WRONLY access mode (with or without extra bits) map to WRITE_ONLY. */
@Test
public void writeOnly() {
    // 0x8001 and 0x9001 both have access-mode bits == O_WRONLY (1).
    int[] readFlags = new int[]{0x8001, 0x9001};
    for (int readFlag : readFlags) {
        Assert.assertEquals(AlluxioFuseOpenUtils.OpenAction.WRITE_ONLY,
            AlluxioFuseOpenUtils.getOpenAction(readFlag));
    }
}
/** @return the configured name, or {@code null} when none has been set */
@Override
public String getName() {
    return this.name;
}
/** A freshly constructed TopicConfig has no name set. */
@Test
public void testGetName() {
    assertNull(new TopicConfig().getName());
}
/**
 * Greedily fuses the given portable pipeline into executable stages.
 *
 * @param p the pipeline to fuse
 * @return the fused form of the pipeline
 */
public static FusedPipeline fuse(Pipeline p) {
  return new GreedyPipelineFuser(p).fusedPipeline;
}
/**
 * ParDo transforms whose spec declares no execution environment must be scheduled as
 * runner-executed: after fusion, impulse/mystery/enigma all appear among the
 * runner-executed transforms and no fused (SDK) stages are produced.
 */
@Test public void transformsWithNoEnvironmentBecomeRunnerExecuted() { Components components = partialComponents .toBuilder() .putTransforms( "mystery", PTransform.newBuilder() .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)) .setUniqueName("Mystery") .putInputs("input", "impulse.out") .putOutputs("output", "mystery.out") .build()) .putPcollections("mystery.out", pc("mystery.out")) .putTransforms( "enigma", PTransform.newBuilder() .setSpec( FunctionSpec.newBuilder() .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)) .setUniqueName("Enigma") .putInputs("input", "impulse.out") .putOutputs("output", "enigma.out") .build()) .putPcollections("enigma.out", pc("enigma.out")) .build(); FusedPipeline fused = GreedyPipelineFuser.fuse(Pipeline.newBuilder().setComponents(components).build()); assertThat( fused.getRunnerExecutedTransforms(), containsInAnyOrder( PipelineNode.pTransform("impulse", components.getTransformsOrThrow("impulse")), PipelineNode.pTransform("mystery", components.getTransformsOrThrow("mystery")), PipelineNode.pTransform("enigma", components.getTransformsOrThrow("enigma")))); assertThat(fused.getFusedStages(), emptyIterable()); }
/**
 * Seckill (flash-sale) scenario 8: decrement stock in Redis, send the success
 * message asynchronously over MQ, and persist the result reactively to MongoDB
 * (summary text in the annotation is the original Chinese, kept verbatim).
 */
@Operation(summary = "秒杀场景八(秒杀商品存放redis减库存,异步发送秒杀成功MQ,mongoDb数据落地)")
@RequestMapping(value = "/redisReactiveMongo", method = POST, produces = {
        "application/json;charset=UTF-8"})
public Result redisReactiveMongo(@RequestBody @Valid SeckillWebMockRequestDTO dto) {
    processSeckill(dto, REDIS_MONGO_REACTIVE);
    return Result.ok();
}
/**
 * Exercises the redisReactiveMongo endpoint: the controller must accept the request
 * and answer OK (code 0) while the synchronous seckill service path is never invoked
 * (the work is handed off to the reactive Redis/Mongo flow).
 */
@Test
void redisReactiveMongo() {
    SeckillWebMockRequestDTO requestDTO = new SeckillWebMockRequestDTO();
    requestDTO.setSeckillId(1L);
    requestDTO.setRequestCount(1);
    // Removed an unused local SeckillMockRequestDTO ("any") that was never referenced
    // and only shadowed the Mockito any(...) matcher in the verify below.

    Result response = seckillMockController.redisReactiveMongo(requestDTO);

    verify(seckillService, times(0)).execute(any(SeckillMockRequestDTO.class), anyInt());
    assertEquals(0, response.getCode());
}
/**
 * Extracts a song title from the given text using SONG_PATTERN.
 *
 * @return a MusicClue built from the first captured group, or {@code null} when
 *         the pattern does not match
 */
public static MusicClue forText(String text) {
    final Matcher matcher = SONG_PATTERN.matcher(text);
    return matcher.find() ? new MusicClue(matcher.group(1)) : null;
}
/** An empty string contains no song reference, so no clue is produced. */
@Test
public void forTextEmptyString() {
    assertNull(MusicClue.forText(""));
}
// Selects the CheckpointStorage for a job. Precedence, highest first:
//   1. A legacy state backend that itself implements CheckpointStorage (possibly
//      unwrapped from a DelegatingStateBackend) always wins, for backwards compatibility.
//   2. Otherwise the storage passed from the application; when it is a
//      ConfigurableCheckpointStorage it is re-configured with the CLUSTER config
//      (deliberately not the merged config, for backwards compatibility).
//   3. Otherwise the storage named by CheckpointingOptions.CHECKPOINT_STORAGE in the
//      merged (job overrides cluster) configuration.
//   4. Otherwise a default checkpoint storage derived from the merged configuration.
// Every "ignored because of precedence" case is logged when a logger is supplied.
public static CheckpointStorage load( @Nullable CheckpointStorage fromApplication, StateBackend configuredStateBackend, Configuration jobConfig, Configuration clusterConfig, ClassLoader classLoader, @Nullable Logger logger) throws IllegalConfigurationException, DynamicCodeLoadingException { Preconditions.checkNotNull(jobConfig, "jobConfig"); Preconditions.checkNotNull(clusterConfig, "clusterConfig"); Preconditions.checkNotNull(classLoader, "classLoader"); Preconditions.checkNotNull(configuredStateBackend, "statebackend"); // Job level config can override the cluster level config. Configuration mergedConfig = new Configuration(clusterConfig); mergedConfig.addAll(jobConfig); // Legacy state backends always take precedence for backwards compatibility. StateBackend rootStateBackend = (configuredStateBackend instanceof DelegatingStateBackend) ? ((DelegatingStateBackend) configuredStateBackend) .getDelegatedStateBackend() : configuredStateBackend; if (rootStateBackend instanceof CheckpointStorage) { if (logger != null) { logger.info( "Using legacy state backend {} as Job checkpoint storage", rootStateBackend); if (fromApplication != null) { logger.warn( "Checkpoint storage passed via StreamExecutionEnvironment is ignored because legacy state backend '{}' is used. {}", rootStateBackend.getClass().getName(), LEGACY_PRECEDENCE_LOG_MESSAGE); } if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) { logger.warn( "Config option '{}' is ignored because legacy state backend '{}' is used. {}", CheckpointingOptions.CHECKPOINT_STORAGE.key(), rootStateBackend.getClass().getName(), LEGACY_PRECEDENCE_LOG_MESSAGE); } } return (CheckpointStorage) rootStateBackend; } // In the FLINK-2.0, the checkpoint storage from application will not be supported // anymore. 
if (fromApplication != null) { if (fromApplication instanceof ConfigurableCheckpointStorage) { if (logger != null) { logger.info( "Using job/cluster config to configure application-defined checkpoint storage: {}", fromApplication); if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) { logger.warn( "Config option '{}' is ignored because the checkpoint storage passed via StreamExecutionEnvironment takes precedence.", CheckpointingOptions.CHECKPOINT_STORAGE.key()); } } return ((ConfigurableCheckpointStorage) fromApplication) // Use cluster config for backwards compatibility. .configure(clusterConfig, classLoader); } if (logger != null) { logger.info("Using application defined checkpoint storage: {}", fromApplication); } return fromApplication; } return fromConfig(mergedConfig, classLoader, logger) .orElseGet(() -> createDefaultCheckpointStorage(mergedConfig, classLoader, logger)); }
/**
 * Misconfigured CHECKPOINT_STORAGE values must fail loudly: an unknown class name and a
 * non-factory class yield DynamicCodeLoadingException; a factory whose creation fails
 * surfaces as IllegalConfigurationException.
 */
@Test void testLoadingFails() throws Exception { final Configuration config = new Configuration(); config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "does.not.exist"); assertThatThrownBy( () -> CheckpointStorageLoader.load( null, new ModernStateBackend(), new Configuration(), config, cl, LOG)) .isInstanceOf(DynamicCodeLoadingException.class); // try a class that is not a factory config.set(CheckpointingOptions.CHECKPOINT_STORAGE, java.io.File.class.getName()); assertThatThrownBy( () -> CheckpointStorageLoader.load( null, new ModernStateBackend(), new Configuration(), config, cl, LOG)) .isInstanceOf(DynamicCodeLoadingException.class); // try a factory that fails config.set(CheckpointingOptions.CHECKPOINT_STORAGE, FailingFactory.class.getName()); assertThatThrownBy( () -> CheckpointStorageLoader.load( null, new ModernStateBackend(), new Configuration(), config, cl, LOG)) .isInstanceOf(IllegalConfigurationException.class); }
public double calculateElevationBasedOnTwoPoints(double lat, double lon, double lat0, double lon0, double ele0, double lat1, double lon1, double ele1) { double dlat0 = lat0 - lat; double dlon0 = lon0 - lon; double dlat1 = lat1 - lat; double dlon1 = lon1 - lon; double l0 = Math.sqrt(dlon0 * dlon0 + dlat0 * dlat0); double l1 = Math.sqrt(dlon1 * dlon1 + dlat1 * dlat1); double l = l0 + l1; if (l < EPSILON) { // If points are too close to each other, return elevation of the // point which is closer; return l0 <= l1 ? ele0 : ele1; } else { // Otherwise do linear interpolation return round2(ele0 + (ele1 - ele0) * l0 / l); } }
/**
 * Covers midpoint interpolation, points on and off the reference segment, asymmetric
 * weighting, near-coincident references (the closer point's elevation wins, unrounded),
 * and fully identical references (first point's elevation).
 */
@Test public void calculatesElevationOnTwoPoints() { assertEquals(15, elevationInterpolator.calculateElevationBasedOnTwoPoints(0, 0, -10, -10, 10, 10, 10, 20), PRECISION); assertEquals(15, elevationInterpolator.calculateElevationBasedOnTwoPoints(-10, 10, -10, -10, 10, 10, 10, 20), PRECISION); assertEquals(15, elevationInterpolator.calculateElevationBasedOnTwoPoints(-5, 5, -10, -10, 10, 10, 10, 20), PRECISION); assertEquals(19, elevationInterpolator.calculateElevationBasedOnTwoPoints(8, 8, -10, -10, 10, 10, 10, 20), PRECISION); assertEquals(10, elevationInterpolator.calculateElevationBasedOnTwoPoints(0, 0, -ElevationInterpolator.EPSILON / 3, 0, 10, ElevationInterpolator.EPSILON / 2, 0, 20), PRECISION); assertEquals(20, elevationInterpolator.calculateElevationBasedOnTwoPoints(0, 0, -ElevationInterpolator.EPSILON / 2, 0, 10, ElevationInterpolator.EPSILON / 3, 0, 20), PRECISION); assertEquals(10, elevationInterpolator.calculateElevationBasedOnTwoPoints(0, 0, 0, 0, 10, 0, 0, 20), PRECISION); }
/**
 * Builds an IPv4 address from its 32-bit integer representation
 * (big-endian byte order, as produced by {@code ByteBuffer.putInt}).
 */
public static IpAddress valueOf(int value) {
    final ByteBuffer buffer = ByteBuffer.allocate(INET_BYTE_LENGTH);
    buffer.putInt(value);
    return new IpAddress(Version.INET, buffer.array());
}
/**
 * An offset that leaves fewer bytes than an IPv4 address needs must be rejected
 * with IllegalArgumentException (the assigned local exists only to invoke the call).
 */
@Test(expected = IllegalArgumentException.class) public void testInvalidValueOfArrayInvalidOffsetIPv4() { IpAddress ipAddress; byte[] value; value = new byte[] {11, 22, 33, // Preamble 1, 2, 3, 4, 44, 55}; // Extra bytes ipAddress = IpAddress.valueOf(IpAddress.Version.INET, value, 6); }
/**
 * Converts a Geometry slice into a SphericalGeography slice.
 * Validates that the geometry's envelope lies within legal latitude/longitude ranges,
 * rejects 3D geometries, and rejects any sub-geometry whose type is unsupported for
 * spherical geography. On success the input slice is returned unchanged (the two
 * representations share the same serialized form).
 */
@Description("Converts a Geometry object to a SphericalGeography object")
@ScalarFunction("to_spherical_geography")
@SqlType(SPHERICAL_GEOGRAPHY_TYPE_NAME)
public static Slice toSphericalGeography(@SqlType(GEOMETRY_TYPE_NAME) Slice input) {
    // "every point in input is in range" <=> "the envelope of input is in range"
    Envelope envelope = deserializeEnvelope(input);
    if (!envelope.isEmpty()) {
        checkLatitude(envelope.getYMin());
        checkLatitude(envelope.getYMax());
        checkLongitude(envelope.getXMin());
        checkLongitude(envelope.getXMax());
    }
    OGCGeometry geometry = EsriGeometrySerde.deserialize(input);
    if (geometry.is3D()) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Cannot convert 3D geometry to a spherical geography");
    }
    // Walk every sub-geometry (e.g. members of a GEOMETRYCOLLECTION) and type-check it.
    GeometryCursor cursor = geometry.getEsriGeometryCursor();
    while (true) {
        com.esri.core.geometry.Geometry subGeometry = cursor.next();
        if (subGeometry == null) {
            break;
        }
        if (!GEOMETRY_TYPES_FOR_SPHERICAL_GEOGRAPHY.contains(subGeometry.getType())) {
            throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Cannot convert geometry of this type to spherical geography: " + subGeometry.getType());
        }
    }
    return input;
}
/**
 * Round-trip check over a broad set of WKT shapes (empty and populated): writing each
 * through toSphericalGeography into a block and reading it back via getObjectValue
 * must reproduce the original WKT text.
 */
@Test public void testGetObjectValue() { List<String> wktList = ImmutableList.of( "POINT EMPTY", "MULTIPOINT EMPTY", "LINESTRING EMPTY", "MULTILINESTRING EMPTY", "POLYGON EMPTY", "MULTIPOLYGON EMPTY", "GEOMETRYCOLLECTION EMPTY", "POINT (-40.2 28.9)", "MULTIPOINT ((-40.2 28.9), (-40.2 31.9))", "LINESTRING (-40.2 28.9, -40.2 31.9, -37.2 31.9)", "MULTILINESTRING ((-40.2 28.9, -40.2 31.9), (-40.2 31.9, -37.2 31.9))", "POLYGON ((-40.2 28.9, -37.2 28.9, -37.2 31.9, -40.2 31.9, -40.2 28.9))", "POLYGON ((-40.2 28.9, -37.2 28.9, -37.2 31.9, -40.2 31.9, -40.2 28.9), (-39.2 29.9, -39.2 30.9, -38.2 30.9, -38.2 29.9, -39.2 29.9))", "MULTIPOLYGON (((-40.2 28.9, -37.2 28.9, -37.2 31.9, -40.2 31.9, -40.2 28.9)), ((-39.2 29.9, -38.2 29.9, -38.2 30.9, -39.2 30.9, -39.2 29.9)))", "GEOMETRYCOLLECTION (POINT (-40.2 28.9), LINESTRING (-40.2 28.9, -40.2 31.9, -37.2 31.9), POLYGON ((-40.2 28.9, -37.2 28.9, -37.2 31.9, -40.2 31.9, -40.2 28.9)))"); BlockBuilder builder = SPHERICAL_GEOGRAPHY.createBlockBuilder(null, wktList.size()); for (String wkt : wktList) { SPHERICAL_GEOGRAPHY.writeSlice(builder, toSphericalGeography(GeoFunctions.stGeometryFromText(utf8Slice(wkt)))); } Block block = builder.build(); for (int i = 0; i < wktList.size(); i++) { assertEquals(wktList.get(i), SPHERICAL_GEOGRAPHY.getObjectValue(null, block, i)); } }
/**
 * Stores the JobManager address. For the LOCAL gateway type with a configured
 * rest.port in configJson, the configured port overrides the address's port: it is
 * appended when the address has no colon, or replaces the trailing numeric port
 * otherwise. For all other gateway types the address is stored unchanged.
 */
public void setAddress(String address) {
    if (GatewayType.LOCAL.equalsValue(type)
            && Asserts.isNotNull(configJson)
            && configJson.containsKey(RestOptions.PORT.key())) {
        int colonIndex = address.indexOf(':');
        if (colonIndex == -1) {
            // No port present: append the configured one.
            this.address = address + NetConstant.COLON + configJson.get(RestOptions.PORT.key());
        } else {
            // Replace the trailing numeric port (up to 6 digits) after the colon.
            this.address = address.replaceAll("(?<=:)\\d{0,6}$", configJson.get(RestOptions.PORT.key()));
        }
    } else {
        this.address = address;
    }
}
/**
 * LOCAL gateway: the configured rest.port overrides/appends the address port; once the
 * port config is removed, or for a non-LOCAL gateway type, the address is kept as given.
 */
@Test void setAddress() { JobConfig jobConfig = new JobConfig(); jobConfig.setAddress("127.0.0.1:8888"); jobConfig.setType(GatewayType.LOCAL.getValue()); Map<String, String> config = new HashMap<>(); config.put(RestOptions.PORT.key(), "9999"); jobConfig.setConfigJson(config); jobConfig.setAddress("127.0.0.1:7777"); assertEquals("127.0.0.1:9999", jobConfig.getAddress()); jobConfig.setAddress("127.0.0.2"); assertEquals("127.0.0.2:9999", jobConfig.getAddress()); config.remove(RestOptions.PORT.key()); jobConfig.setAddress("127.0.0.2:6666"); assertEquals("127.0.0.2:6666", jobConfig.getAddress()); jobConfig.setType(GatewayType.STANDALONE.getLongValue()); jobConfig.setAddress("127.0.0.3:6666"); assertEquals("127.0.0.3:6666", jobConfig.getAddress()); }
/**
 * Fetches a single Bitbucket Server repository through the REST API.
 *
 * @param serverUrl base URL of the Bitbucket Server instance
 * @param token     personal access token used for authentication
 * @param project   project key owning the repository
 * @param repoSlug  repository slug
 * @return the repository parsed from the JSON response
 */
public Repository getRepo(String serverUrl, String token, String project, String repoSlug) {
    final String path = format("/rest/api/1.0/projects/%s/repos/%s", project, repoSlug);
    final HttpUrl url = buildUrl(serverUrl, path);
    return doGet(token, url, body -> buildGson().fromJson(body, Repository.class));
}
/**
 * Enqueues a canned JSON repository payload and verifies getRepo deserializes the
 * repository fields (id, name, slug) and the nested project (id, key, name).
 */
@Test public void get_repo() { server.enqueue(new MockResponse() .setHeader("Content-Type", "application/json;charset=UTF-8") .setBody( " {" + " \"slug\": \"banana-slug\"," + " \"id\": 2,\n" + " \"name\": \"banana\"," + " \"project\": {\n" + " \"key\": \"HOY\"," + " \"id\": 3,\n" + " \"name\": \"hoy\"" + " }" + " }")); Repository repository = underTest.getRepo(server.url("/").toString(), "token", "", ""); assertThat(repository.getId()).isEqualTo(2L); assertThat(repository.getName()).isEqualTo("banana"); assertThat(repository.getSlug()).isEqualTo("banana-slug"); assertThat(repository.getProject()) .extracting(Project::getId, Project::getKey, Project::getName) .contains(3L, "HOY", "hoy"); }
/**
 * Adds an X.509 certificate after verification and persists it.
 * The very first CSCA certificate for a document type is only self-verified (its
 * trusted flag must be set manually, hence the warning); subsequent certificates are
 * verified against the existing chain, optionally tolerating expiry when
 * allowAddingExpired is set. Verification failures surface as BadRequestException;
 * encoding failures as RuntimeException.
 */
public Certificate add(X509Certificate cert) {
    final Certificate db;
    try {
        db = Certificate.from(cert);
    } catch (CertificateEncodingException e) {
        logger.error("Encoding error in certificate", e);
        throw new RuntimeException("Encoding error in certificate", e);
    }
    try {
        // Special case for first CSCA certificate for this document type
        if (repository.countByDocumentType(db.getDocumentType()) == 0) {
            // Self-signature check only; trust is granted manually later.
            cert.verify(cert.getPublicKey());
            logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType());
        } else {
            // Passing a date relaxes the expiry check to "valid at notAfter".
            verify(cert, allowAddingExpired ? cert.getNotAfter() : null);
        }
    } catch (GeneralSecurityException | VerificationException e) {
        logger.error(
                String.format("Could not verify certificate of %s issued by %s",
                        cert.getSubjectX500Principal(),
                        cert.getIssuerX500Principal()
                ), e
        );
        throw new BadRequestException("Could not verify certificate", e);
    }
    return repository.saveAndFlush(db);
}
/**
 * The first CSCA certificate of a document type may be added without chain verification,
 * but must be stored with trusted == false until flagged manually.
 */
@Test
public void shouldAllowToAddCertificateIfFirstOfDocumentType() throws Exception {
    // An unrelated trusted certificate of a different document type is already present.
    certificateRepo.saveAndFlush(loadCertificate("rdw/01.cer", true));
    final X509Certificate cert = readCertificate("npkd/01.cer");
    final Certificate dbCert = service.add(cert);
    assertEquals(X509Factory.toCanonical(cert.getSubjectX500Principal()), dbCert.getSubject());
    assertEquals(false, dbCert.isTrusted());
}
/**
 * Executes this feature run. When a processor is configured, execution is delegated
 * to it entirely. Otherwise the before-feature hook gates scenario processing
 * (a [false] return aborts the scenarios), and afterFeature() runs in every
 * non-processor path — including the aborted one.
 */
@Override
public void run() {
    if (processor != null) {
        processor.execute();
    } else {
        if (!beforeHook()) {
            logger.info("before-feature hook returned [false], aborting: {}", this);
        } else {
            scenarios.forEachRemaining(this::processScenario);
        }
        // Intentionally runs even when the before-hook aborted scenario execution.
        afterFeature();
    }
}
/** Runs the js-map-repeat feature file end-to-end. */
@Test
void testJsMapRepeat() {
    run("js-map-repeat.feature");
}
/**
 * Returns the dotted-quad subnet mask for the given mask bit count
 * (e.g. 24 -> "255.255.255.0") by delegating to the MaskBit lookup.
 */
public static String getMaskByMaskBit(int maskBit) {
    return MaskBit.get(maskBit);
}
/** 24 mask bits correspond to the dotted-quad mask 255.255.255.0. */
@Test
public void getMaskByMaskBitTest() {
    assertEquals("255.255.255.0", Ipv4Util.getMaskByMaskBit(24));
}
// Ensures every requested internal topic exists with the expected partition count,
// creating missing ones, and loops (sleeping retryBackOffMs between rounds) until all
// topics are ready or retryTimeoutMs elapses — then a TimeoutException is thrown.
// Per-round behaviour: validateTopics() returns the still-unready set; topics flagged
// "temporarily unknown" are NOT created this round (they may exist but be unreachable).
// During creation, TopicExistsException is treated as transient (topic likely pending
// deletion) and retried; an UnsupportedVersionException about default partitions/
// replication factor becomes a descriptive StreamsException; creation timeouts are
// logged and retried; any other failure becomes a StreamsException with its cause.
// InterruptedException re-interrupts the thread and raises IllegalStateException (bug).
// Returns the set of topic names this call actually created.
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) { // we will do the validation / topic-creation in a loop, until we have confirmed all topics // have existed with the expected number of partitions, or some create topic returns fatal errors. log.debug("Starting to validate internal topics {} in partition assignor.", topics); long currentWallClockMs = time.milliseconds(); final long deadlineMs = currentWallClockMs + retryTimeoutMs; Set<String> topicsNotReady = new HashSet<>(topics.keySet()); final Set<String> newlyCreatedTopics = new HashSet<>(); while (!topicsNotReady.isEmpty()) { final Set<String> tempUnknownTopics = new HashSet<>(); topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics); newlyCreatedTopics.addAll(topicsNotReady); if (!topicsNotReady.isEmpty()) { final Set<NewTopic> newTopics = new HashSet<>(); for (final String topicName : topicsNotReady) { if (tempUnknownTopics.contains(topicName)) { // for the tempUnknownTopics, don't create topic for them // we'll check again later if remaining retries > 0 continue; } final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName)); final Map<String, String> topicConfig = internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention); log.debug("Going to create topic {} with {} partitions and config {}.", internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), topicConfig); newTopics.add( new NewTopic( internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), Optional.of(replicationFactor)) .configs(topicConfig)); } // it's possible that although some topics are not ready yet because they // are temporarily not available, not that they do not exist; in this case // the new topics to create may be empty and hence we can skip here if (!newTopics.isEmpty()) { final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics); for (final Map.Entry<String, KafkaFuture<Void>> 
createTopicResult : createTopicsResult.values().entrySet()) { final String topicName = createTopicResult.getKey(); try { createTopicResult.getValue().get(); topicsNotReady.remove(topicName); } catch (final InterruptedException fatalException) { // this should not happen; if it ever happens it indicate a bug Thread.currentThread().interrupt(); log.error(INTERRUPTED_ERROR_MESSAGE, fatalException); throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException); } catch (final ExecutionException executionException) { final Throwable cause = executionException.getCause(); if (cause instanceof TopicExistsException) { // This topic didn't exist earlier or its leader not known before; just retain it for next round of validation. log.info( "Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n" + "Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n" + "Error message was: {}", topicName, retryBackOffMs, cause.toString()); } else { log.error("Unexpected error during topic creation for {}.\n" + "Error message was: {}", topicName, cause.toString()); if (cause instanceof UnsupportedVersionException) { final String errorMessage = cause.getMessage(); if (errorMessage != null && errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) { throw new StreamsException(String.format( "Could not create topic %s, because brokers don't support configuration replication.factor=-1." 
+ " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.", topicName) ); } } else if (cause instanceof TimeoutException) { log.error("Creating topic {} timed out.\n" + "Error message was: {}", topicName, cause.toString()); } else { throw new StreamsException( String.format("Could not create topic %s.", topicName), cause ); } } } } } } if (!topicsNotReady.isEmpty()) { currentWallClockMs = time.milliseconds(); if (currentWallClockMs >= deadlineMs) { final String timeoutError = String.format("Could not create topics within %d milliseconds. " + "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs); log.error(timeoutError); throw new TimeoutException(timeoutError); } log.info( "Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}", topicsNotReady, retryBackOffMs, deadlineMs - currentWallClockMs ); Utils.sleep(retryBackOffMs); } } log.debug("Completed validating internal topics and created {}", newlyCreatedTopics); return newlyCreatedTopics; }
/**
 * When describeTopics keeps failing with LeaderNotAvailableException, makeReady must
 * eventually give up with a TimeoutException (no cause, the documented timeout message)
 * once the mocked clock exhausts the retry budget.
 */
@Test public void shouldThrowExceptionWhenKeepsTopicLeaderNotAvailable() { final AdminClient admin = mock(AdminClient.class); final MockTime time = new MockTime( (Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 15 ); final InternalTopicManager topicManager = new InternalTopicManager( time, admin, new StreamsConfig(config) ); final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>(); topicDescriptionFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!")); // simulate describeTopics got LeaderNotAvailableException when(admin.describeTopics(Collections.singleton(topic1))) .thenAnswer(answer -> new MockDescribeTopicsResult( Collections.singletonMap(topic1, topicDescriptionFailFuture))); final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap()); internalTopicConfig.setNumberOfPartitions(1); final TimeoutException exception = assertThrows( TimeoutException.class, () -> topicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig)) ); assertNull(exception.getCause()); assertThat( exception.getMessage(), equalTo("Could not create topics within 50 milliseconds." + " This can happen if the Kafka cluster is temporarily not available.") ); }
/**
 * Loads the named Model implementation through the runtime context's classloader
 * and instantiates it via its no-arg constructor.
 *
 * @throws KieRuntimeServiceException wrapping any class-loading or instantiation failure
 */
static Model loadModel(String fullModelResourcesSourceClassName, EfestoRuntimeContext context) {
    try {
        final Class<? extends Model> modelClass = context.loadClass(fullModelResourcesSourceClassName);
        return modelClass.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
        throw new KieRuntimeServiceException(e);
    }
}
/** loadModel should locate and instantiate the generated model class via the context classloader. */
@Test
void loadModel() {
    EfestoRuntimeContext context = EfestoRuntimeContextUtils.buildWithParentClassLoader(Thread.currentThread().getContextClassLoader());
    Model retrieved = EfestoKieSessionUtil.loadModel(fullModelResourcesSourceClassName, context);
    assertThat(retrieved).isNotNull();
}
/**
 * Issues CONFIG RESETSTAT against the given cluster node and blocks until it completes.
 */
@Override
public void resetConfigStats(RedisClusterNode node) {
    RedisClient entry = getEntry(node);
    RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
    // Synchronously wait for the async command to finish.
    syncFuture(f);
}
/** Smoke test: resetting config stats on the first master must not throw. */
@Test
public void testResetConfigStats() {
    RedisClusterNode master = getFirstMaster();
    connection.resetConfigStats(master);
}
/**
 * Creates a composite checker that combines the given checkers with AND or OR.
 *
 * @throws IllegalArgumentException when no checkers are supplied or the operator is unknown
 */
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator,
                                                                   EvictionChecker... evictionCheckers) {
    Preconditions.isNotNull(compositionOperator, "composition");
    Preconditions.isNotNull(evictionCheckers, "evictionCheckers");
    if (evictionCheckers.length == 0) {
        throw new IllegalArgumentException("EvictionCheckers cannot be empty!");
    }
    if (compositionOperator == CompositionOperator.AND) {
        return new CompositeEvictionCheckerWithAndComposition(evictionCheckers);
    }
    if (compositionOperator == CompositionOperator.OR) {
        return new CompositeEvictionCheckerWithOrComposition(evictionCheckers);
    }
    throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator);
}
/** OR composition must report eviction when every underlying checker requires it. */
@Test
public void resultShouldReturnTrue_whenAllIsTrue_withOrCompositionOperator() {
    EvictionChecker evictionChecker1ReturnsTrue = mock(EvictionChecker.class);
    EvictionChecker evictionChecker2ReturnsTrue = mock(EvictionChecker.class);
    when(evictionChecker1ReturnsTrue.isEvictionRequired()).thenReturn(true);
    when(evictionChecker2ReturnsTrue.isEvictionRequired()).thenReturn(true);
    CompositeEvictionChecker compositeEvictionChecker = CompositeEvictionChecker.newCompositeEvictionChecker( CompositeEvictionChecker.CompositionOperator.OR, evictionChecker1ReturnsTrue, evictionChecker2ReturnsTrue);
    assertTrue(compositeEvictionChecker.isEvictionRequired());
}
/** Returns the future tracking the stop-with-savepoint operation's result. */
CompletableFuture<String> getOperationFuture() {
    return operationFuture;
}
/**
 * When the job reaches FINISHED before the savepoint future completes, the operation
 * future must still resolve to the savepoint path once the savepoint arrives.
 */
@Test void testJobFinishedBeforeSavepointFuture() throws Exception { try (MockStopWithSavepointContext ctx = new MockStopWithSavepointContext()) { StateTrackingMockExecutionGraph mockExecutionGraph = new StateTrackingMockExecutionGraph(); CompletableFuture<String> savepointFuture = new CompletableFuture<>(); StopWithSavepoint sws = createStopWithSavepoint(ctx, mockExecutionGraph, savepointFuture); ctx.setStopWithSavepoint(sws); ctx.setExpectFinished(assertNonNull()); mockExecutionGraph.completeTerminationFuture(JobStatus.FINISHED); savepointFuture.complete(SAVEPOINT_PATH); ctx.triggerExecutors(); assertThat(sws.getOperationFuture().get()).isEqualTo(SAVEPOINT_PATH); } }
/**
 * Converts a persisted measure DTO into a {@code Measure}, dispatching on the metric's
 * value type. A {@code null} DTO yields an empty Optional; an unknown value type is a
 * programming error and raises IllegalArgumentException.
 */
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) {
    requireNonNull(metric);
    if (measureDto == null) {
        return Optional.empty();
    }
    final Double rawValue = measureDto.getValue();
    final String rawData = measureDto.getData();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(measureDto, rawValue, rawData);
        case LONG:
            return toLongMeasure(measureDto, rawValue, rawData);
        case DOUBLE:
            return toDoubleMeasure(measureDto, rawValue, rawData);
        case BOOLEAN:
            return toBooleanMeasure(measureDto, rawValue, rawData);
        case STRING:
            return toStringMeasure(measureDto, rawData);
        case LEVEL:
            return toLevelMeasure(measureDto, rawData);
        case NO_VALUE:
            return toNoValueMeasure(measureDto);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
/** A double-valued DTO read against a LONG metric keeps only the integral part (1.5 -> 1). */
@Test
public void toMeasure_returns_long_part_of_value_in_dto_for_Long_Metric() {
    Optional<Measure> measure = underTest.toMeasure(new MeasureDto().setValue(1.5d), SOME_LONG_METRIC);
    assertThat(measure).isPresent();
    assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.LONG);
    assertThat(measure.get().getLongValue()).isOne();
}
/**
 * Opens an input stream for the file's content on Google Drive.
 * <p>
 * Placeholder files are materialized as a generated web-link document. For regular
 * files, an optional range header is added for resumed transfers; duplicate entries
 * are read as a previous revision. If the initial download is denied (e.g. flagged
 * as abusive), the user is warned once and the download is retried with
 * {@code acknowledgeAbuse} set.
 *
 * @param file the file to read
 * @param status transfer status; append mode triggers a ranged request
 * @param callback used to prompt the user before an acknowledged-abuse retry
 * @throws BackgroundException on any failed Drive API call
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    if(file.isPlaceholder()) {
        final DescriptiveUrl link = new DriveUrlProvider().toUrl(file).find(DescriptiveUrl.Type.http);
        if(DescriptiveUrl.EMPTY.equals(link)) {
            log.warn(String.format("Missing web link for file %s", file));
            // No link available: return an empty stream of the advertised size
            return new NullInputStream(file.attributes().getSize());
        }
        // Write web link file
        return IOUtils.toInputStream(UrlFileWriterFactory.get().write(link), Charset.defaultCharset());
    }
    else {
        final HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MEDIA_TYPE);
        if(status.isAppend()) {
            // Resumed transfer: request only the remaining byte range
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            headers.setRange(header);
            // Disable compression so ranges map to raw bytes
            headers.setAcceptEncoding("identity");
        }
        if(file.attributes().isDuplicate()) {
            // Read previous version (revision) of the file
            try {
                final Drive.Revisions.Get request = session.getClient().revisions().get(fileid.getFileId(file), file.attributes().getVersionId());
                request.setRequestHeaders(headers);
                return request.executeMediaAsInputStream();
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
            }
        }
        else {
            try {
                try {
                    final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
                    request.setRequestHeaders(headers);
                    request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
                    return request.executeMediaAsInputStream();
                }
                catch(IOException e) {
                    throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
                }
            }
            catch(RetriableAccessDeniedException e) {
                // Transient denial: propagate so the caller can retry later
                throw e;
            }
            catch(AccessDeniedException e) {
                if(!PreferencesFactory.get().getBoolean(String.format("connection.unsecure.download.%s", session.getHost().getHostname()))) {
                    // Not previously dismissed: warn before downloading flagged content
                    callback.warn(session.getHost(),
                        MessageFormat.format(LocaleFactory.localizedString("Download {0} failed", "Error"), file.getName()),
                        "Acknowledge the risk of downloading known malware or other abusive file.",
                        LocaleFactory.localizedString("Continue", "Credentials"),
                        LocaleFactory.localizedString("Cancel", "Localizable"),
                        String.format("connection.unsecure.download.%s", session.getHost().getHostname()));
                }
                // Retry the same request, acknowledging the abuse flag
                try {
                    final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
                    request.setAcknowledgeAbuse(true);
                    request.setRequestHeaders(headers);
                    request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
                    return request.executeMediaAsInputStream();
                }
                catch(IOException f) {
                    throw new DriveExceptionMappingService(fileid).map("Download {0} failed", f, file);
                }
            }
        }
    }
}
// Uploads 1023 random bytes, then reads with offset 100 (append/ranged request) and
// verifies the stream equals the uploaded content from that offset onward.
@Test
public void testReadRange() throws Exception {
    final String name = "ä-" + new AlphanumericRandomStringService().random();
    final Path test = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, name, EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
    final byte[] content = RandomUtils.nextBytes(1023);
    final OutputStream out = local.getOutputStream(false);
    assertNotNull(out);
    IOUtils.write(content, out);
    out.close();
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    new DriveUploadFeature(session, fileid).upload(
        test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(),
        new TransferStatus().withLength(content.length),
        new DisabledConnectionCallback());
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    status.setAppend(true);
    status.setOffset(100L);
    final InputStream in = new DriveReadFeature(session, fileid).read(test, status.withLength(content.length - 100), new DisabledConnectionCallback());
    assertNotNull(in);
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
    new StreamCopier(status, status).transfer(in, buffer);
    // Expected bytes: the original content minus the first 100 bytes
    final byte[] reference = new byte[content.length - 100];
    System.arraycopy(content, 100, reference, 0, content.length - 100);
    assertArrayEquals(reference, buffer.toByteArray());
    in.close();
    new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Forcibly terminates a running container via {@code docker kill}.
 *
 * @param containerId 64-character hexadecimal container id; must not be null
 * @throws IOException if running the docker command fails
 * @throws TimeoutException if the command does not complete in time
 * @throws InterruptedException if interrupted while waiting for the command
 */
public void killContainer(String containerId) throws IOException, TimeoutException, InterruptedException {
    checkArgument(containerId != null);
    checkArgument(
        CONTAINER_ID_PATTERN.matcher(containerId).matches(),
        "Container ID must be a 64-character hexadecimal string");
    runShortCommand(Arrays.asList(dockerExecutable, "kill", containerId));
}
// Starts a container sleeping for 60s, kills it, and verifies termination happened
// well before the sleep would have ended (i.e. kill actually took effect).
@Test
public void killContainer() throws Exception {
    DockerCommand docker = DockerCommand.getDefault();
    String container = docker.runImage(
        "debian", ImmutableList.of(), ImmutableList.of("/bin/bash", "-c", "sleep 60"));
    Stopwatch stopwatch = Stopwatch.createStarted();
    assertThat("Container should be running.", docker.isContainerRunning(container), is(true));
    docker.killContainer(container);
    long elapsedSec = stopwatch.elapsed(TimeUnit.SECONDS);
    assertThat(
        "Container termination should complete before image self-exits",
        elapsedSec, is(lessThan(60L)));
    assertThat("Container should be terminated.", docker.isContainerRunning(container), is(false));
}
// Synchronized pass-through: serializes access to the wrapped instance's partition timestamp.
@Override
synchronized long partitionTimestamp(final TopicPartition partition) {
    return wrapped.partitionTimestamp(partition);
}
// partitionTimestamp() must delegate exactly once to the wrapped group and return its value.
@Test
public void testPartitionTimestamp() {
    final TopicPartition partition = new TopicPartition("topic", 0);
    final long timestamp = 12345678L;
    when(wrapped.partitionTimestamp(partition)).thenReturn(timestamp);
    final long result = synchronizedPartitionGroup.partitionTimestamp(partition);
    assertEquals(timestamp, result);
    verify(wrapped, times(1)).partitionTimestamp(partition);
}
/**
 * Registers the rate-limiter {@code ShenyuPlugin} backed by a Redis-based limiter.
 *
 * @return the rate limiter plugin bean
 */
@Bean
public ShenyuPlugin rateLimiterPlugin() {
    return new RateLimiterPlugin(new RedisRateLimiter());
}
// The auto-configuration must expose a single rateLimiterPlugin bean whose name
// matches the RATE_LIMITER plugin enum, plus a PluginDataHandler.
@Test
public void testRateLimiterPlugin() {
    new ApplicationContextRunner()
        .withConfiguration(AutoConfigurations.of(RateLimiterPluginConfiguration.class))
        .withBean(RateLimiterPluginConfigurationTest.class)
        .withPropertyValues("debug=true")
        .run(context -> {
            assertThat(context).hasSingleBean(PluginDataHandler.class);
            ShenyuPlugin plugin = context.getBean("rateLimiterPlugin", ShenyuPlugin.class);
            assertNotNull(plugin);
            assertThat(plugin.named()).isEqualTo(PluginEnum.RATE_LIMITER.getName());
        });
}
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse) || monitoringDisabled || !instanceEnabled) { // si ce n'est pas une requête http ou si le monitoring est désactivé, on fait suivre chain.doFilter(request, response); return; } final HttpServletRequest httpRequest = (HttpServletRequest) request; final HttpServletResponse httpResponse = (HttpServletResponse) response; if (httpRequest.getRequestURI().equals(getMonitoringUrl(httpRequest))) { doMonitoring(httpRequest, httpResponse); return; } if (!httpCounter.isDisplayed() || isRequestExcluded((HttpServletRequest) request)) { // si cette url est exclue ou si le counter http est désactivé, on ne monitore pas cette requête http chain.doFilter(request, response); return; } doFilter(chain, httpRequest, httpResponse); }
// End-to-end Real User Monitoring flow: serves an html page and the boomerang.min.js
// resource, registers RUM timings via monitoring?part=rum, then renders the request
// details in html (both periods) and pdf. RUM_ENABLED is restored in the finally block.
@Test
public void testDoMonitoringWithRum() throws ServletException, IOException {
    try {
        setProperty(Parameter.RUM_ENABLED, Boolean.TRUE.toString());
        setUp();
        // simulate html page with RUM
        final HttpServletRequest requestForRum = createNiceMock(HttpServletRequest.class);
        expect(requestForRum.getHeader("accept")).andReturn("text/html");
        expect(requestForRum.getInputStream())
            .andReturn(createInputStreamForString("<html><body>test</body></html>"))
            .anyTimes();
        doFilter(requestForRum);
        // simulate call to monitoring?resource=boomerang.min.js
        monitoring(Collections.singletonMap(HttpParameter.RESOURCE, "boomerang.min.js"));
        monitoring(Collections.emptyMap());
        monitoring(Collections.singletonMap(HttpParameter.PART, HttpPart.RUM.getName()), false);
        // simulate call to monitoring?part=rum to register RUM data
        final Map<String, String> rumMap = new HashMap<>();
        rumMap.put(HttpParameter.PART.getName(), HttpPart.RUM.getName());
        rumMap.put("requestName", TEST_REQUEST + " GET");
        rumMap.put("serverTime", "100");
        rumMap.put("timeToFirstByte", "100");
        rumMap.put("domProcessing", "50");
        rumMap.put("pageRendering", "50");
        monitoring0(rumMap, false);
        // simulate call to monitoring for details of request with RUM data in html (period=jour : rumHits=0)
        final Map<HttpParameter, String> graphMap = new HashMap<>();
        graphMap.put(HttpParameter.PART, HttpPart.GRAPH.getName());
        final String requestId = new CounterRequest(TEST_REQUEST + " GET", Counter.HTTP_COUNTER_NAME).getId();
        graphMap.put(HttpParameter.GRAPH, requestId);
        monitoring(graphMap);
        // simulate call to monitoring for details of request with RUM data in html (period=tout : rumHits>0)
        graphMap.put(HttpParameter.PERIOD, Period.TOUT.getCode());
        monitoring(graphMap);
        // simulate call to monitoring for details of request with RUM data in pdf
        graphMap.put(HttpParameter.FORMAT, "pdf");
        monitoring(graphMap);
    } finally {
        setProperty(Parameter.RUM_ENABLED, null);
    }
}
/**
 * Generates and defines the accessor class for {@code beanClass} in that bean's own
 * classloader, returning whether the accessor class is (now) available.
 * <p>
 * Fast path: if the accessor class is already loadable, returns true immediately.
 * Otherwise uses a two-level check (global {@code defineLock} to look up / create a
 * per-class lock, then the per-class lock to generate + define once) so concurrent
 * callers for the same bean class only generate the code a single time.
 *
 * @param beanClass the bean class to generate an accessor for
 * @return true if the accessor class exists or was successfully defined
 */
public static boolean defineAccessorClass(Class<?> beanClass) {
    ClassLoader classLoader = beanClass.getClassLoader();
    if (classLoader == null) {
        // Maybe return null if this class was loaded by the bootstrap class loader.
        return false;
    }
    String qualifiedClassName = qualifiedAccessorClassName(beanClass);
    try {
        // Fast path: already defined in this classloader
        classLoader.loadClass(qualifiedClassName);
        return true;
    } catch (ClassNotFoundException ignored) {
        Object lock;
        synchronized (defineLock) {
            // Re-check under the global lock: another thread may already have finished
            if (defineAccessorStatus.containsKey(beanClass)) {
                return defineAccessorStatus.get(beanClass);
            } else {
                lock = getDefineLock(beanClass);
            }
        }
        synchronized (lock) {
            // Second check under the per-class lock (double-checked definition)
            if (defineAccessorStatus.containsKey(beanClass)) {
                return defineAccessorStatus.get(beanClass);
            }
            long startTime = System.nanoTime();
            String code = genCode(beanClass);
            // nanoseconds -> milliseconds
            long durationMs = (System.nanoTime() - startTime) / 1000_000;
            LOG.info("Generate code {} take {} ms", qualifiedClassName, durationMs);
            String pkg = CodeGenerator.getPackage(beanClass);
            CompileUnit compileUnit = new CompileUnit(pkg, accessorClassName(beanClass), code);
            Map<String, byte[]> classByteCodes = JaninoUtils.toBytecode(classLoader, compileUnit);
            boolean succeed = ClassLoaderUtils.tryDefineClassesInClassLoader(
                    qualifiedClassName, beanClass, classLoader,
                    classByteCodes.values().iterator().next()) != null;
            // Record the outcome so later callers skip regeneration
            defineAccessorStatus.put(beanClass, succeed);
            if (!succeed) {
                LOG.info("Define accessor {} in classloader {} failed.", qualifiedClassName, classLoader);
            }
            return succeed;
        }
    }
}
// defineAccessorClass must be idempotent and exception-free when invoked concurrently
// for the same classes from many threads.
@Test
public void defineAccessorClassConcurrent() throws InterruptedException {
    ExecutorService executorService = Executors.newFixedThreadPool(10);
    AtomicBoolean hasException = new AtomicBoolean(false);
    for (int i = 0; i < 1000; i++) {
        executorService.execute(
            () -> {
                try {
                    assertTrue(AccessorHelper.defineAccessorClass(A.class));
                    assertTrue(AccessorHelper.defineAccessorClass(Foo.class));
                } catch (Exception e) {
                    hasException.set(true);
                }
            });
    }
    executorService.shutdown();
    assertTrue(executorService.awaitTermination(30, TimeUnit.SECONDS));
    assertFalse(hasException.get());
}
/**
 * Invokes the user's acknowledgement commit callback once per acknowledged partition.
 * <p>
 * For each partition, the acknowledged offsets and the first acknowledge error (if any)
 * are passed to the callback. Callback failures are logged and collected; after all
 * partitions are processed, the first failure is rethrown (wrapped as a KafkaException
 * when necessary). {@code enteredCallback} brackets each invocation so re-entrant API
 * calls from within the callback can be detected.
 *
 * @param acknowledgementsMapList per-request maps of partition to acknowledgements
 */
void onComplete(List<Map<TopicIdPartition, Acknowledgements>> acknowledgementsMapList) {
    final ArrayList<Throwable> exceptions = new ArrayList<>();
    acknowledgementsMapList.forEach(acknowledgementsMap -> acknowledgementsMap.forEach((partition, acknowledgements) -> {
        Exception exception = null;
        if (acknowledgements.getAcknowledgeErrorCode() != null) {
            exception = acknowledgements.getAcknowledgeErrorCode().exception();
        }
        // Take a true immutable snapshot of the offsets: the callback may retain this
        // set, so it must not observe later mutations of the acknowledgements map.
        // (Collections.unmodifiableSet would only be a live view, not a copy.)
        Set<Long> offsetsCopy = Set.copyOf(acknowledgements.getAcknowledgementsTypeMap().keySet());
        enteredCallback = true;
        try {
            acknowledgementCommitCallback.onComplete(Collections.singletonMap(partition, offsetsCopy), exception);
        } catch (Throwable e) {
            LOG.error("Exception thrown by acknowledgement commit callback", e);
            exceptions.add(e);
        } finally {
            enteredCallback = false;
        }
    }));
    if (!exceptions.isEmpty()) {
        // Surface only the first callback failure to the caller.
        throw ConsumerUtils.maybeWrapAsKafkaException(exceptions.get(0),
            "Exception thrown by acknowledgement commit callback");
    }
}
// The callback handler must surface the per-partition acknowledge error (or null) for
// every offset across multiple partitions and multiple acknowledgement maps.
@Test
public void testMultiplePartitions() throws Exception {
    Acknowledgements acknowledgements = Acknowledgements.empty();
    acknowledgements.add(0L, AcknowledgeType.ACCEPT);
    acknowledgements.add(1L, AcknowledgeType.REJECT);
    acknowledgements.setAcknowledgeErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED);
    acknowledgementsMap.put(tip0, acknowledgements);
    Acknowledgements acknowledgements1 = Acknowledgements.empty();
    acknowledgements1.add(0L, AcknowledgeType.RELEASE);
    acknowledgements1.setAcknowledgeErrorCode(Errors.INVALID_RECORD_STATE);
    acknowledgementsMap.put(tip1, acknowledgements1);
    Map<TopicIdPartition, Acknowledgements> acknowledgementsMap2 = new HashMap<>();
    Acknowledgements acknowledgements2 = Acknowledgements.empty();
    acknowledgements2.add(0L, AcknowledgeType.ACCEPT);
    acknowledgementsMap2.put(tip2, acknowledgements2);
    List<Map<TopicIdPartition, Acknowledgements>> acknowledgementsMapList = new LinkedList<>();
    acknowledgementsMapList.add(acknowledgementsMap);
    acknowledgementsMapList.add(acknowledgementsMap2);
    acknowledgementCommitCallbackHandler.onComplete(acknowledgementsMapList);
    TestUtils.retryOnExceptionWithTimeout(() -> {
        assertInstanceOf(TopicAuthorizationException.class, exceptionMap.get(tpo00));
        assertInstanceOf(TopicAuthorizationException.class, exceptionMap.get(tpo01));
        assertInstanceOf(InvalidRecordStateException.class, exceptionMap.get(tpo10));
        // tip2 had no error code, so its offset must have a null exception
        assertNull(exceptionMap.get(tpo20));
    });
}
/**
 * Returns true if the given byte sequence is a full match for this codespace range,
 * delegating to isFullMatch with the sequence's own length as the code length.
 */
public boolean matches(byte[] code) {
    return isFullMatch(code, code.length);
}
// Exercises single-byte and two-byte codespace ranges: boundaries, interior values,
// out-of-range values, the rectangular (per-byte) constraint, and length mismatches.
@Test
void testMatches() {
    byte[] startBytes1 = { 0x00 };
    byte[] endBytes1 = { (byte) 0xA0 };
    CodespaceRange range1 = new CodespaceRange(startBytes1, endBytes1);
    // check start and end value
    assertTrue(range1.matches(new byte[] { 0x00 }));
    assertTrue(range1.matches(new byte[] { (byte) 0xA0 }));
    // check any value within range
    assertTrue(range1.matches(new byte[] { 0x10 }));
    // check first value out of range
    assertFalse(range1.matches(new byte[] { (byte) 0xA1 }));
    // check any value out of range
    assertFalse(range1.matches(new byte[] { (byte) 0xD0 }));
    // check any value with a different code length
    assertFalse(range1.matches(new byte[] { 0x00, 0x10 }));
    byte[] startBytes2 = { (byte) 0x81, 0x40 };
    byte[] endBytes2 = { (byte) 0x9F, (byte) 0xFC };
    CodespaceRange range2 = new CodespaceRange(startBytes2, endBytes2);
    // check lower start and end value
    assertTrue(range2.matches(new byte[] { (byte) 0x81, 0x40 }));
    assertTrue(range2.matches(new byte[] { (byte) 0x81, (byte) 0xFC }));
    // check higher start and end value
    assertTrue(range2.matches(new byte[] { (byte) 0x81, 0x40 }));
    assertTrue(range2.matches(new byte[] { (byte) 0x9F, 0x40 }));
    // check any value within lower range
    assertTrue(range2.matches(new byte[] { (byte) 0x81, 0x65 }));
    // check any value within higher range
    assertTrue(range2.matches(new byte[] { (byte) 0x90, 0x40 }));
    // check first value out of lower range
    assertFalse(range2.matches(new byte[] { (byte) 0x81, (byte) 0xFD }));
    // check first value out of higher range
    assertFalse(range2.matches(new byte[] { (byte) 0xA0, 0x40 }));
    // check any value out of lower range
    assertFalse(range2.matches(new byte[] { (byte) 0x81, 0x20 }));
    // check any value out of higher range
    assertFalse(range2.matches(new byte[] { 0x10, 0x40 }));
    // check value between start and end but not within the rectangular
    assertFalse(range2.matches(new byte[] { (byte) 0x82, 0x20 }));
    // check any value with a different code length
    assertFalse(range2.matches(new byte[] { 0x00 }));
}
// Computes group availability for the given cluster state and returns the set of
// node indexes that should be taken down.
public Set<Integer> nodesThatShouldBeDown(ClusterState state) {
    return calculate(state).nodesThatShouldBeDown();
}
// A node in the initializing ('i') state must not count as down, so no group
// take-down is triggered even with a 0.99 availability threshold.
@Test
void initializing_node_not_counted_as_down() {
    GroupAvailabilityCalculator calc = calcForHierarchicCluster(
        DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.99);
    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:6 storage:6 .4.s:i")), equalTo(emptySet()));
}
// Returns the version held by this manager.
@Override
public Version version() {
    return version;
}
// A freshly constructed VersionManager must always expose a non-null version.
@Test
public void testVersionManager() throws Exception {
    VersionManager versionManager = new VersionManager();
    assertNotNull(versionManager.version());
}
/**
 * Determines whether {@code column} is a non-value (key or pseudo) column.
 * <p>
 * With an explicit source alias, only that source's schema is consulted; without one,
 * the column matches if any known schema has it as a key column or it is a pseudo
 * column (and at least one source exists).
 *
 * @throws IllegalArgumentException if the supplied source alias is unknown
 */
boolean matchesNonValueField(final Optional<SourceName> source, final ColumnName column) {
    if (source.isPresent()) {
        final SourceName sourceName = source.get();
        final LogicalSchema sourceSchema = sourceSchemas.get(sourceName);
        if (sourceSchema == null) {
            throw new IllegalArgumentException("Unknown source: " + sourceName);
        }
        return sourceSchema.isKeyColumn(column) || SystemColumns.isPseudoColumn(column);
    }

    // No alias supplied: scan every known source schema.
    return sourceSchemas.values().stream()
        .anyMatch(candidate -> SystemColumns.isPseudoColumn(column) || candidate.isKeyColumn(column));
}
// An aliased pseudo column (ROWTIME) must be recognized as a non-value field.
@Test
public void shouldMatchNonValueFieldNameIfAliaasedMetaField() {
    assertThat(sourceSchemas.matchesNonValueField(Optional.of(ALIAS_2), SystemColumns.ROWTIME_NAME), is(true));
}
/**
 * Builds and persists a {@code Notification} for the subscriber referenced by the
 * given element. Nothing is created when the subscriber user cannot be fetched
 * (the fetch Mono completes empty).
 *
 * @param notificationElement carries the reason, subscriber, and rendered content
 * @return a Mono emitting the created notification
 */
Mono<Notification> createNotification(NotificationElement notificationElement) {
    var reason = notificationElement.reason();
    var subscriber = notificationElement.subscriber();
    return client.fetch(User.class, subscriber.name())
        .flatMap(user -> {
            Notification notification = new Notification();
            notification.setMetadata(new Metadata());
            notification.getMetadata().setGenerateName("notification-");
            notification.setSpec(new Notification.NotificationSpec());
            notification.getSpec().setTitle(notificationElement.notificationTitle());
            notification.getSpec().setRawContent(notificationElement.notificationRawBody());
            // FIX: use the accessor method, consistent with the other
            // NotificationElement reads above (was a bare field reference).
            notification.getSpec().setHtmlContent(notificationElement.notificationHtmlBody());
            notification.getSpec().setRecipient(subscriber.name());
            notification.getSpec().setReason(reason.getMetadata().getName());
            notification.getSpec().setUnread(true);
            return client.create(notification);
        });
}
// createNotification must fetch the subscriber user and then persist a Notification.
@Test
public void testCreateNotification() {
    var element = mock(DefaultNotificationCenter.NotificationElement.class);
    var subscription = createSubscriptions().get(0);
    var user = mock(User.class);
    var subscriptionName = subscription.getMetadata().getName();
    var subscriber = new Subscriber(UserIdentity.of(subscription.getSpec().getSubscriber().getName()),
        subscriptionName);
    when(client.fetch(eq(User.class), eq(subscriber.name()))).thenReturn(Mono.just(user));
    when(element.subscriber()).thenReturn(subscriber);
    when(client.create(any(Notification.class))).thenReturn(Mono.empty());
    var reason = new Reason();
    reason.setMetadata(new Metadata());
    reason.getMetadata().setName("reason-a");
    reason.setSpec(new Reason.Spec());
    reason.getSpec().setReasonType("new-reply-on-comment");
    when(element.reason()).thenReturn(reason);
    notificationCenter.createNotification(element).block();
    verify(client).fetch(eq(User.class), eq(subscriber.name()));
    verify(client).create(any(Notification.class));
}
/**
 * Describes the given share groups via the group coordinator(s), returning per-group
 * futures keyed by the plain group id string.
 *
 * @param groupIds ids of the share groups to describe
 * @param options request options (authorized-operations flag, timeout)
 */
@Override
public DescribeShareGroupsResult describeShareGroups(final Collection<String> groupIds,
                                                     final DescribeShareGroupsOptions options) {
    SimpleAdminApiFuture<CoordinatorKey, ShareGroupDescription> future =
        DescribeShareGroupsHandler.newFuture(groupIds);
    DescribeShareGroupsHandler handler = new DescribeShareGroupsHandler(options.includeAuthorizedOperations(), logContext);
    invokeDriver(handler, future, options.timeoutMs);
    // Re-key the driver's futures from CoordinatorKey to the raw group id.
    return new DescribeShareGroupsResult(future.all().entrySet().stream()
        .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue)));
}
// describeShareGroups must retry through retriable FindCoordinator and group-describe
// errors (COORDINATOR_NOT_AVAILABLE, COORDINATOR_LOAD_IN_PROGRESS, NOT_COORDINATOR)
// and finally return the stable group's members and assignments.
@Test
public void testDescribeShareGroups() throws Exception {
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Retriable FindCoordinatorResponse errors should be retried
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData();
        // Retriable errors should be retried
        data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup()
            .setGroupId(GROUP_ID)
            .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()));
        env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(data));
        /*
         * Two error responses are returned here: one with NOT_COORDINATOR when calling the
         * describe share group api via a coordinator that has moved, which retries the whole
         * operation and therefore needs another FindCoordinatorResponse; the same applies to
         * the COORDINATOR_NOT_AVAILABLE error response that follows.
         */
        data = new ShareGroupDescribeResponseData();
        data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup()
            .setGroupId(GROUP_ID)
            .setErrorCode(Errors.NOT_COORDINATOR.code()));
        env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(data));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        data = new ShareGroupDescribeResponseData();
        data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup()
            .setGroupId(GROUP_ID)
            .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()));
        env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(data));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        // Successful response: two members, both assigned partitions 0..2 of my_topic
        data = new ShareGroupDescribeResponseData();
        ShareGroupDescribeResponseData.TopicPartitions topicPartitions = new ShareGroupDescribeResponseData.TopicPartitions()
            .setTopicName("my_topic")
            .setPartitions(asList(0, 1, 2));
        ShareGroupDescribeResponseData.Assignment memberAssignment = new ShareGroupDescribeResponseData.Assignment()
            .setTopicPartitions(asList(topicPartitions));
        ShareGroupDescribeResponseData.Member memberOne = new ShareGroupDescribeResponseData.Member()
            .setMemberId("0")
            .setClientId("clientId0")
            .setClientHost("clientHost")
            .setAssignment(memberAssignment);
        ShareGroupDescribeResponseData.Member memberTwo = new ShareGroupDescribeResponseData.Member()
            .setMemberId("1")
            .setClientId("clientId1")
            .setClientHost("clientHost")
            .setAssignment(memberAssignment);
        ShareGroupDescribeResponseData group0Data = new ShareGroupDescribeResponseData();
        group0Data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup()
            .setGroupId(GROUP_ID)
            .setGroupState(ShareGroupState.STABLE.toString())
            .setMembers(asList(memberOne, memberTwo)));
        final List<TopicPartition> expectedTopicPartitions = new ArrayList<>();
        expectedTopicPartitions.add(0, new TopicPartition("my_topic", 0));
        expectedTopicPartitions.add(1, new TopicPartition("my_topic", 1));
        expectedTopicPartitions.add(2, new TopicPartition("my_topic", 2));
        List<MemberDescription> expectedMemberDescriptions = new ArrayList<>();
        expectedMemberDescriptions.add(convertToMemberDescriptions(memberOne,
            new MemberAssignment(new HashSet<>(expectedTopicPartitions))));
        expectedMemberDescriptions.add(convertToMemberDescriptions(memberTwo,
            new MemberAssignment(new HashSet<>(expectedTopicPartitions))));
        data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup()
            .setGroupId(GROUP_ID)
            .setGroupState(ShareGroupState.STABLE.toString())
            .setMembers(asList(memberOne, memberTwo)));
        env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(data));
        final DescribeShareGroupsResult result = env.adminClient().describeShareGroups(singletonList(GROUP_ID));
        final ShareGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get();
        assertEquals(1, result.describedGroups().size());
        assertEquals(GROUP_ID, groupDescription.groupId());
        assertEquals(2, groupDescription.members().size());
        assertEquals(expectedMemberDescriptions, groupDescription.members());
    }
}
/**
 * Resolves the output schema of the given execution step by dispatching to the
 * handler registered for the step's concrete class.
 * <p>
 * Note: a null result from a registered handler also surfaces as the "Unhandled step
 * class" exception, because {@code Optional.map} turns null into empty.
 *
 * @throws IllegalStateException if no handler is registered for the step's class
 */
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
    return Optional.ofNullable(HANDLERS.get(step.getClass()))
        .map(h -> h.handle(this, schema, step))
        .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
}
// StreamGroupByKey must not change the schema: resolve() returns the input schema as-is.
@Test
public void shouldResolveSchemaForStreamGroupByKey() {
    // Given:
    final StreamGroupByKey step = new StreamGroupByKey(
        PROPERTIES,
        streamSource,
        formats
    );

    // When:
    final LogicalSchema result = resolver.resolve(step, SCHEMA);

    // Then:
    assertThat(result, is(SCHEMA));
}
/**
 * Appends a record to the cache queue under the iterator's lock, blocking (via
 * waitCacheNotFull) when the queue already holds DEFAULT_MAX_CACHE_NUM entries.
 * Records are silently dropped once the UDF has finished consuming. Must not be
 * called after close() (checkState).
 * <p>
 * NOTE(review): retries by calling addRecord recursively after the wait — assumes the
 * lock taken by runWithLock is reentrant; prolonged back-pressure would deepen the
 * stack. Verify against the lock implementation.
 */
public void addRecord(IN record) {
    runWithLock(
        () -> {
            checkState(!closed);
            if (udfFinished) {
                // Consumer is done; late records are intentionally discarded.
                return;
            }
            if (cacheQueue.size() < DEFAULT_MAX_CACHE_NUM) {
                cacheQueue.add(record);
                if (cacheQueue.size() == 1) {
                    // First element after empty: wake consumers blocked on cacheNotEmpty.
                    cacheNotEmpty.signalAll();
                }
            } else {
                // Queue full: wait for capacity, then retry the insertion.
                waitCacheNotFull();
                addRecord(record);
            }
        });
}
// Covers three addRecord() scenarios: empty cache (records flow straight to the UDF),
// full cache (producer blocks until next() frees a slot), and finished UDF (blocked
// producer is released and the record is dropped).
@Test
void testAddRecord() throws ExecutionException, InterruptedException {
    CompletableFuture<List<String>> result = new CompletableFuture<>();
    CompletableFuture<Object> udfFinishTrigger = new CompletableFuture<>();
    MapPartitionIterator<String> iterator =
        new MapPartitionIterator<>(
            inputIterator -> {
                List<String> strings = new ArrayList<>();
                for (int index = 0; index < RECORD_NUMBER; ++index) {
                    strings.add(inputIterator.next());
                }
                result.complete(strings);
                try {
                    udfFinishTrigger.get();
                } catch (InterruptedException | ExecutionException e) {
                    ExceptionUtils.rethrow(e);
                }
            });
    // 1. Test addRecord() when the cache is empty in the MapPartitionIterator.
    addRecordToIterator(RECORD_NUMBER, iterator);
    List<String> results = result.get();
    assertThat(results.size()).isEqualTo(RECORD_NUMBER);
    assertThat(results.get(0)).isEqualTo(RECORD);
    assertThat(results.get(1)).isEqualTo(RECORD);
    assertThat(results.get(2)).isEqualTo(RECORD);
    // 2. Test addRecord() when the cache is full in the MapPartitionIterator.
    addRecordToIterator(DEFAULT_MAX_CACHE_NUM, iterator);
    CompletableFuture<Object> mockedTaskThread1 = new CompletableFuture<>();
    CompletableFuture<List<String>> addRecordFinishIdentifier1 = new CompletableFuture<>();
    mockedTaskThread1.thenRunAsync(
        () -> {
            iterator.addRecord(RECORD);
            addRecordFinishIdentifier1.complete(null);
        });
    mockedTaskThread1.complete(null);
    // The producer must be blocked until next() drains one element.
    assertThat(addRecordFinishIdentifier1).isNotCompleted();
    iterator.next();
    addRecordFinishIdentifier1.get();
    assertThat(addRecordFinishIdentifier1).isCompleted();
    // 3. Test addRecord() when the udf is finished in the MapPartitionIterator.
    CompletableFuture<Object> mockedTaskThread2 = new CompletableFuture<>();
    CompletableFuture<List<String>> addRecordFinishIdentifier2 = new CompletableFuture<>();
    mockedTaskThread2.thenRunAsync(
        () -> {
            iterator.addRecord(RECORD);
            addRecordFinishIdentifier2.complete(null);
        });
    mockedTaskThread2.complete(null);
    assertThat(addRecordFinishIdentifier2).isNotCompleted();
    udfFinishTrigger.complete(null);
    addRecordFinishIdentifier2.get();
    assertThat(addRecordFinishIdentifier2).isCompleted();
    assertThat(udfFinishTrigger).isCompleted();
    iterator.close();
}
/**
 * Locates and parses the client-side core-site.xml and hdfs-site.xml, storing the
 * parsed configs on this task and returning the accumulated validation result
 * (state, messages, and advice built up by accessAndParseConf).
 */
protected ValidationTaskResult loadHdfsConfig() {
    Pair<String, String> clientConfFiles = getHdfsConfPaths();
    String coreConfPath = clientConfFiles.getFirst();
    String hdfsConfPath = clientConfFiles.getSecond();
    mCoreConf = accessAndParseConf("core-site.xml", coreConfPath);
    mHdfsConf = accessAndParseConf("hdfs-site.xml", hdfsConfPath);
    return new ValidationTaskResult(mState, getName(), mMsg.toString(), mAdvice.toString());
}
// With only hdfs-site.xml configured, validation must SKIP and advise about the
// missing core-site.xml.
@Test
public void missingCoreSiteXML() {
    // Only prepare hdfs-site.xml
    String hdfsSite = Paths.get(sTestDir.toPath().toString(), "hdfs-site.xml").toString();
    ValidationTestUtils.writeXML(hdfsSite, ImmutableMap.of("key1", "value1"));
    CONF.set(PropertyKey.UNDERFS_HDFS_CONFIGURATION, hdfsSite);
    HdfsConfValidationTask task =
        new HdfsConfValidationTask("hdfs://namenode:9000/alluxio", CONF);
    ValidationTaskResult result = task.loadHdfsConfig();
    assertEquals(result.getState(), ValidationUtils.State.SKIPPED);
    assertThat(result.getResult(), containsString("core-site.xml is not configured"));
    assertThat(result.getAdvice(), containsString("core-site.xml"));
}
/**
 * Replaces the instance-metadata map with the given snapshot and clears the previous
 * map so its entries no longer hold references.
 *
 * @param snapshot the new instance metadata, keyed by service then instance id
 */
public void loadInstanceMetadataSnapshot(ConcurrentMap<Service, ConcurrentMap<String, InstanceMetadata>> snapshot) {
    final ConcurrentMap<Service, ConcurrentMap<String, InstanceMetadata>> previous = instanceMetadataMap;
    instanceMetadataMap = snapshot;
    previous.clear();
}
// Loading an empty snapshot must fully replace any existing instance metadata.
@Test
void testLoadInstanceMetadataSnapshot() {
    namingMetadataManager.loadInstanceMetadataSnapshot(new ConcurrentHashMap<>());
    Map<Service, ConcurrentMap<String, InstanceMetadata>> instanceMetadataSnapshot =
        namingMetadataManager.getInstanceMetadataSnapshot();
    assertEquals(0, instanceMetadataSnapshot.size());
}
/**
 * Creates a shallow copy of the given parameter: every scalar field and the refs
 * list reference are copied onto a fresh DocParameter.
 *
 * @param source the parameter to copy; must not be null
 * @return a new DocParameter with the same field values
 */
public static DocParameter copy(final DocParameter source) {
    final DocParameter result = new DocParameter();
    result.setId(source.getId());
    result.setName(source.getName());
    result.setDescription(source.getDescription());
    result.setExample(source.getExample());
    result.setXExample(source.getXExample());
    result.setModule(source.getModule());
    result.setMaxLength(source.getMaxLength());
    result.setRequired(source.isRequired());
    result.setType(source.getType());
    result.setRefs(source.getRefs());
    return result;
}
// copy() must duplicate every field, and the copy remains independently mutable (refs).
@Test
public void testCopy() {
    DocParameter copied = DocParameter.copy(docParameter);
    assertEquals("shenyuDescription", copied.getDescription());
    assertEquals(0, copied.getId().intValue());
    assertEquals("shenyuMaxLength", copied.getMaxLength());
    assertEquals("shenyuSetModule", copied.getModule());
    assertEquals("shenyuName", copied.getName());
    assertTrue(copied.isRequired());
    assertEquals("shenyuType", copied.getType());
    assertEquals("shenyuXExample", copied.getXExample());
    copied.setRefs(Collections.singletonList(copied));
    assertEquals(Collections.singletonList(copied), copied.getRefs());
}
/**
 * Returns whether the future holds a usable (non-null) result: it must be non-null,
 * already done, not completed exceptionally, and have joined to a non-null value.
 */
static boolean isReady(@Nullable CompletableFuture<?> future) {
    if (future == null) {
        return false;
    }
    if (!future.isDone() || future.isCompletedExceptionally()) {
        return false;
    }
    return future.join() != null;
}
// Every successfully-completed future from the data provider must report ready.
@Test(dataProvider = "successful")
public void isReady_success(CompletableFuture<Integer> future) {
    assertThat(Async.isReady(future)).isTrue();
}
/**
 * Converts the YAML mask-column configuration into its runtime rule configuration.
 *
 * @param yamlConfig YAML column rule configuration
 * @return the equivalent MaskColumnRuleConfiguration
 */
@Override
public MaskColumnRuleConfiguration swapToObject(final YamlMaskColumnRuleConfiguration yamlConfig) {
    final String logicColumn = yamlConfig.getLogicColumn();
    final String maskAlgorithm = yamlConfig.getMaskAlgorithm();
    return new MaskColumnRuleConfiguration(logicColumn, maskAlgorithm);
}
// swapToObject must carry the logic column and algorithm names over unchanged.
@Test
void assertSwapToObject() {
    YamlMaskColumnRuleConfiguration yamlMaskColumnRuleConfig = new YamlMaskColumnRuleConfiguration();
    yamlMaskColumnRuleConfig.setLogicColumn("logicColumn");
    yamlMaskColumnRuleConfig.setMaskAlgorithm("md5_mask");
    MaskColumnRuleConfiguration actual = new YamlMaskColumnRuleConfigurationSwapper().swapToObject(yamlMaskColumnRuleConfig);
    assertThat(actual.getLogicColumn(), is("logicColumn"));
    assertThat(actual.getMaskAlgorithm(), is("md5_mask"));
}
/**
 * Updates an existing file configuration: validates it exists, re-parses the client
 * config against the record's original storage type, persists the update, and evicts
 * the cached file client.
 *
 * @param updateReqVO the update request, keyed by its id
 */
@Override
public void updateFileConfig(FileConfigSaveReqVO updateReqVO) {
    // Validate that the configuration exists
    FileConfigDO config = validateFileConfigExists(updateReqVO.getId());
    // Update: note the existing record's storage type is kept; only the config payload is re-parsed
    FileConfigDO updateObj = FileConfigConvert.INSTANCE.convert(updateReqVO)
            .setConfig(parseClientConfig(config.getStorage(), updateReqVO.getConfig()));
    fileConfigMapper.updateById(updateObj);
    // Evict the cached client for this configuration id
    clearCache(config.getId(), null);
}
// Updating an existing LOCAL file config must persist the new config payload and
// evict the cached file client for that id.
@Test
public void testUpdateFileConfig_success() {
    // mock data
    FileConfigDO dbFileConfig = randomPojo(FileConfigDO.class, o -> o.setStorage(FileStorageEnum.LOCAL.getStorage())
            .setConfig(new LocalFileClientConfig().setBasePath("/yunai").setDomain("https://www.iocoder.cn")));
    fileConfigMapper.insert(dbFileConfig); // @Sql: insert an existing record first
    // prepare parameters
    FileConfigSaveReqVO reqVO = randomPojo(FileConfigSaveReqVO.class, o -> {
        o.setId(dbFileConfig.getId()); // set the id to update
        o.setStorage(FileStorageEnum.LOCAL.getStorage());
        Map<String, Object> config = MapUtil.<String, Object>builder().put("basePath", "/yunai2")
                .put("domain", "https://doc.iocoder.cn").build();
        o.setConfig(config);
    });
    // invoke
    fileConfigService.updateFileConfig(reqVO);
    // verify the update was applied correctly
    FileConfigDO fileConfig = fileConfigMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, fileConfig, "config");
    assertEquals("/yunai2", ((LocalFileClientConfig) fileConfig.getConfig()).getBasePath());
    assertEquals("https://doc.iocoder.cn", ((LocalFileClientConfig) fileConfig.getConfig()).getDomain());
    // verify the cache was evicted
    assertNull(fileConfigService.getClientCache().getIfPresent(fileConfig.getId()));
}
// Recomputes the named theme's status (location, phase, READY/FAILED condition) and
// persists the theme only when the recomputed status differs from the previous one.
void reconcileStatus(String name) {
    client.fetch(Theme.class, name).ifPresent(theme -> {
        final Theme.ThemeStatus status = defaultIfNull(theme.getStatus(), new Theme.ThemeStatus());
        // Deep copy so we can detect whether anything actually changed before updating.
        final Theme.ThemeStatus oldStatus = JsonUtils.deepCopy(status);
        theme.setStatus(status);
        var themePath = themeRoot.get().resolve(name);
        status.setLocation(themePath.toAbsolutePath().toString());
        // Optimistically mark READY; downgraded below if the version requirement fails.
        status.setPhase(Theme.ThemePhase.READY);
        Condition.ConditionBuilder conditionBuilder = Condition.builder()
                .type(Theme.ThemePhase.READY.name())
                .status(ConditionStatus.TRUE)
                .reason(Theme.ThemePhase.READY.name())
                .message(StringUtils.EMPTY)
                .lastTransitionTime(Instant.now());
        // Check if this theme version is match requires param.
        String normalVersion = systemVersionSupplier.get().getNormalVersion();
        String requires = theme.getSpec().getRequires();
        if (!VersionUtils.satisfiesRequires(normalVersion, requires)) {
            status.setPhase(Theme.ThemePhase.FAILED);
            conditionBuilder
                    .type(Theme.ThemePhase.FAILED.name())
                    .status(ConditionStatus.FALSE)
                    .reason("UnsatisfiedRequiresVersion")
                    .message(String.format(
                            "Theme requires a minimum system version of [%s], and you have [%s].",
                            requires, normalVersion));
        }
        // Conditions live in a bounded FIFO list; oldest entries are evicted on add.
        Theme.nullSafeConditionList(theme).addAndEvictFIFO(conditionBuilder.build());
        if (!Objects.equals(oldStatus, status)) {
            client.update(theme);
        }
    });
}
// requires ">2.3.0" with system version 2.3.0 yields FAILED; ">=2.3.0" yields READY.
@Test
void reconcileStatus() {
    when(systemVersionSupplier.get()).thenReturn(Version.valueOf("2.3.0"));
    Path testWorkDir = tempDirectory.resolve("reconcile-delete");
    when(themeRoot.get()).thenReturn(testWorkDir);
    final ThemeReconciler themeReconciler =
        new ThemeReconciler(extensionClient, themeRoot, systemVersionSupplier);
    Theme theme = fakeTheme();
    theme.setStatus(null);
    theme.getSpec().setRequires(">2.3.0");
    when(extensionClient.fetch(eq(Theme.class), eq("fake-theme")))
        .thenReturn(Optional.of(theme));
    themeReconciler.reconcileStatus("fake-theme");
    // First reconcile: requirement not satisfied -> FAILED phase and FAILED condition first.
    ArgumentCaptor<Theme> themeUpdateCaptor = ArgumentCaptor.forClass(Theme.class);
    verify(extensionClient).update(themeUpdateCaptor.capture());
    Theme value = themeUpdateCaptor.getValue();
    assertThat(value.getStatus()).isNotNull();
    assertThat(value.getStatus().getConditions().peekFirst().getType())
        .isEqualTo(Theme.ThemePhase.FAILED.name());
    assertThat(value.getStatus().getPhase())
        .isEqualTo(Theme.ThemePhase.FAILED);
    // Second reconcile: requirement satisfied -> READY phase.
    theme.getSpec().setRequires(">=2.3.0");
    when(extensionClient.fetch(eq(Theme.class), eq("fake-theme")))
        .thenReturn(Optional.of(theme));
    themeReconciler.reconcileStatus("fake-theme");
    verify(extensionClient, times(2)).update(themeUpdateCaptor.capture());
    assertThat(themeUpdateCaptor.getValue().getStatus().getPhase())
        .isEqualTo(Theme.ThemePhase.READY);
}
// Cauchy CDF at `value` for a distribution located at `median` with scale `scale`;
// rejects non-positive scale.
@Description("Cauchy cdf for a given value, median, and scale (gamma)")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double cauchyCdf(
        @SqlType(StandardTypes.DOUBLE) double median,
        @SqlType(StandardTypes.DOUBLE) double scale,
        @SqlType(StandardTypes.DOUBLE) double value)
{
    checkCondition(scale > 0, INVALID_FUNCTION_ARGUMENT, "cauchyCdf Function: scale must be greater than 0");
    // null RNG: no sampling is performed, only CDF evaluation.
    CauchyDistribution distribution = new CauchyDistribution(null, median, scale, CauchyDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    return distribution.cumulativeProbability(value);
}
// Spot-checks cauchy_cdf(median, scale, value) against known values; negative scale is rejected.
@Test
public void testCauchyCdf() {
    assertFunction("cauchy_cdf(0.0, 1.0, 0.0)", DOUBLE, 0.5);
    assertFunction("cauchy_cdf(0.0, 1.0, 1.0)", DOUBLE, 0.75);
    assertFunction("cauchy_cdf(5.0, 2.0, 3.0)", DOUBLE, 0.25);
    assertFunction("round(cauchy_cdf(2.5, 1.0, 3.0), 2)", DOUBLE, 0.65);
    assertFunction("round(cauchy_cdf(5.0, 1.0, 3.0), 2)", DOUBLE, 0.15);
    assertInvalidFunction("cauchy_cdf(0.0, -1.0, 0.0)", "cauchyCdf Function: scale must be greater than 0");
}
// Whether the delegation token provider for the given service is enabled in the configuration.
static boolean isProviderEnabled(Configuration configuration, String serviceName) {
    return SecurityOptions.forProvider(configuration, serviceName)
        .get(DELEGATION_TOKEN_PROVIDER_ENABLED);
}
// With an empty configuration the provider-enabled flag defaults to true.
@Test
public void isProviderEnabledMustGiveBackTrueByDefault() {
    Configuration configuration = new Configuration();
    assertTrue(DefaultDelegationTokenManager.isProviderEnabled(configuration, "test"));
}
// Returns the already-normalized point hint stored on this filter.
String getNormalizedPointHint() {
    return pointHint;
}
// Hints are lower-cased with separators removed; numbers are kept as significant tokens.
@Test
public void normalization() {
    assertEquals("northderby", createNameSimilarityEdgeFilter("North Derby Lane").getNormalizedPointHint());
    // do not remove the number as it is a significant part of the name, especially in the US
    assertEquals("28north", createNameSimilarityEdgeFilter("I-28 N").getNormalizedPointHint());
    assertEquals("28north", createNameSimilarityEdgeFilter(" I-28 N ").getNormalizedPointHint());
    assertEquals("south23rd", createNameSimilarityEdgeFilter("S 23rd St").getNormalizedPointHint());
    assertEquals("66", createNameSimilarityEdgeFilter("Route 66").getNormalizedPointHint());
    assertEquals("fayettecounty1", createNameSimilarityEdgeFilter("Fayette County Rd 1").getNormalizedPointHint());
    // too short, except when numbers
    assertEquals("112", createNameSimilarityEdgeFilter("A B C 1 12").getNormalizedPointHint());
}
// Default negotiator pairing: TYPE_PROPERTY_KEY mapped to the SDK default TLS builder.
@Override
protected Pair<String, ProtocolNegotiatorBuilder> defaultBuilderPair() {
    return Pair.with(TYPE_PROPERTY_KEY, new SdkDefaultTlsProtocolNegotiatorBuilder());
}
// The singleton's default pair carries the expected key and a non-null builder.
@Test
void testDefaultBuilderPair() {
    Pair<String, ProtocolNegotiatorBuilder> defaultPair =
        SdkProtocolNegotiatorBuilderSingleton.getSingleton().defaultBuilderPair();
    assertNotNull(defaultPair);
    assertEquals(SdkProtocolNegotiatorBuilderSingleton.TYPE_PROPERTY_KEY, defaultPair.getFirst());
    assertNotNull(defaultPair.getSecond());
}
// Asserts the subject contains exactly the given varargs elements.
// A null varargs array (i.e. containsExactly((Object[]) null)) is treated as a single
// null element.
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    List<@Nullable Object> expected =
        (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
    // The boolean flags the "single Iterable argument" case — presumably so the callee can
    // tailor the failure message; confirm against containsExactlyElementsIn's signature.
    return containsExactlyElementsIn(
        expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
// Failure case: one expected element (4) is missing from the actual iterable.
@Test
public void iterableContainsExactlyMissingItemFailure() {
    expectFailureWhenTestingThat(asList(1, 2)).containsExactly(1, 2, 4);
    assertFailureValue("missing (1)", "4");
}
/**
 * Explains why {@code newVersion} is outside the supported {@code range}, or returns
 * empty when the version is supported.
 *
 * @param newVersion the version being checked
 * @param what       human-readable description of the component being checked
 * @param range      the supported version range
 * @return a reason string when unsupported, otherwise {@link Optional#empty()}
 */
public static Optional<String> reasonNotSupported(
    short newVersion,
    String what,
    VersionRange range
) {
    // Supported version: nothing to report.
    if (range.contains(newVersion)) {
        return Optional.empty();
    }
    // A maximum of 0 means the feature is not supported at all.
    final String reason = (range.max() == (short) 0)
        ? what + " does not support this feature."
        : what + " only supports versions " + range;
    return Optional.of(reason);
}
// Unsupported version yields a descriptive reason; a supported version yields empty.
@Test
public void testReasonNotSupported() {
    assertEquals(Optional.of("Local controller 0 only supports versions 0-3"),
        QuorumFeatures.reasonNotSupported((short) 10, "Local controller 0", VersionRange.of(0, 3)));
    assertEquals(Optional.empty(),
        QuorumFeatures.reasonNotSupported((short) 3, "Local controller 0", VersionRange.of(0, 3)));
}
/**
 * Decides whether user-directory migration is required: a pre-existing mapping file means
 * migration already ran, otherwise migration is needed iff user directories exist.
 */
boolean needsMigration() {
    final File mappingFile = UserIdMapper.getConfigFile(usersDirectory);
    final boolean alreadyMigrated = mappingFile.exists() && mappingFile.isFile();
    if (alreadyMigrated) {
        LOGGER.finest("User mapping file already exists. No migration needed.");
        return false;
    }
    // No mapping file yet: migrate only when at least one user directory is present.
    final File[] userDirectories = listUserDirectories();
    return userDirectories != null && userDirectories.length > 0;
}
// After the mapper initializes (creating the mapping file), needsMigration is false and
// the fixture's single user directory ("fred") is mapped.
@Test
public void migrateSimpleUser() throws IOException {
    File usersDirectory = createTestDirectory(getClass(), name);
    IdStrategy idStrategy = IdStrategy.CASE_INSENSITIVE;
    UserIdMigrator migrator = new UserIdMigrator(usersDirectory, idStrategy);
    TestUserIdMapper mapper = new TestUserIdMapper(usersDirectory, idStrategy);
    mapper.init();
    assertThat(migrator.needsMigration(), is(false));
    // Re-initialize to verify the converted ids are read back from disk.
    mapper = new TestUserIdMapper(usersDirectory, idStrategy);
    mapper.init();
    assertThat(mapper.getConvertedUserIds().size(), is(1));
    assertThat(mapper.isMapped("fred"), is(true));
}
// Fetches windowed rows for the given key/partition whose window start AND end both fall
// within the supplied bounds. Rows are returned in store-iterator order. Any failure is
// wrapped in a MaterializationException.
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
    try {
        final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
            .store(QueryableStoreTypes.timestampedWindowStore(), partition);
        // Derive a single fetch range covering both bound sets to limit the store scan.
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it =
                cacheBypassFetcher.fetch(store, key, lower, upper)) {
            final Builder<WindowedRow> builder = ImmutableList.builder();
            while (it.hasNext()) {
                final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
                final Instant windowStart = Instant.ofEpochMilli(next.key);
                // Post-filter: the fetch range may be a superset of the requested bounds.
                if (!windowStartBounds.contains(windowStart)) {
                    continue;
                }
                final Instant windowEnd = windowStart.plus(windowSize);
                if (!windowEndBounds.contains(windowEnd)) {
                    continue;
                }
                final TimeWindow window =
                    new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
                final WindowedRow row = WindowedRow.of(
                    stateStore.schema(),
                    new Windowed<>(key, window),
                    next.value.value(),
                    next.value.timestamp()
                );
                builder.add(row);
            }
            return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
        }
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
// Rows must be returned in exactly the order the store iterator produced them.
@Test
public void shouldMaintainResultOrder() {
    // Given:
    when(fetchIterator.hasNext())
        .thenReturn(true)
        .thenReturn(true)
        .thenReturn(true)
        .thenReturn(false);
    final Instant start = WINDOW_START_BOUNDS.lowerEndpoint();
    when(fetchIterator.next())
        .thenReturn(new KeyValue<>(start.toEpochMilli(), VALUE_1))
        .thenReturn(new KeyValue<>(start.plusMillis(1).toEpochMilli(), VALUE_2))
        .thenReturn(new KeyValue<>(start.plusMillis(2).toEpochMilli(), VALUE_3))
        .thenThrow(new AssertionError());
    when(cacheBypassFetcher.fetch(eq(tableStore), any(), any(), any())).thenReturn(fetchIterator);
    // When:
    final Iterator<WindowedRow> rowIterator =
        table.get(A_KEY, PARTITION, Range.all(), Range.all()).rowIterator;
    // Then:
    assertThat(rowIterator.hasNext(), is(true));
    final List<WindowedRow> resultList = Lists.newArrayList(rowIterator);
    assertThat(resultList, contains(
        WindowedRow.of(
            SCHEMA,
            windowedKey(start),
            VALUE_1.value(),
            VALUE_1.timestamp()
        ),
        WindowedRow.of(
            SCHEMA,
            windowedKey(start.plusMillis(1)),
            VALUE_2.value(),
            VALUE_2.timestamp()
        ),
        WindowedRow.of(
            SCHEMA,
            windowedKey(start.plusMillis(2)),
            VALUE_3.value(),
            VALUE_3.timestamp()
        )
    ));
}
// Serializes a Locale as its language, country, and variant strings, in that order.
// NOTE(review): script and extension components are not written, so locales carrying them
// would not round-trip exactly — confirm against the corresponding read path.
public void write(MemoryBuffer buffer, Locale l) {
    fury.writeJavaString(buffer, l.getLanguage());
    fury.writeJavaString(buffer, l.getCountry());
    fury.writeJavaString(buffer, l.getVariant());
}
// Round-trips common Locales through the LocaleSerializer.
@Test
public void testWrite() {
    Fury fury = Fury.builder().withLanguage(Language.JAVA).requireClassRegistration(false).build();
    serDeCheckSerializerAndEqual(fury, Locale.US, "LocaleSerializer");
    serDeCheckSerializerAndEqual(fury, Locale.CHINESE, "LocaleSerializer");
    serDeCheckSerializerAndEqual(fury, Locale.ENGLISH, "LocaleSerializer");
    serDeCheckSerializerAndEqual(fury, Locale.TRADITIONAL_CHINESE, "LocaleSerializer");
    serDeCheckSerializerAndEqual(fury, Locale.CHINA, "LocaleSerializer");
    serDeCheckSerializerAndEqual(fury, Locale.TAIWAN, "LocaleSerializer");
    serDeCheckSerializerAndEqual(fury, Locale.getDefault(), "LocaleSerializer");
}
// Encodes a MepEntry as JSON: the common Mep attributes first, then MAC address, loopback,
// remote MEPs, and a flag per active defect (flags are emitted only when true).
@Override
public ObjectNode encode(MepEntry mepEntry, CodecContext context) {
    checkNotNull(mepEntry, "Mep cannot be null");
    ObjectNode result = context.mapper().createObjectNode();
    // Get the common attributes via the plain Mep codec and copy them into the result.
    Mep mep = mepEntry;
    ObjectNode mepAttrs = new MepCodec().encode(mep, context);
    Iterator<Entry<String, JsonNode>> elements = mepAttrs.fields();
    while (elements.hasNext()) {
        Entry<String, JsonNode> element = elements.next();
        result.set(element.getKey(), element.getValue());
    }
    if (mepEntry.macAddress() != null) {
        result.put("macAddress", mepEntry.macAddress().toString());
    }
    if (mepEntry.loopbackAttributes() != null) {
        result.set("loopback", new MepLbEntryCodec()
                .encode(mepEntry.loopbackAttributes(), context));
    }
    if (mepEntry.activeRemoteMepList() != null) {
        result.set("remoteMeps", new RemoteMepEntryCodec()
                .encode(mepEntry.activeRemoteMepList(), context));
    }
    if (mepEntry.activeErrorCcmDefect()) {
        result.put("activeErrorCcmDefect", true);
    }
    if (mepEntry.activeMacStatusDefect()) {
        result.put("activeMacStatusDefect", true);
    }
    if (mepEntry.activeRdiCcmDefect()) {
        result.put("activeRdiCcmDefect", true);
    }
    if (mepEntry.activeRemoteCcmDefect()) {
        result.put("activeRemoteCcmDefect", true);
    }
    if (mepEntry.activeXconCcmDefect()) {
        result.put("activeXconCcmDefect", true);
    }
    return result;
}
// Encoding a list of MepEntry objects: every encoded element carries an "md-" mdName prefix.
@Test
public void testEncodeIterableOfMepEntryCodecContext() throws CfmConfigException {
    MepEntry mepEntry2 = DefaultMepEntry.builder(
            MepId.valueOf((short) 33),
            DeviceId.deviceId("netconf:4321:830"),
            PortNumber.portNumber(1),
            MepDirection.DOWN_MEP,
            MdIdCharStr.asMdId("md-2"),
            MaIdCharStr.asMaId("ma-2-2"))
            .buildEntry();
    ArrayList<MepEntry> meps = new ArrayList<>();
    meps.add(mepEntry1);
    meps.add(mepEntry2);
    ObjectNode node = mapper.createObjectNode();
    node.set("mep", context.codec(MepEntry.class)
            .encode(meps, context));
    Iterator<JsonNode> an = node.get("mep").elements();
    while (an.hasNext()) {
        JsonNode jn = an.next();
        assertEquals("md-", jn.get("mdName").asText().substring(0, 3));
    }
}
// Returns the hosts on the given VLAN; the VLAN identifier must be non-null.
@Override
public Set<Host> getHostsByVlan(VlanId vlanId) {
    checkNotNull(vlanId, "VLAN identifier cannot be null");
    return filter(getHostsColl(), host -> Objects.equals(host.vlan(), vlanId));
}
// A null VLAN id must be rejected with NullPointerException.
@Test(expected = NullPointerException.class)
public void testGetHostsByNullVlan() {
    VirtualNetwork vnet = setupEmptyVnet();
    HostService hostService = manager.get(vnet.id(), HostService.class);
    hostService.getHostsByVlan(null);
}
// Executes all pipeline-breaker sub-trees of the stage plan and returns their buffered
// results, or null when the plan contains no pipeline breaker. Failures are wrapped into a
// PipelineBreakerResult carrying an error block instead of being thrown.
@Nullable
public static PipelineBreakerResult executePipelineBreakers(OpChainSchedulerService scheduler,
    MailboxService mailboxService, WorkerMetadata workerMetadata, StagePlan stagePlan,
    Map<String, String> opChainMetadata, long requestId, long deadlineMs) {
  PipelineBreakerContext pipelineBreakerContext = new PipelineBreakerContext();
  PipelineBreakerVisitor.visitPlanRoot(stagePlan.getRootNode(), pipelineBreakerContext);
  if (!pipelineBreakerContext.getPipelineBreakerMap().isEmpty()) {
    try {
      // TODO: This PlanRequestContext needs to indicate it is a pre-stage opChain and only listens to pre-stage
      // OpChain receive-mail callbacks.
      // see also: MailboxIdUtils TODOs, de-couple mailbox id from query information
      OpChainExecutionContext opChainExecutionContext =
          new OpChainExecutionContext(mailboxService, requestId, deadlineMs, opChainMetadata,
              stagePlan.getStageMetadata(), workerMetadata, null);
      return execute(scheduler, pipelineBreakerContext, opChainExecutionContext);
    } catch (Exception e) {
      LOGGER.error("Caught exception executing pipeline breaker for request: {}, stage: {}", requestId,
          stagePlan.getStageMetadata().getStageId(), e);
      return new PipelineBreakerResult(pipelineBreakerContext.getNodeIdMap(), Collections.emptyMap(),
          TransferableBlockUtils.getErrorTransferableBlock(e), null);
    }
  } else {
    return null;
  }
}
// Two pipeline-breaker receive nodes under a join: each yields exactly one buffered data
// block (EOS excluded) and stats for both upstream stages are collected.
@Test
public void shouldWorkWithMultiplePBNodeUponNormalOperation() {
  MailboxReceiveNode mailboxReceiveNode1 = getPBReceiveNode(1);
  MailboxReceiveNode mailboxReceiveNode2 = getPBReceiveNode(2);
  JoinNode joinNode =
      new JoinNode(0, DATA_SCHEMA, PlanNode.NodeHint.EMPTY, List.of(mailboxReceiveNode1, mailboxReceiveNode2),
          JoinRelType.INNER, List.of(0), List.of(0), List.of());
  StagePlan stagePlan = new StagePlan(joinNode, _stageMetadata);
  // when
  when(_mailboxService.getReceivingMailbox(MAILBOX_ID_1)).thenReturn(_mailbox1);
  when(_mailboxService.getReceivingMailbox(MAILBOX_ID_2)).thenReturn(_mailbox2);
  Object[] row1 = new Object[]{1, 1};
  Object[] row2 = new Object[]{2, 3};
  when(_mailbox1.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row1),
      TransferableBlockUtils.getEndOfStreamTransferableBlock(OperatorTestUtil.getDummyStats(1)));
  when(_mailbox2.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row2),
      TransferableBlockUtils.getEndOfStreamTransferableBlock(OperatorTestUtil.getDummyStats(2)));
  PipelineBreakerResult pipelineBreakerResult =
      PipelineBreakerExecutor.executePipelineBreakers(_scheduler, _mailboxService, _workerMetadata, stagePlan,
          ImmutableMap.of(), 0, Long.MAX_VALUE);
  // then
  // should have two PB result, receive 2 data blocks, one each, EOS block shouldn't be included
  Assert.assertNotNull(pipelineBreakerResult);
  Assert.assertNull(pipelineBreakerResult.getErrorBlock());
  Assert.assertEquals(pipelineBreakerResult.getResultMap().size(), 2);
  Iterator<List<TransferableBlock>> it = pipelineBreakerResult.getResultMap().values().iterator();
  Assert.assertEquals(it.next().size(), 1);
  Assert.assertEquals(it.next().size(), 1);
  Assert.assertFalse(it.hasNext());
  // should collect stats from previous stage here
  Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats());
  Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats().getUpstreamStageStats(1),
      "Stats for stage 1 should be sent");
  Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats().getUpstreamStageStats(2),
      "Stats for stage 2 should be sent");
}
// Whether the given IP address is included in this machine list.
// The wildcard list accepts everything — including a null argument, since the null check
// only runs after the wildcard short-circuit. Unresolvable hosts are reported as excluded.
public boolean includes(String ipAddress) {
    if (all) {
        return true;
    }
    if (ipAddress == null) {
        throw new IllegalArgumentException("ipAddress is null.");
    }
    try {
        return includes(addressFactory.getByName(ipAddress));
    } catch (UnknownHostException e) {
        return false;
    }
}
// A wildcard ("*") machine list includes any IP address.
@Test
public void testWildCard() {
    // create MachineList with a list of of IPs
    MachineList ml = new MachineList("*", new TestAddressFactory());
    // test for inclusion with any IP
    assertTrue(ml.includes("10.119.103.112"));
    assertTrue(ml.includes("1.2.3.4"));
}
// Reads one byte, advancing the position; updates the file's last-access time on success
// and latches `finished` at end of stream. File access is guarded by its read lock.
@Override
public synchronized int read() throws IOException {
    checkNotClosed();
    if (finished) {
        return -1;
    }
    file.readLock().lock();
    try {
        int b = file.read(pos++); // it's ok for pos to go beyond size()
        if (b == -1) {
            finished = true;
        } else {
            file.setLastAccessTime(fileSystemState.now());
        }
        return b;
    } finally {
        file.readLock().unlock();
    }
}
// Array reads: the first read fills the 6-byte array; the second returns only the two
// remaining bytes, leaving the rest of the array zeroed.
@Test
public void testRead_wholeArray_arraySmaller() throws IOException {
    JimfsInputStream in = newInputStream(1, 2, 3, 4, 5, 6, 7, 8);
    byte[] bytes = new byte[6];
    assertThat(in.read(bytes)).isEqualTo(6);
    assertArrayEquals(bytes(1, 2, 3, 4, 5, 6), bytes);
    bytes = new byte[6];
    assertThat(in.read(bytes)).isEqualTo(2);
    assertArrayEquals(bytes(7, 8, 0, 0, 0, 0), bytes);
    assertEmpty(in);
}
// Copies every @FieldContext-annotated field of the given configuration into a fresh
// ServiceConfiguration. Members that do not exist on ServiceConfiguration either raise
// IllegalArgumentException (ignoreNonExistMember=false) or are preserved as properties.
public static ServiceConfiguration convertFrom(PulsarConfiguration conf, boolean ignoreNonExistMember)
        throws RuntimeException {
    try {
        final ServiceConfiguration convertedConf = ServiceConfiguration.class
                .getDeclaredConstructor().newInstance();
        Field[] confFields = conf.getClass().getDeclaredFields();
        Properties sourceProperties = conf.getProperties();
        Properties targetProperties = convertedConf.getProperties();
        Arrays.stream(confFields).forEach(confField -> {
            try {
                confField.setAccessible(true);
                Field convertedConfField = ServiceConfiguration.class.getDeclaredField(confField.getName());
                // Only copy instance fields that carry the @FieldContext annotation.
                if (!Modifier.isStatic(convertedConfField.getModifiers())
                        && convertedConfField.getDeclaredAnnotation(FieldContext.class) != null) {
                    convertedConfField.setAccessible(true);
                    convertedConfField.set(convertedConf, confField.get(conf));
                }
            } catch (NoSuchFieldException e) {
                if (!ignoreNonExistMember) {
                    throw new IllegalArgumentException(
                            "Exception caused while converting configuration: " + e.getMessage());
                }
                // add unknown fields to properties
                try {
                    String propertyName = confField.getName();
                    if (!sourceProperties.containsKey(propertyName) && confField.get(conf) != null) {
                        targetProperties.put(propertyName, confField.get(conf));
                    }
                } catch (Exception ignoreException) {
                    // should not happen
                }
            } catch (IllegalAccessException e) {
                throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage());
            }
        });
        // Put the rest of properties to new config
        targetProperties.putAll(sourceProperties);
        return convertedConf;
    } catch (InstantiationException | IllegalAccessException
            | InvocationTargetException | NoSuchMethodException e) {
        throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage());
    }
}
// Converting a configuration with members unknown to ServiceConfiguration must fail fast
// when ignoreNonExistMember is false.
@Test
public void testConfigurationConverting_checkNonExistMember() {
    assertThrows(IllegalArgumentException.class,
        () -> PulsarConfigurationLoader.convertFrom(new MockConfiguration(), false));
}
@Udf(description = "Returns a masked version of the input string. The first n characters" + " will be replaced according to the default masking rules.") @SuppressWarnings("MethodMayBeStatic") // Invoked via reflection public String mask( @UdfParameter("input STRING to be masked") final String input, @UdfParameter("number of characters to mask from the start") final int numChars ) { return doMask(new Masker(), input, numChars); }
// A negative mask length must be rejected.
// NOTE(review): the expected message mentions "mask_left" although mask(...) is invoked —
// verify the shared doMask error text is intentional here.
@Test
public void shouldThrowIfLengthIsNegative() {
    // When:
    final KsqlException e = assertThrows(
        KsqlFunctionException.class,
        () -> udf.mask("AbCd#$123xy Z", -1)
    );
    // Then:
    assertThat(e.getMessage(), containsString("function mask_left requires a non-negative number"));
}
// Splits each upstream term on non-alphanumeric characters and emits every non-empty part
// as its own token (buffered in `tokens`). Position increments of terms that produced no
// parts are accumulated in `skipCounter` and added onto the next emitted term.
@Override
public boolean incrementToken() throws IOException {
    final ArrayDeque<String> tokens = getTokens();
    final CharTermAttribute termAtt = getTermAtt();
    if (tokens.isEmpty()) {
        String[] parts;
        skipCounter = 0;
        while (input.incrementToken()) {
            final String text = new String(termAtt.buffer(), 0, termAtt.length());
            // An empty upstream term is passed through unchanged.
            if (text.isEmpty()) {
                return true;
            }
            parts = text.split("[^a-zA-Z0-9]");
            if (parts.length == 0) {
                // Term was entirely separators: remember its position increment.
                skipCounter += posIncrementAttribute.getPositionIncrement();
            } else {
                if (skipCounter != 0) {
                    posIncrementAttribute.setPositionIncrement(posIncrementAttribute.getPositionIncrement() + skipCounter);
                }
                for (String part : parts) {
                    if (!part.isEmpty()) {
                        tokens.add(part);
                    }
                }
                break;
            }
        }
    }
    return addTerm();
}
// A URL is split on non-alphanumeric characters into its component tokens.
@Test
public void testIncrementToken() throws Exception {
    String[] expected = new String[6];
    expected[0] = "http";
    expected[1] = "www";
    expected[2] = "domain";
    expected[3] = "com";
    expected[4] = "test";
    expected[5] = "php";
    assertAnalyzesTo(analyzer, "http://www.domain.com/test.php", expected);
}
// Deletes the key/value config entry from every known name server.
// NOTE(review): if several name servers fail, only the last non-success response is kept
// and thrown after the loop — earlier failures are overwritten.
public void deleteKVConfigValue(final String namespace, final String key, final long timeoutMillis)
    throws RemotingException, MQClientException, InterruptedException {
    DeleteKVConfigRequestHeader requestHeader = new DeleteKVConfigRequestHeader();
    requestHeader.setNamespace(namespace);
    requestHeader.setKey(key);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_KV_CONFIG, requestHeader);
    List<String> nameServerAddressList = this.remotingClient.getNameServerAddressList();
    if (nameServerAddressList != null) {
        RemotingCommand errResponse = null;
        for (String namesrvAddr : nameServerAddressList) {
            RemotingCommand response = this.remotingClient.invokeSync(namesrvAddr, request, timeoutMillis);
            assert response != null;
            switch (response.getCode()) {
                case ResponseCode.SUCCESS: {
                    break;
                }
                default:
                    errResponse = response;
            }
        }
        if (errResponse != null) {
            throw new MQClientException(errResponse.getCode(), errResponse.getRemark());
        }
    }
}
// Smoke test: with invokeSync mocked, deleteKVConfigValue completes without throwing.
@Test
public void testDeleteKVConfigValue() throws RemotingException, InterruptedException, MQClientException {
    mockInvokeSync();
    mqClientAPI.deleteKVConfigValue("", "", defaultTimeout);
}
/**
 * Encodes a cookie for a client Cookie header: only {@code name=value} is emitted
 * (attributes such as path and domain are never sent back to the server).
 * A {@code null} value is encoded as an empty string.
 */
@Override
public String encode(Cookie cookie) {
    final StringBuilder encoded = new StringBuilder(cookie.getName()).append(EQUAL);
    if (cookie.getValue() != null) {
        encoded.append(cookie.getValue());
    }
    return encoded.toString();
}
// Client-side encoding emits only name=value; path/domain attributes are dropped.
@Test
void clientCookieEncoding() {
    ClientCookieEncoder cookieEncoder = new DefaultClientCookieEncoder();
    Cookie cookie = Cookie.of("SID", "31d4d96e407aad42").path("/").domain("example.com");
    assertEquals("SID=31d4d96e407aad42", cookieEncoder.encode(cookie));
}
/**
 * Writes every sampled value to the given stream, one decimal value per line using the
 * platform line separator and UTF-8 encoding. The writer (and thus the stream) is closed
 * when this method returns.
 */
@Override
public void dump(OutputStream output) {
    try (PrintWriter writer = new PrintWriter(new OutputStreamWriter(output, UTF_8))) {
        for (long value : values) {
            // println(long) appends the platform line separator, matching "%d%n".
            writer.println(value);
        }
    }
}
// dump writes one value per line using the platform line separator.
@Test
public void dumpsToAStream() {
    final ByteArrayOutputStream output = new ByteArrayOutputStream();
    snapshot.dump(output);
    assertThat(output.toString())
        .isEqualTo(String.format("1%n2%n3%n4%n5%n"));
}
// Marks the given UNPAID installment of the customer's credit as PAID and publishes the
// installment and credit events; when no UNPAID installments remain, the credit is set to
// FINISHED and a completion event is published as well.
@Transactional
public void payInstallment(String identificationNumber, int creditId, int installmentId) {
    Credit credit = creditRepository.findByIdAndIdentificationNumber(creditId, identificationNumber)
            .orElseThrow(() -> createCreditNotFoundException(creditId));
    // Locate the requested installment among this credit's UNPAID installments only.
    Installment installment = installmentRepository.findByCredit(credit)
            .stream()
            .filter(c -> c.getStatus() == InstallmentStatus.UNPAID)
            .filter(c -> c.getId() == installmentId)
            .findFirst()
            .orElseThrow(() -> createInstallmentNotFoundException(installmentId));
    installment.setStatus(InstallmentStatus.PAID);
    Installment savedInstallment = installmentRepository.save(installment);
    eventPublisherService.publishInstallment(getInstallmentDocumentFromInstallment(savedInstallment, credit));
    // If no UNPAID installments remain, finish the credit and publish the completion event.
    if (installmentRepository.findByCredit(credit).stream().noneMatch(c -> c.getStatus() == InstallmentStatus.UNPAID)) {
        credit.setStatus(CreditStatus.FINISHED);
        Credit finishedCredit = creditRepository.save(credit);
        eventPublisherService.publishPaidCreditInstallment(finishedCredit);
    }
    eventPublisherService.publishCredit(getCreditDocumentFromCredit(credit));
}
// An unknown credit id must raise GenericException and publish/persist nothing.
@Test
void payInstallment_creditNotFound() {
    // Arrange
    String identificationNumber = "1234567890";
    int creditId = 1;
    int installmentId = 1;
    when(creditRepository.findByIdAndIdentificationNumber(creditId, identificationNumber)).thenReturn(Optional.empty());
    // Act & Assert
    assertThrows(GenericException.class, () -> creditService.payInstallment(identificationNumber, creditId, installmentId));
    verify(installmentRepository, never()).save(any(Installment.class));
    verify(eventPublisherService, never()).publishInstallment(any(InstallmentDocument.class));
    verify(eventPublisherService, never()).publishCredit(any(CreditDocument.class));
}
// Returns all content pack revisions whose meta id matches the given id.
// NOTE(review): the DBCursor is not explicitly closed here — confirm the driver releases
// it once ImmutableSet.copyOf has fully iterated the cursor.
public Set<ContentPack> findAllById(ModelId id) {
    final DBCursor<ContentPack> result = dbCollection.find(DBQuery.is(Identified.FIELD_META_ID, id));
    return ImmutableSet.copyOf((Iterable<ContentPack>) result);
}
// Three stored revisions share the fixture meta id; all three must come back with that id.
@Test
@MongoDBFixtures("ContentPackPersistenceServiceTest.json")
public void findAllById() {
    final Set<ContentPack> contentPacks =
        contentPackPersistenceService.findAllById(ModelId.of("dcd74ede-6832-4ef7-9f69-deadbeef0000"));
    assertThat(contentPacks)
        .hasSize(3)
        .allMatch(contentPack -> contentPack.id().equals(ModelId.of("dcd74ede-6832-4ef7-9f69-deadbeef0000")));
}
// Classifies a send request from its message properties. Precedence: transaction flag
// beats sharding key (FIFO), which beats the various delay markers; default is NORMAL.
public static TopicMessageType getMessageType(SendMessageRequestHeader requestHeader) {
    Map<String, String> properties = MessageDecoder.string2messageProperties(requestHeader.getProperties());
    String traFlag = properties.get(MessageConst.PROPERTY_TRANSACTION_PREPARED);
    TopicMessageType topicMessageType = TopicMessageType.NORMAL;
    if (Boolean.parseBoolean(traFlag)) {
        topicMessageType = TopicMessageType.TRANSACTION;
    } else if (properties.containsKey(MessageConst.PROPERTY_SHARDING_KEY)) {
        topicMessageType = TopicMessageType.FIFO;
    } else if (properties.get("__STARTDELIVERTIME") != null
        || properties.get(MessageConst.PROPERTY_DELAY_TIME_LEVEL) != null
        || properties.get(MessageConst.PROPERTY_TIMER_DELIVER_MS) != null
        || properties.get(MessageConst.PROPERTY_TIMER_DELAY_SEC) != null
        || properties.get(MessageConst.PROPERTY_TIMER_DELAY_MS) != null) {
        topicMessageType = TopicMessageType.DELAY;
    }
    return topicMessageType;
}
// A delay-level property classifies the message as DELAY.
@Test
public void testGetMessageTypeAsDelayLevel() {
    SendMessageRequestHeader requestHeader = new SendMessageRequestHeader();
    Map<String, String> map = new HashMap<>();
    map.put(MessageConst.PROPERTY_DELAY_TIME_LEVEL, "1");
    requestHeader.setProperties(MessageDecoder.messageProperties2String(map));
    TopicMessageType result = BrokerMetricsManager.getMessageType(requestHeader);
    assertThat(TopicMessageType.DELAY).isEqualTo(result);
}
// Console logging plugin: builds a request-info buffer (URI, method, headers, query
// params), optionally desensitizing values that match the rule's keywords, then wraps the
// request and response so their bodies are appended to the buffer as they stream through.
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain,
                               final SelectorData selector, final RuleData rule) {
    CommonLoggingRuleHandle commonLoggingRuleHandle =
        LoggingConsolePluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(rule));
    Set<String> keywordSets = Sets.newHashSet();
    boolean desensitized = Boolean.FALSE;
    KeyWordMatch keyWordMatch = new KeyWordMatch(Collections.emptySet());
    if (Objects.nonNull(commonLoggingRuleHandle)) {
        // Masking is active only when keywords are configured AND the mask flag is on.
        String keywords = commonLoggingRuleHandle.getKeyword();
        desensitized = StringUtils.isNotBlank(keywords) && commonLoggingRuleHandle.getMaskStatus();
        if (desensitized) {
            Collections.addAll(keywordSets, keywords.split(";"));
            dataDesensitizeAlg = Optional.ofNullable(commonLoggingRuleHandle.getMaskType())
                .orElse(DataDesensitizeEnum.MD5_ENCRYPT.getDataDesensitizeAlg());
            keyWordMatch = new KeyWordMatch(keywordSets);
            LOG.info("current plugin:{}, keyword:{}, dataDesensitizedAlg:{}", this.named(), keywords, dataDesensitizeAlg);
        }
    }
    ServerHttpRequest request = exchange.getRequest();
    // "Print Request Info: "
    StringBuilder requestInfo = new StringBuilder().append(System.lineSeparator());
    requestInfo.append(getRequestUri(request, desensitized, keyWordMatch))
        .append(getRequestMethod(request, desensitized, keyWordMatch)).append(System.lineSeparator())
        .append(getRequestHeaders(request, desensitized, keyWordMatch)).append(System.lineSeparator())
        .append(getQueryParams(request, desensitized, keyWordMatch)).append(System.lineSeparator());
    return chain.execute(exchange.mutate()
        .request(new LoggingServerHttpRequest(request, requestInfo, desensitized, keyWordMatch))
        .response(new LoggingServerHttpResponse(exchange.getResponse(), requestInfo, desensitized, keyWordMatch))
        .build());
}
// Wires mutated request/response wrappers through the chain and verifies completion.
// NOTE(review): only completion is asserted; the logged content itself is not verified.
@Test
public void testDoExecute() {
    ServerWebExchange.Builder builder = mock(ServerWebExchange.Builder.class);
    when(exchange.mutate()).thenReturn(builder);
    when(builder.request(any(LoggingConsolePlugin.LoggingServerHttpRequest.class))).thenReturn(builder);
    when(builder.response(any(LoggingConsolePlugin.LoggingServerHttpResponse.class))).thenReturn(builder);
    when(builder.build()).thenReturn(exchange);
    when(chain.execute(any())).thenReturn(Mono.empty());
    Mono<Void> result = loggingConsolePlugin.doExecute(exchange, chain, selectorData, ruleData);
    StepVerifier.create(result).expectSubscription().verifyComplete();
}
// Returns the precomputed canonical string form held by this instance.
public String toString() {
    return string;
}
// Type, subtype, and parameter names are normalized to lower case; parameter values keep
// their original case.
@Test
public void testLowerCase() {
    assertEquals("text/plain", new MediaType("TEXT", "PLAIN").toString());
    assertEquals("text/plain", new MediaType("Text", "Plain").toString());
    Map<String, String> parameters = new HashMap<>();
    assertEquals("text/plain", new MediaType("text", "PLAIN", parameters).toString());
    parameters.put("CHARSET", "UTF-8");
    assertEquals("text/plain; charset=UTF-8", new MediaType("TEXT", "plain", parameters).toString());
    parameters.put("X-Eol-Style", "crlf");
    assertEquals("text/plain; charset=UTF-8; x-eol-style=crlf", new MediaType("TeXt", "PlAiN", parameters).toString());
}
// Quicksorts the entire data in place (inclusive bounds: 0 .. size-1).
public static void sort(Sortable data) {
    quickSort(data, 0, data.size() - 1);
}
// Sorting 200 random ints must match Arrays.sort on the same data.
// A fixed seed makes any failure reproducible (unseeded Math.random() was not).
@Test
public void testSort() {
    java.util.Random random = new java.util.Random(42L);
    int[] expected = new int[200];
    int[] actual = new int[expected.length];
    for (int i = 0; i < expected.length; i++) {
        // Values in [0, 900), mirroring the original (int) (Math.random() * 900) range.
        expected[i] = random.nextInt(900);
        actual[i] = expected[i];
    }
    Arrays.sort(expected);
    DataUtils.sort(new SimpleSortable(actual, actual.length));
    assertThat(actual, equalTo(expected));
}
/**
 * Decodes a byte array into a String-to-String map. Null or empty input yields an empty
 * map. Entries are read as alternating key/value strings; a pair is kept only when both
 * key and value are non-null.
 *
 * @throws DeserializationException when the underlying stream read fails
 */
public Map<String, String> decode(byte[] bytes) throws DeserializationException {
    final Map<String, String> result = new HashMap<String, String>();
    if (bytes == null || bytes.length == 0) {
        return result;
    }
    final UnsafeByteArrayInputStream in = new UnsafeByteArrayInputStream(bytes);
    try {
        // Keep consuming key/value pairs until the stream is exhausted.
        while (in.available() > 0) {
            final String key = readString(in);
            final String value = readString(in);
            if (key != null && value != null) {
                result.put(key, value);
            }
        }
        return result;
    } catch (IOException ex) {
        throw new DeserializationException(ex.getMessage(), ex);
    }
}
// TODO(review): empty test body — decode() is never exercised. Add assertions covering a
// round-trip plus edge cases (null and empty input).
@Test
public void decode() throws Exception {
}
// Complex-keys inline sharding. Range conditions cannot be evaluated by an inline
// expression, so they either pass through all targets (when allowed) or are rejected.
// Otherwise every combination of the per-column values is evaluated via the expression.
@Override
public Collection<String> doSharding(final Collection<String> availableTargetNames,
                                     final ComplexKeysShardingValue<Comparable<?>> shardingValue) {
    if (!shardingValue.getColumnNameAndRangeValuesMap().isEmpty()) {
        ShardingSpherePreconditions.checkState(allowRangeQuery,
                () -> new UnsupportedSQLOperationException(String.format("Since the property of `%s` is false, inline sharding algorithm can not tackle with range query", ALLOW_RANGE_QUERY_KEY)));
        return availableTargetNames;
    }
    Map<String, Collection<Comparable<?>>> columnNameAndShardingValuesMap = shardingValue.getColumnNameAndShardingValuesMap();
    // When sharding columns are declared, the query must supply a value for each of them.
    ShardingSpherePreconditions.checkState(shardingColumns.isEmpty() || shardingColumns.size() == columnNameAndShardingValuesMap.size(),
            () -> new MismatchedComplexInlineShardingAlgorithmColumnAndValueSizeException(shardingColumns.size(), columnNameAndShardingValuesMap.size()));
    return flatten(columnNameAndShardingValuesMap).stream().map(this::doSharding).collect(Collectors.toList());
}
/**
 * With expression t_order_${type % 2}_${order_id % 2} and a single value 2 for both
 * columns, exactly the t_order_0_0 target should be selected.
 */
@Test
void assertDoSharding() {
    Properties props = PropertiesBuilder.build(
            new Property("algorithm-expression", "t_order_${type % 2}_${order_id % 2}"),
            new Property("sharding-columns", "type,order_id"));
    ComplexInlineShardingAlgorithm algorithm =
            (ComplexInlineShardingAlgorithm) TypedSPILoader.getService(ShardingAlgorithm.class, "COMPLEX_INLINE", props);
    List<String> availableTargetNames = Arrays.asList("t_order_0_0", "t_order_0_1", "t_order_1_0", "t_order_1_1");
    Collection<String> actual = algorithm.doSharding(availableTargetNames, createComplexKeysShardingValue(Collections.singletonList(2)));
    assertTrue(1 == actual.size() && actual.contains("t_order_0_0"));
}
/**
 * Asynchronously runs the given SQL against the database described by databaseMeta
 * and feeds up to ROW_LIMIT result rows to the consumer.
 *
 * <p>On failure the error is logged and the consumer still receives an empty list,
 * so callers always get exactly one callback.
 */
public static void executeSql( DatabaseMeta databaseMeta, String sql, Consumer<List<Object[]>> rowConsumer ) {
    executeAction( databaseMeta, database -> {
        try {
            rowConsumer.accept( database.getRows( sql, ROW_LIMIT ) );
        } catch ( KettleDatabaseException | NullPointerException e ) {
            // NOTE(review): catching NullPointerException looks deliberate (getRows may NPE on a
            // failed connection) — confirm before narrowing this catch.
            logError( databaseMeta, e );
            rowConsumer.accept( Collections.emptyList() );
        }
    } );
}
@SuppressWarnings( "squid:S2699" ) // assertion is implicit. non-timeout validates that an error event was logged @Test public void testDbErrorsSqlAction() throws InterruptedException, ExecutionException, TimeoutException { dbMeta.setDatabaseType( "GENERIC" ); // causes incorrect jdbc url to be used. CompletableFuture<List<Object[]>> rowMetaCompletion = new CompletableFuture<>(); AsyncDatabaseAction.executeSql( dbMeta, "SELECT * FROM BAR", rowMetaCompletion::complete ); // blocks till an error message is logged, or timeoutexception errorLogListener.errorOccurred.get( COMPLETION_TIMEOUT, TimeUnit.MILLISECONDS ); }
/**
 * Sorts the given iteration pointers and merges overlapping ranges in place,
 * preserving the ordering guarantees of index scans.
 *
 * @param result     pointers to normalize; the list is modified in place and a
 *                   subList view of it is returned
 * @param descending direction every pointer in {@code result} must share
 * @return the normalized pointers (a subList of {@code result})
 */
@Nonnull
public static List<IndexIterationPointer> normalizePointers(@Nonnull List<IndexIterationPointer> result, boolean descending) {
    if (result.size() <= 1) {
        // single pointer, nothing to do
        return result;
    }
    // without the same ordering of pointers order of results would be unspecified
    assert result.stream().allMatch(r -> r.isDescending() == descending) : "All iteration pointers must have the same direction";
    // order of ranges is critical for preserving ordering of the results
    Collections.sort(result, descending ? POINTER_COMPARATOR_REVERSED : POINTER_COMPARATOR);
    // loop until we processed the last remaining pair
    //
    // do the normalization in place without extra shifts in the array
    // we write normalized pointers from the beginning
    int writeIdx = 0;
    IndexIterationPointer currentMerged = result.get(0);
    for (int nextPointerIdx = 1; nextPointerIdx < result.size(); nextPointerIdx++) {
        // compare current pointer with next one and merge if they overlap
        // otherwise go to next pointer
        // pointers might be ordered in descending way but util methods expect ascending order of arguments
        IndexIterationPointer next = result.get(nextPointerIdx);
        if (!descending && overlapsOrdered(currentMerged, next, OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)) {
            // merge overlapping ranges
            currentMerged = union(currentMerged, next, OrderedIndexStore.SPECIAL_AWARE_COMPARATOR);
        } else if (descending && overlapsOrdered(next, currentMerged, OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)) {
            // merge overlapping ranges (arguments flipped so the util sees ascending order)
            currentMerged = union(next, currentMerged, OrderedIndexStore.SPECIAL_AWARE_COMPARATOR);
        } else {
            // write current pointer and advance
            result.set(writeIdx++, currentMerged);
            currentMerged = next;
        }
    }
    // write last remaining pointer
    result.set(writeIdx++, currentMerged);
    // only the first writeIdx slots hold valid normalized pointers
    return result.subList(0, writeIdx);
}
/**
 * Non-overlapping ranges must be ordered (ascending or descending per the flag)
 * but never merged by normalizePointers.
 */
@Test
void normalizePointersOrder() {
    assertThat(normalizePointers(arrayListOf(
            pointer(singleton(6)),
            pointer(singleton(5))), false))
            .as("Should order and not merge non overlapping ranges")
            .containsExactly(pointer(singleton(5)), pointer(singleton(6)));
    assertThat(normalizePointers(arrayListOf(
            pointer(singleton(6)),
            pointer(lessThan(5))), false))
            .as("Should order and not merge non overlapping ranges")
            .containsExactly(pointer(lessThan(5)), pointer(singleton(6)));
    assertThat(normalizePointers(arrayListOf(
            pointer(greaterThan(6)),
            pointer(singleton(5))), false))
            .as("Should order and not merge non overlapping ranges")
            .containsExactly(pointer(singleton(5)), pointer(greaterThan(6)));
    // descending flag reverses the expected ordering
    assertThat(normalizePointers(arrayListOf(
            pointer(singleton(5), true),
            pointer(singleton(6), true)), true))
            .as("Should order and not merge non overlapping desc ranges")
            .containsExactly(pointer(singleton(6), true), pointer(singleton(5), true));
}
/**
 * Returns the projection field paths from the "fields" query parameter.
 *
 * <p>Accepts three representations: an already-built {@code Set<PathSpec>},
 * a serialized mask-tree string, or a raw {@code DataMap} mask.
 *
 * @return the requested field paths, or an empty set if the parameter is absent
 * @throws IllegalArgumentException if the parameter is a malformed mask string
 *         or of an unsupported type
 */
@SuppressWarnings("unchecked")
protected Set<PathSpec> getFields() {
    Object fields = _queryParams.get(RestConstants.FIELDS_PARAM);
    if (fields == null) {
        return Collections.emptySet();
    }
    if (fields instanceof Set) {
        // already parsed — trust the cast (hence the @SuppressWarnings)
        return (Set<PathSpec>) fields;
    } else if (fields instanceof String) {
        try {
            MaskTree tree = URIMaskUtil.decodeMaskUriFormat((String) fields);
            return tree.getOperations().keySet();
        } catch (IllegalMaskException e) {
            throw new IllegalArgumentException("Field param was a string and it did not represent a serialized mask tree", e);
        }
    } else if (fields instanceof DataMap) {
        MaskTree tree = new MaskTree((DataMap) fields);
        return tree.getOperations().keySet();
    }
    throw new IllegalArgumentException("Fields param is of unrecognized type: " + fields.getClass());
}
/**
 * A fields param of an unsupported type (Integer) must make getFields()
 * throw IllegalArgumentException.
 */
@Test(expectedExceptions = {IllegalArgumentException.class})
public void testInvalidFieldsParam() {
    GetRequest<TestRecord> getRequest =
            generateDummyRequestBuilder().setParam(RestConstants.FIELDS_PARAM, 100).build();
    getRequest.getFields();
}
/**
 * Looks up the persistent query registered under the given id.
 *
 * @param queryId id of the query to find
 * @return the query metadata, or {@link Optional#empty()} if no query is registered
 */
@Override
public Optional<PersistentQueryMetadata> getPersistentQuery(final QueryId queryId) {
    final PersistentQueryMetadata query = persistentQueries.get(queryId);
    return Optional.ofNullable(query);
}
/**
 * Closing a query must propagate the onClose callback to every registered listener.
 */
@Test
public void shouldCallListenerOnClose() {
    // Given:
    final QueryMetadata.Listener queryListener = givenCreateGetListener(registry, "foo");
    final QueryMetadata query = registry.getPersistentQuery(new QueryId("foo")).get();
    // When:
    queryListener.onClose(query);
    // Then:
    verify(listener1).onClose(query);
    verify(listener2).onClose(query);
}
protected String parseVersion(String output) { Matcher cdhMatcher = CDH_PATTERN.matcher(output); // Use CDH version if it is CDH if (cdhMatcher.find()) { String cdhVersion = cdhMatcher.group("cdhVersion"); return "cdh" + cdhVersion; } // Use Hadoop version otherwise String version = ""; Matcher matcher = HADOOP_PATTERN.matcher(output); if (matcher.find()) { version = matcher.group("version"); } return version; }
/**
 * Realistic CDH "hadoop version" output must be parsed to "cdh5.16.2"
 * rather than the embedded Hadoop version 2.6.0.
 */
@Test
public void cdhVersionParsing() {
    String versionStr = "Hadoop 2.6.0-cdh5.16.2\n"
            + "Subversion http://github.com/cloudera/hadoop -r "
            + "4f94d60caa4cbb9af0709a2fd96dc3861af9cf20\n"
            + "Compiled by jenkins on 2019-06-03T10:41Z\n"
            + "Compiled with protoc 2.5.0\n"
            + "From source with checksum 79b9b24a29c6358b53597c3b49575e37\n"
            + "This command was run using /usr/lib/hadoop/hadoop-common-2.6.0-cdh5.16.2.jar";
    HdfsVersionValidationTask task = new HdfsVersionValidationTask(CONF);
    String version = task.parseVersion(versionStr);
    assertEquals("cdh5.16.2", version);
}
/**
 * Reports whether this factory can handle the given path.
 *
 * @param path path to check; may be {@code null}
 * @return {@code true} iff the path is non-null and uses the s3a:// or s3:// scheme
 */
@Override
public boolean supportsPath(String path) {
    if (path == null) {
        return false;
    }
    return path.startsWith(Constants.HEADER_S3A) || path.startsWith(Constants.HEADER_S3);
}
/**
 * Factory must accept s3a:// and s3:// paths and reject s3n://, null,
 * scheme-less, and foreign-scheme paths.
 */
@Test
public void supportsPath() {
    assertTrue(mFactory1.supportsPath(mS3APath));
    assertTrue(mFactory1.supportsPath(mS3Path));
    assertFalse(mFactory1.supportsPath(mS3NPath));
    assertFalse(mFactory1.supportsPath(null));
    assertFalse(mFactory1.supportsPath("Invalid_Path"));
    assertFalse(mFactory1.supportsPath("hdfs://test-bucket/path"));
}
public ApolloAuditSpan getActiveSpan() { ApolloAuditSpan activeSpan = getActiveSpanFromContext(); if (activeSpan != null) { return activeSpan; } activeSpan = getActiveSpanFromHttp(); // might be null, root span generate should be done in other place return activeSpan; }
/**
 * With no span in the tracing context, getActiveSpan() must reconstruct the span
 * from the current request's audit headers (trace/span/operator/parent/follows-from),
 * leaving op type/name unset.
 */
@Test
public void testGetActiveSpanFromHttpRequestCaseInRequestThread() {
    final String httpParentId = "100010002";
    final String httpFollowsFromId = "100010003";
    {
        // no span would be in context
        Mockito.when(manager.activeSpan()).thenReturn(null);
        // in request thread
        HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
        RequestContextHolder.setRequestAttributes(new ServletRequestAttributes(request));
        Mockito.when(request.getHeader(Mockito.eq(ApolloAuditConstants.TRACE_ID)))
                .thenReturn(activeTraceId);
        Mockito.when(request.getHeader(Mockito.eq(ApolloAuditConstants.SPAN_ID)))
                .thenReturn(activeSpanId);
        Mockito.when(request.getHeader(Mockito.eq(ApolloAuditConstants.OPERATOR)))
                .thenReturn(operator);
        Mockito.when(request.getHeader(Mockito.eq(ApolloAuditConstants.PARENT_ID)))
                .thenReturn(httpParentId);
        Mockito.when(request.getHeader(Mockito.eq(ApolloAuditConstants.FOLLOWS_FROM_ID)))
                .thenReturn(httpFollowsFromId);
    }
    ApolloAuditSpan get = tracer.getActiveSpan();
    assertEquals(activeTraceId, get.traceId());
    assertEquals(activeSpanId, get.spanId());
    assertEquals(operator, get.operator());
    assertEquals(httpParentId, get.parentId());
    assertEquals(httpFollowsFromId, get.followsFromId());
    // headers carry no operation info, so these stay null
    assertNull(get.getOpType());
    assertNull(get.getOpName());
}
/**
 * Resolves a table by walking sub-schemas for every path component except the
 * last, then looking the table up in the innermost schema.
 *
 * @param schemaPlus root schema to start from
 * @param tablePath  schema components followed by the table name; must be non-empty
 * @return the resolved table, or {@code null} if the final lookup finds none
 * @throws IllegalStateException if an intermediate sub-schema is missing
 */
public static Table resolveCalciteTable(SchemaPlus schemaPlus, List<String> tablePath) {
    Schema subSchema = schemaPlus;

    // subSchema.getSubschema() for all except last
    for (int i = 0; i < tablePath.size() - 1; i++) {
        subSchema = subSchema.getSubSchema(tablePath.get(i));
        if (subSchema == null) {
            throw new IllegalStateException(
                    String.format(
                            "While resolving table path %s, no sub-schema found for component %s (\"%s\")",
                            tablePath, i, tablePath.get(i)));
        }
    }

    // for the final one call getTable()
    return subSchema.getTable(Iterables.getLast(tablePath));
}
/**
 * A single path component containing a dot ("fake.table") must be treated as one
 * table name, not split into schema + table.
 */
@Test
public void testResolveWithDots() {
    String tableName = "fake.table";
    when(mockSchemaPlus.getTable(tableName)).thenReturn(mockTable);
    Table table = TableResolution.resolveCalciteTable(mockSchemaPlus, ImmutableList.of(tableName));
    assertThat(table, Matchers.is(mockTable));
}
/**
 * Converts each resolved expression into a Predicate.
 *
 * @param resolvedExpressions expressions to convert; every element is expected to
 *                            be a CallExpression (a ClassCastException otherwise)
 * @return predicates in the same order as the input
 */
public static List<Predicate> fromExpression(List<ResolvedExpression> resolvedExpressions) {
    return resolvedExpressions.stream()
            .map(e -> fromExpression((CallExpression) e))
            .collect(Collectors.toList());
}
/**
 * DECIMAL comparisons cannot be pushed down, so And/Or/Not over decimal
 * greater-than/less-than predicates must all bind to a null filter.
 */
@Test
public void testDisablePredicatesPushDownForUnsupportedType() {
    FieldReferenceExpression fieldReference =
            new FieldReferenceExpression("f_decimal", DataTypes.DECIMAL(7, 2), 0, 0);
    ValueLiteralExpression valueLiteral = new ValueLiteralExpression(BigDecimal.valueOf(100.00));
    List<ResolvedExpression> expressions = Arrays.asList(fieldReference, valueLiteral);
    CallExpression greaterThanExpression =
            new CallExpression(BuiltInFunctionDefinitions.GREATER_THAN, expressions, DataTypes.DECIMAL(7, 2));
    Predicate greaterThanPredicate = fromExpression(greaterThanExpression);
    CallExpression lessThanExpression =
            new CallExpression(BuiltInFunctionDefinitions.LESS_THAN, expressions, DataTypes.DECIMAL(7, 2));
    Predicate lessThanPredicate = fromExpression(lessThanExpression);
    assertNull(And.getInstance().bindPredicates(greaterThanPredicate, lessThanPredicate).filter(),
            "Decimal type push down is unsupported, so we expect null");
    assertNull(Or.getInstance().bindPredicates(greaterThanPredicate, lessThanPredicate).filter(),
            "Decimal type push down is unsupported, so we expect null");
    assertNull(Not.getInstance().bindPredicate(greaterThanPredicate).filter(),
            "Decimal type push down is unsupported, so we expect null");
}
/**
 * Accepts an edge when the wrapped filter accepts it AND its name is
 * Jaro-Winkler-similar to the point hint (after stripping relation info and
 * normalizing), AND the edge lies within the point circle.
 *
 * <p>With an empty hint the name checks are skipped entirely. Guard ordering is
 * deliberate: cheap checks (delegate filter, empty hint, missing name, bbox)
 * run before the string-similarity computation.
 */
@Override
public final boolean accept(EdgeIteratorState iter) {
    if (!edgeFilter.accept(iter)) {
        return false;
    }
    if (pointHint.isEmpty()) {
        // no hint configured — name similarity is not a constraint
        return true;
    }
    String name = iter.getName();
    if (name == null || name.isEmpty()) {
        // a hint exists but the edge is unnamed: cannot match
        return false;
    }
    BBox bbox = createBBox(iter);
    if (!pointCircle.intersects(bbox)) {
        // edge is geographically out of range of the point
        return false;
    }
    name = removeRelation(name);
    String edgeName = prepareName(name);
    return isJaroWinklerSimilar(pointHint, edgeName);
}
/**
 * Dash/space/spelling variants of "Ben-Gurion-Straße" must match; an unrelated
 * street name must not.
 */
@Test
public void testWithDash() {
    EdgeIteratorState edge = createTestEdgeIterator("Ben-Gurion-Straße");
    assertTrue(createNameSimilarityEdgeFilter("Ben-Gurion").accept(edge));
    assertTrue(createNameSimilarityEdgeFilter("Ben Gurion").accept(edge));
    assertTrue(createNameSimilarityEdgeFilter("Ben Gurion Strasse").accept(edge));
    assertFalse(createNameSimilarityEdgeFilter("Potsdamer Str.").accept(edge));
}
/**
 * Analyzes the statement with the default mode (the boolean flag disabled).
 *
 * @param statement statement to analyze
 * @return the analysis result from the two-argument overload
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
/**
 * EXISTS subqueries in the SELECT list of a GROUP BY query: references to
 * grouped columns (or columns local to the subquery) are allowed, while
 * references to un-grouped outer columns must fail analysis.
 */
@Test
public void testGroupByWithExistsSelectExpression() {
    analyze("SELECT EXISTS(SELECT t1.a) FROM t1 GROUP BY a");
    analyze("SELECT EXISTS(SELECT a) FROM t1 GROUP BY t1.a");

    // u.a is not GROUP-ed BY and it is used in select Subquery expression
    analyze("SELECT EXISTS(SELECT u.a FROM (values 1) u(a)) " +
            "FROM t1 u GROUP BY b");

    assertFails(
            MUST_BE_AGGREGATE_OR_GROUP_BY,
            "line 1:22: Subquery uses 'u.a' which must appear in GROUP BY clause",
            "SELECT EXISTS(SELECT u.a from (values 1) x(a)) FROM t1 u GROUP BY b");
    assertFails(
            MUST_BE_AGGREGATE_OR_GROUP_BY,
            "line 1:22: Subquery uses 'a' which must appear in GROUP BY clause",
            "SELECT EXISTS(SELECT a+2) FROM t1 GROUP BY a+1");
    assertFails(
            MUST_BE_AGGREGATE_OR_GROUP_BY,
            "line 1:42: Subquery uses 'u.a' which must appear in GROUP BY clause",
            "SELECT EXISTS(SELECT 1 FROM t1 WHERE a = u.a) FROM t1 u GROUP BY b");

    // (t1.)a is not part of GROUP BY
    assertFails(MUST_BE_AGGREGATE_OR_GROUP_BY,
            "SELECT EXISTS(SELECT a as a) FROM t1 GROUP BY b");

    // u.a is not GROUP-ed BY but select Subquery expression is using a different (shadowing) u.a
    analyze("SELECT EXISTS(SELECT 1 FROM t1 u WHERE a = u.a) FROM t1 u GROUP BY b");
}
/**
 * Interpolates property placeholders in the text using the default syntax style.
 *
 * @param text       text containing placeholders to expand
 * @param properties values used for substitution
 * @return the interpolated text
 */
public static String interpolate(String text, Properties properties) {
    return interpolate(text, properties, SyntaxStyle.DEFAULT);
}
/**
 * Verifies ${key} substitution including recursive expansion of a nested
 * placeholder inside a property value.
 */
@Test
public void testInterpolate() {
    Properties prop = new Properties();
    prop.setProperty("key", "value");
    prop.setProperty("nested", "nested ${key}");
    String text = "This is a test of '${key}' '${nested}'";
    String expResults = "This is a test of 'value' 'nested value'";
    String results = InterpolationUtil.interpolate(text, prop);
    assertEquals(expResults, results);
}
/**
 * Aborts the current transaction, blocking up to maxBlockTimeMs for the abort
 * to complete and recording the abort latency metric.
 *
 * @throws ProducerFencedException if another producer with the same
 *         transactional id has fenced this one
 */
public void abortTransaction() throws ProducerFencedException {
    // both guards throw if the producer is not in a usable transactional state
    throwIfNoTransactionManager();
    throwIfProducerClosed();
    log.info("Aborting incomplete transaction");
    long abortStart = time.nanoseconds();
    TransactionalRequestResult result = transactionManager.beginAbort();
    // wake the sender so the EndTxn request goes out promptly
    sender.wakeup();
    result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS);
    producerMetrics.recordAbortTxn(time.nanoseconds() - abortStart);
}
/**
 * Drives init/begin/abort through a MockClient with canned FindCoordinator,
 * InitProducerId and EndTxn responses; success is the absence of exceptions.
 */
@Test
public void testAbortTransaction() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = newMetadata(0, 0, Long.MAX_VALUE);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);
    // responses consumed in order by initTransactions() and abortTransaction()
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    client.prepareResponse(endTxnResponse(Errors.NONE));
    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        producer.beginTransaction();
        producer.abortTransaction();
    }
}
/**
 * Decodes backslash escapes in the given string, operating on its UTF-8 bytes.
 *
 * <p>Supported sequences: {@code \\}, {@code \"}, {@code \t}, {@code \n},
 * {@code \r}, {@code \f} and {@code \xNN} (two hex digits producing one byte).
 *
 * @param source text to unescape
 * @return the unescaped string
 * @throws IllegalArgumentException on a truncated or unknown escape sequence
 */
public static String unescape(String source) {
    byte[] input = source.getBytes(UTF8);
    ByteArrayOutputStream decoded = new ByteArrayOutputStream();
    int pos = 0;
    while (pos < input.length) {
        byte current = input[pos];
        if (current != '\\') {
            decoded.write(current);
            pos++;
            continue;
        }
        if (pos + 1 == input.length)
            throw new IllegalArgumentException("Found backslash at end of input");
        byte marker = input[pos + 1];
        if (marker != (byte) 'x') {
            // single-character escape
            switch (marker) {
                case '\\' -> decoded.write('\\');
                case '"' -> decoded.write('"');
                case 't' -> decoded.write('\t');
                case 'n' -> decoded.write('\n');
                case 'r' -> decoded.write('\r');
                case 'f' -> decoded.write('\f');
                default -> throw new IllegalArgumentException(
                        "Illegal escape sequence \\" + ((char) marker) + " found");
            }
            pos += 2;
            continue;
        }
        // \xNN hex escape: needs two more bytes after the 'x'
        if (pos + 3 >= input.length)
            throw new IllegalArgumentException("Found \\x at end of input");
        String hexdigits = "" + ((char) input[pos + 2]) + ((char) input[pos + 3]);
        decoded.write((byte) Integer.parseInt(hexdigits, 16));
        pos += 4;
    }
    return decoded.toString(UTF8);
}
/**
 * Covers pass-through text, every single-character escape, \xNN hex escapes,
 * and mixed sequences.
 */
@Test
public void testUnescape() {
    assertEquals("abz019ABZ", StringUtilities.unescape("abz019ABZ"));
    assertEquals("\t", StringUtilities.unescape("\\t"));
    assertEquals("\n", StringUtilities.unescape("\\n"));
    assertEquals("\r", StringUtilities.unescape("\\r"));
    assertEquals("\"", StringUtilities.unescape("\\\""));
    assertEquals("\f", StringUtilities.unescape("\\f"));
    assertEquals("\\", StringUtilities.unescape("\\\\"));
    assertEquals("" + (char) 5, StringUtilities.unescape("\\x05"));
    assertEquals("\tA\ncombined\r" + ((char) 5) + "5test",
            StringUtilities.unescape("\\tA\\ncombined\\r\\x055test"));
    assertEquals("A space separated string",
            StringUtilities.unescape("A\\x20space\\x20separated\\x20string"));
}
/** Returns the configured maximum number of concurrent HTTP/2 streams per connection. */
public long maxConcurrentStreams() {
    return maxConcurrentStreams;
}
/**
 * Setting maxConcurrentStreams must be reflected by the built strategy while
 * the permit bounds keep their defaults.
 */
@Test
void maxConcurrentStreams() {
    builder.maxConcurrentStreams(2);
    Http2AllocationStrategy strategy = builder.build();
    assertThat(strategy.maxConcurrentStreams()).isEqualTo(2);
    assertThat(strategy.permitMaximum()).isEqualTo(DEFAULT_MAX_CONNECTIONS);
    assertThat(strategy.permitMinimum()).isEqualTo(DEFAULT_MIN_CONNECTIONS);
}
/**
 * Applies the topic config update in memory and then persists it under the
 * topic's name.
 *
 * @param topicConfig the config to apply and persist
 */
public void updateTopicConfig(final TopicConfig topicConfig) {
    updateSingleTopicConfigWithoutPersist(topicConfig);
    this.persist(topicConfig.getTopicName(), topicConfig);
}
/**
 * Deleting an attribute key ("-key") that was never set must be rejected with
 * a RuntimeException naming the missing key.
 */
@Test
public void testDeleteNonexistentKeyOnUpdating() {
    String key = "nonexisting.key";
    supportAttributes(asList(
            new EnumAttribute("enum.key", true, newHashSet("enum-1", "enum-2", "enum-3"), "enum-1"),
            new BooleanAttribute("bool.key", false, false),
            new LongRangeAttribute("long.range.key", true, 10, 20, 15)
    ));
    // first update: add two attributes so the topic exists
    Map<String, String> attributes = new HashMap<>();
    attributes.put("+enum.key", "enum-2");
    attributes.put("+bool.key", "true");
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopicName("new-topic");
    topicConfig.setAttributes(attributes);
    topicConfigManager.updateTopicConfig(topicConfig);
    // second update: attempt to delete an attribute that was never set
    attributes = new HashMap<>();
    attributes.clear();
    attributes.put("-" + key, "");
    topicConfig.setAttributes(attributes);
    RuntimeException runtimeException = Assert.assertThrows(RuntimeException.class,
            () -> topicConfigManager.updateTopicConfig(topicConfig));
    Assert.assertEquals("attempt to delete a nonexistent key: " + key, runtimeException.getMessage());
}
/**
 * REST endpoint returning cluster information as UTF-8 JSON or XML.
 *
 * @return the current cluster info
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterInfo get() {
    return getClusterInfo();
}
/**
 * GET /ws/v1/cluster/metrics/ (note the trailing slash) must still return
 * valid UTF-8 JSON cluster metrics.
 */
@Test
public void testClusterMetricsSlash() throws JSONException, Exception {
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster")
            .path("metrics/").accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
            response.getType().toString());
    JSONObject json = response.getEntity(JSONObject.class);
    verifyClusterMetricsJSON(json);
}
/**
 * Scans every method of the initialized bean with the recurring-job finder
 * callback (which registers @Recurring-annotated methods) and returns the
 * bean unchanged.
 */
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
    ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
    return bean;
}
/**
 * A @Recurring method with an unsupported signature must make the post-processor
 * fail fast with an IllegalStateException explaining the allowed parameter shapes.
 */
@Test
void beansWithUnsupportedMethodsAnnotatedWithRecurringAnnotationWillThrowException() {
    final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();

    assertThatThrownBy(() -> recurringJobPostProcessor.postProcessAfterInitialization(new MyUnsupportedService(), "not important"))
            .isInstanceOf(IllegalStateException.class)
            .hasMessage("Methods annotated with " + Recurring.class.getName() + " can only have zero parameters or a single parameter of type JobContext.");
}
/**
 * Returns the current position of the wrapped Hadoop input stream.
 *
 * @throws IOException if the underlying stream fails
 */
@Override
public long getPos() throws IOException {
    return fsDataInputStream.getPos();
}
/**
 * Exercises seeks around the MIN_SKIP_BYTES threshold (forward/backward,
 * small/large deltas, position zero) and verifies that negative targets fail.
 */
@Test
public void testSeekSkip() throws IOException {
    verifyInputStream = spy(
            new FSDataInputStream(
                    new SeekableByteArrayInputStream(
                            new byte[2 * HadoopDataInputStream.MIN_SKIP_BYTES])));
    testInputStream = new HadoopDataInputStream(verifyInputStream);
    seekAndAssert(10);
    seekAndAssert(10 + HadoopDataInputStream.MIN_SKIP_BYTES + 1);
    seekAndAssert(testInputStream.getPos() - 1);
    seekAndAssert(testInputStream.getPos() + 1);
    seekAndAssert(testInputStream.getPos() - HadoopDataInputStream.MIN_SKIP_BYTES);
    seekAndAssert(testInputStream.getPos());
    seekAndAssert(0);
    seekAndAssert(testInputStream.getPos() + HadoopDataInputStream.MIN_SKIP_BYTES);
    seekAndAssert(testInputStream.getPos() + HadoopDataInputStream.MIN_SKIP_BYTES - 1);

    // negative seek targets must raise an exception
    try {
        seekAndAssert(-1);
        Assert.fail();
    } catch (Exception ignore) {
    }

    try {
        seekAndAssert(-HadoopDataInputStream.MIN_SKIP_BYTES - 1);
        Assert.fail();
    } catch (Exception ignore) {
    }
}
/**
 * Prepares the consumer for (re)joining the group.
 *
 * <p>Phase 1: if auto-commit is enabled, fire an async offset commit and wait for
 * it (bounded by both the caller's timer and a rebalance-timeout timer) — retrying
 * on retriable failures, giving up on timeout or non-retriable errors. Phase 2:
 * revoke owned partitions according to the protocol (all for EAGER or a reset
 * generation; only unsubscribed ones for COOPERATIVE) and invoke the user's
 * rebalance callbacks.
 *
 * @return {@code true} when preparation is complete and the join can proceed;
 *         {@code false} when the caller should poll again (commit still in flight)
 * @throws KafkaException if a user rebalance callback throws
 */
@Override
protected boolean onJoinPrepare(Timer timer, int generation, String memberId) {
    log.debug("Executing onJoinPrepare with generation {} and memberId {}", generation, memberId);
    if (joinPrepareTimer == null) {
        // We should complete onJoinPrepare before rebalanceTimeoutMs,
        // and continue to join group to avoid member got kicked out from group
        joinPrepareTimer = time.timer(rebalanceConfig.rebalanceTimeoutMs);
    } else {
        joinPrepareTimer.update();
    }

    // async commit offsets prior to rebalance if auto-commit enabled
    // and there is no in-flight offset commit request
    if (autoCommitEnabled && autoCommitOffsetRequestFuture == null) {
        maybeMarkPartitionsPendingRevocation();
        autoCommitOffsetRequestFuture = maybeAutoCommitOffsetsAsync();
    }

    // wait for commit offset response before timer expired
    if (autoCommitOffsetRequestFuture != null) {
        // poll against whichever deadline is closer: caller's timer or the rebalance timeout
        Timer pollTimer = timer.remainingMs() < joinPrepareTimer.remainingMs() ? timer : joinPrepareTimer;
        client.poll(autoCommitOffsetRequestFuture, pollTimer);
        joinPrepareTimer.update();

        // Keep retrying/waiting the offset commit when:
        // 1. offset commit haven't done (and joinPrepareTimer not expired)
        // 2. failed with retriable exception (and joinPrepareTimer not expired)
        // Otherwise, continue to revoke partitions, ex:
        // 1. if joinPrepareTimer has expired
        // 2. if offset commit failed with non-retriable exception
        // 3. if offset commit success
        boolean onJoinPrepareAsyncCommitCompleted = true;
        if (joinPrepareTimer.isExpired()) {
            log.error("Asynchronous auto-commit of offsets failed: joinPrepare timeout. Will continue to join group");
        } else if (!autoCommitOffsetRequestFuture.isDone()) {
            onJoinPrepareAsyncCommitCompleted = false;
        } else if (autoCommitOffsetRequestFuture.failed() && autoCommitOffsetRequestFuture.isRetriable()) {
            log.debug("Asynchronous auto-commit of offsets failed with retryable error: {}. Will retry it.",
                    autoCommitOffsetRequestFuture.exception().getMessage());
            onJoinPrepareAsyncCommitCompleted = false;
        } else if (autoCommitOffsetRequestFuture.failed() && !autoCommitOffsetRequestFuture.isRetriable()) {
            log.error("Asynchronous auto-commit of offsets failed: {}. Will continue to join group.",
                    autoCommitOffsetRequestFuture.exception().getMessage());
        }
        if (autoCommitOffsetRequestFuture.isDone()) {
            autoCommitOffsetRequestFuture = null;
        }
        if (!onJoinPrepareAsyncCommitCompleted) {
            // back off before the caller retries, without exceeding the remaining time
            pollTimer.sleep(Math.min(pollTimer.remainingMs(), rebalanceConfig.retryBackoffMs));
            timer.update();
            return false;
        }
    }

    // the generation / member-id can possibly be reset by the heartbeat thread
    // upon getting errors or heartbeat timeouts; in this case whatever is previously
    // owned partitions would be lost, we should trigger the callback and cleanup the assignment;
    // otherwise we can proceed normally and revoke the partitions depending on the protocol,
    // and in that case we should only change the assignment AFTER the revoke callback is triggered
    // so that users can still access the previously owned partitions to commit offsets etc.
    Exception exception = null;
    final SortedSet<TopicPartition> revokedPartitions = new TreeSet<>(COMPARATOR);
    if (generation == Generation.NO_GENERATION.generationId || memberId.equals(Generation.NO_GENERATION.memberId)) {
        revokedPartitions.addAll(subscriptions.assignedPartitions());

        if (!revokedPartitions.isEmpty()) {
            log.info("Giving away all assigned partitions as lost since generation/memberID has been reset," +
                    "indicating that consumer is in old state or no longer part of the group");
            exception = rebalanceListenerInvoker.invokePartitionsLost(revokedPartitions);

            subscriptions.assignFromSubscribed(Collections.emptySet());
        }
    } else {
        switch (protocol) {
            case EAGER:
                // revoke all partitions
                revokedPartitions.addAll(subscriptions.assignedPartitions());
                exception = rebalanceListenerInvoker.invokePartitionsRevoked(revokedPartitions);

                subscriptions.assignFromSubscribed(Collections.emptySet());

                break;

            case COOPERATIVE:
                // only revoke those partitions that are not in the subscription anymore.
                Set<TopicPartition> ownedPartitions = new HashSet<>(subscriptions.assignedPartitions());
                revokedPartitions.addAll(ownedPartitions.stream()
                        .filter(tp -> !subscriptions.subscription().contains(tp.topic()))
                        .collect(Collectors.toSet()));

                if (!revokedPartitions.isEmpty()) {
                    exception = rebalanceListenerInvoker.invokePartitionsRevoked(revokedPartitions);

                    ownedPartitions.removeAll(revokedPartitions);
                    subscriptions.assignFromSubscribed(ownedPartitions);
                }

                break;
        }
    }

    isLeader = false;
    subscriptions.resetGroupSubscription();
    // reset per-join state so the next rebalance starts cleanly
    joinPrepareTimer = null;
    autoCommitOffsetRequestFuture = null;
    timer.update();

    if (exception != null) {
        throw new KafkaException("User rebalance callback throws an error", exception);
    }
    return true;
}
/**
 * A non-retriable commit error (UNKNOWN_MEMBER_ID) during onJoinPrepare must not
 * block the join: the method returns true with no pending/in-flight requests.
 */
@Test
public void testOnJoinPrepareWithOffsetCommitShouldKeepJoinAfterNonRetryableException() {
    try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, true, Optional.empty(), false)) {
        int generationId = 42;
        String memberId = "consumer-42";
        Timer pollTimer = time.timer(100L);
        client.prepareResponse(offsetCommitResponse(singletonMap(t1p, Errors.UNKNOWN_MEMBER_ID)));
        boolean res = coordinator.onJoinPrepare(pollTimer, generationId, memberId);
        assertTrue(res);
        assertFalse(client.hasPendingResponses());
        assertFalse(client.hasInFlightRequests());
        assertFalse(coordinator.coordinatorUnknown());
    }
}
/** Exposes the server stats for tests only. */
@VisibleForTesting
ServerStats getServerStats() {
    return this.serverStats;
}
/**
 * The EMPTY sentinel discovery result must report the "no stats configured"
 * placeholder in its server stats.
 */
@Test
void serverStatsForEmptySentinel() {
    Truth.assertThat(DiscoveryResult.EMPTY.getServerStats().toString()).isEqualTo("no stats configured for server");
}
/**
 * Serializes this retracted element: a stamp attribute on the opening tag
 * wrapping the origin-id child element.
 */
@Override
public XmlStringBuilder toXML(XmlEnvironment xmlEnvironment) {
    return new XmlStringBuilder(this)
            .attribute(ATTR_STAMP, getStamp())
            .rightAngleBracket()
            .append(getOriginId())
            .closeElement(this);
}
/**
 * A RetractedElement with a known stamp and origin-id must serialize to the
 * expected <retracted> XML (compared structurally, not byte-for-byte).
 */
@Test
public void serializationTest() throws ParseException {
    Date stamp = XmppDateTime.parseXEP0082Date("2019-09-20T23:08:25.000+00:00");
    OriginIdElement originId = new OriginIdElement("origin-id-1");
    RetractedElement retractedElement = new RetractedElement(stamp, originId);
    String expectedXml = "" +
            "<retracted stamp='2019-09-20T23:08:25.000+00:00' xmlns='urn:xmpp:message-retract:0'>\n" +
            "  <origin-id xmlns='urn:xmpp:sid:0' id='origin-id-1'/>\n" +
            "</retracted>";
    assertXmlSimilar(expectedXml, retractedElement.toXML());
}
/**
 * Finds a constructor of {@code type} matching the argument types, coercing
 * empty-collection arguments (at the given indexes) between collection classes
 * until a constructor resolves.
 *
 * @param type                           class whose constructors are searched; non-null
 * @param arguments                      candidate argument expressions; non-null
 * @param emptyCollectionArgumentsIndexes indexes into {@code arguments} whose collection
 *                                        class may be switched during resolution; non-null
 * @return the (possibly coerced) argument list for a resolved constructor, or the
 *         original {@code arguments} if no constructor matches any combination
 * @throws IllegalArgumentException if more coercible indexes than arguments are given
 */
public static List<TypedExpression> coerceCorrectConstructorArguments(
        final Class<?> type,
        List<TypedExpression> arguments,
        List<Integer> emptyCollectionArgumentsIndexes) {
    Objects.requireNonNull(type, "Type parameter cannot be null as the method searches constructors from that class!");
    Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
    Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
    if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
        throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
                + "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
    }
    // Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
    final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
    Constructor<?> constructor = resolveConstructor(type, coercedArgumentsTypesList);
    if (constructor != null) {
        return coercedArgumentsTypesList;
    } else {
        // This needs to go through all possible combinations.
        // Outer loop fixes a prefix of switched indexes; inner loop toggles each remaining
        // index in turn, restoring it if the constructor still does not resolve.
        final int indexesListSize = emptyCollectionArgumentsIndexes.size();
        for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
            for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
                switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
                constructor = resolveConstructor(type, coercedArgumentsTypesList);
                if (constructor != null) {
                    return coercedArgumentsTypesList;
                }
                // undo the toggle before trying the next index
                switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
            }
            // permanently switch the prefix index for the next outer iteration
            switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
        }
        // No constructor found, return the original arguments.
        return arguments;
    }
}
/**
 * When the original list-typed argument already matches a Person constructor,
 * the arguments must come back unchanged.
 */
@Test
public void coerceCorrectConstructorArgumentsList() {
    final List<TypedExpression> arguments =
            List.of(new ListExprT(new ListCreationLiteralExpression(null, NodeList.nodeList())));
    final List<TypedExpression> coercedArguments =
            MethodResolutionUtils.coerceCorrectConstructorArguments(
                    Person.class,
                    arguments,
                    List.of(0));
    Assertions.assertThat(coercedArguments).containsExactlyElementsOf(arguments);
}
/**
 * Synchronously fetches the values of the given bucket keys.
 *
 * @param keys bucket names to load
 * @return map of key to value as produced by the async variant
 */
@Override
public <V> Map<String, V> get(String... keys) {
    // delegate to the async API and block on its result
    return commandExecutor.get(getAsync(keys));
}
/**
 * Bulk get over four buckets (two set, two deleted) must return only the
 * existing key/value pairs.
 */
@Test
public void testGet() {
    redisson.getBucket("test1").set("someValue1");
    redisson.getBucket("test2").delete();
    redisson.getBucket("test3").set("someValue3");
    redisson.getBucket("test4").delete();

    Map<String, String> result = redisson.getBuckets().get("test1", "test2", "test3", "test4");
    Map<String, String> expected = new HashMap<String, String>();
    expected.put("test1", "someValue1");
    expected.put("test3", "someValue3");
    assertThat(expected).isEqualTo(result);
}