focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public String getUrl() {
  // Expose the raw, user-typed URL; a missing url maps to null.
  if (url == null) {
    return null;
  }
  return url.originalArgument();
}
// Verifies that getUrl() echoes back exactly the URL the material config was created with.
@Test void shouldReturnTheUrl() { String url = "git@github.com/my/repo"; HgMaterialConfig config = hg(url, null); assertThat(config.getUrl()).isEqualTo(url); }
/**
 * Splittable-DoFn body for DetectNewPartitions: on the first claim (offset 0) it seeds the
 * pipeline (resume or initial partitions); on subsequent runs it reads the persisted watermark,
 * stops once endTime is reached, processes NewPartitions, periodically recalculates the low
 * watermark, and (after 50 runs) reconciles missing/orphaned partitions.
 * NOTE(review): this snippet was flattened to single lines by the dataset export; mid-line
 * "//" comments swallow the remainder of each physical line — reformat before compiling.
 */
@VisibleForTesting public ProcessContinuation run( RestrictionTracker<OffsetRange, Long> tracker, OutputReceiver<PartitionRecord> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator, InitialPipelineState initialPipelineState) throws Exception { LOG.debug("DNP: Watermark: " + watermarkEstimator.getState()); LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom()); if (tracker.currentRestriction().getFrom() == 0L) { if (!tracker.tryClaim(0L)) { LOG.error( "Could not claim initial DetectNewPartition restriction. No partitions are outputted."); return ProcessContinuation.stop(); } watermarkEstimator.setWatermark(initialPipelineState.getStartTime()); if (initialPipelineState.isResume()) { resumeFromPreviousPipelineAction.run(receiver); } else { generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime()); } return ProcessContinuation.resume(); } // Create a new partition reconciler every run to reset the state each time. partitionReconciler = new PartitionReconciler(metadataTableDao, metrics); orphanedMetadataCleaner = new OrphanedMetadataCleaner(); // Calculating the new value of watermark is a resource intensive process. We have to do a full // scan of the metadata table and then ensure we're not missing partitions and then calculate // the low watermark. This is usually a fairly fast process even with thousands of partitions. // However, sometimes this may take so long that the runner checkpoints before the watermark is // calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to // restart, wasting the resources spent calculating the watermark. On restart, we will try to // calculate the watermark again. The problem causing the slow watermark calculation can persist // leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate // the watermark after successful tryClaim. Then we write to the metadata table the new // watermark. 
// On the start of each run we read the watermark and update the DoFn's watermark.
DetectNewPartitionsState detectNewPartitionsState = metadataTableDao.readDetectNewPartitionsState(); if (detectNewPartitionsState != null) { watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark()); } // Terminate if endTime <= watermark that means all partitions have read up to or beyond // watermark. We no longer need to manage splits and merges, we can terminate. if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) { tracker.tryClaim(tracker.currentRestriction().getTo()); return ProcessContinuation.stop(); } if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) { LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction()); return ProcessContinuation.stop(); } // Read StreamPartitions to calculate watermark. List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null; if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) { streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark(); } // Process NewPartitions and track the ones successfully outputted. List<NewPartition> newPartitions = metadataTableDao.readNewPartitions(); List<ByteStringRange> outputtedNewPartitions = new ArrayList<>(); for (NewPartition newPartition : newPartitions) { if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) { outputtedNewPartitions.add(newPartition.getPartition()); } else if (streamPartitionsWithWatermark != null) { // streamPartitionsWithWatermark is not null on runs that we update watermark. We only run // reconciliation when we update watermark. Only add incompleteNewPartitions if // reconciliation is being run partitionReconciler.addIncompleteNewPartitions(newPartition); orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition); } } // Process the watermark using read StreamPartitions and NewPartitions. 
if (streamPartitionsWithWatermark != null) { Optional<Instant> maybeWatermark = getNewWatermark(streamPartitionsWithWatermark, newPartitions); maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark); // Only start reconciling after the pipeline has been running for a while. if (tracker.currentRestriction().getFrom() > 50) { // Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being // streamed. This isn't perfect because there may be partitions moving between // StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not // include NewPartitions marked as deleted from a previous DNP run not yet processed by // RCSP. List<ByteStringRange> existingPartitions = streamPartitionsWithWatermark.stream() .map(StreamPartitionWithWatermark::getPartition) .collect(Collectors.toList()); existingPartitions.addAll(outputtedNewPartitions); List<ByteStringRange> missingStreamPartitions = getMissingPartitionsFromEntireKeySpace(existingPartitions); orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions); partitionReconciler.addMissingPartitions(missingStreamPartitions); processReconcilerPartitions( receiver, watermarkEstimator, initialPipelineState.getStartTime()); cleanUpOrphanedMetadata(); } } return ProcessContinuation.resume().withResumeDelay(Duration.millis(100)); }
/**
 * Covers the full keyspace with two partitions whose watermarks exceed endTime, then checks:
 * run 1 persists the lower watermark (watermark2) to the metadata table without touching this
 * run's estimator; run 2 reads it back, sees watermark >= endTime, claims getTo() and stops.
 */
@Test public void testAdvanceWatermarkWithAllPartitions() throws Exception { // We advance watermark on every 2 restriction tracker advancement OffsetRange offsetRange = new OffsetRange(10, Long.MAX_VALUE); when(tracker.currentRestriction()).thenReturn(offsetRange); when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true); when(tracker.tryClaim(offsetRange.getTo())).thenReturn(true); assertEquals(startTime, watermarkEstimator.currentWatermark()); assertNull(metadataTableDao.readDetectNewPartitionsState()); // Write 2 partitions to the table that covers entire keyspace. ByteStringRange partition1 = ByteStringRange.create("", "b"); Instant watermark1 = endTime.plus(Duration.millis(100)); PartitionRecord partitionRecord1 = new PartitionRecord( partition1, watermark1, UniqueIdGenerator.getNextId(), watermark1, Collections.emptyList(), null); metadataTableDao.lockAndRecordPartition(partitionRecord1); ByteStringRange partition2 = ByteStringRange.create("b", ""); Instant watermark2 = endTime.plus(Duration.millis(1)); PartitionRecord partitionRecord2 = new PartitionRecord( partition2, watermark2, UniqueIdGenerator.getNextId(), watermark2, Collections.emptyList(), null); metadataTableDao.lockAndRecordPartition(partitionRecord2); // Updating watermark does not affect the watermark estimator of this run. assertEquals( DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)), action.run( tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false))); verify(tracker, times(1)).tryClaim(offsetRange.getFrom()); // Because the 2 partitions cover the entire keyspace, the watermark should have advanced. // Also note the watermark is watermark2 which is the lowest of the 2 watermarks. Watermark // estimator isn't updated because we update the watermark estimator on the start of the run. We // do update the metadata table with the new watermark value. 
assertEquals(startTime, watermarkEstimator.currentWatermark()); assertEquals(watermark2, metadataTableDao.readDetectNewPartitionsState().getWatermark()); // On the 2nd run, watermark estimator is updated which is beyond endTime and terminates. assertEquals( DoFn.ProcessContinuation.stop(), action.run( tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false))); verify(tracker, times(1)).tryClaim(offsetRange.getTo()); assertEquals(watermark2, watermarkEstimator.currentWatermark()); }
// Thin controller method: forwards the model to the dispatcher, which owns view rendering.
public void updateView(GiantModel giantModel) { dispatcher.updateView(giantModel); }
// Smoke test: wiring controller -> dispatcher -> view and dispatching a model must not throw.
@Test void testUpdateView() { final var model = new GiantModel("giant1", Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED); GiantView giantView = new GiantView(); Dispatcher dispatcher = new Dispatcher(giantView); GiantController giantController = new GiantController(dispatcher); assertDoesNotThrow(() -> giantController.updateView(model)); }
// Fetches object metadata for `key`; returns null (treated as "not found") for missing objects,
// for directories in a PFS bucket, for file keys with a trailing "/" in PFS, and on any ObsException.
@Override protected ObjectStatus getObjectStatus(String key) { try { ObjectMetadata meta = mClient.getObjectMetadata(mBucketName, key); if (meta == null) { return null; } if (isEnvironmentPFS()) { /** * When in PFS environment: * 1. Directory will be explicitly created and have object meta. * 2. File will have object meta even if there is `/` at * the end of the file name (e.g. `/dir1/file1/`). * However we should return null meta here. */ if (isDirectoryInPFS(meta)) { return null; } if (!isDirectoryInPFS(meta) && key.endsWith(PATH_SEPARATOR)) { return null; } } Date lastModifiedDate = meta.getLastModified(); Long lastModifiedTime = lastModifiedDate == null ? null : lastModifiedDate.getTime(); return new ObjectStatus(key, meta.getEtag(), meta.getContentLength(), lastModifiedTime); } catch (ObsException e) { LOG.warn("Failed to get Object {}, return null", key, e); return null; } }
// In a plain OBS (non-PFS) bucket, both a non-empty file and a zero-length "dir" object must
// yield a non-null status (no PFS filtering applies).
@Test public void nullObjectMetaTest() throws Exception { ObjectMetadata fileMeta = new ObjectMetadata(); fileMeta.setContentLength(10L); ObjectMetadata dirMeta = new ObjectMetadata(); dirMeta.setContentLength(0L); /** * /xx/file1/ ( File1 actually exists, which is a file) , there is / after file1 name. * When OBS, the path object meta is null. * When PFS, the path object meta is not null. The object meta is same as /xx/file1 */ Mockito.when(mClient.getObjectMetadata(BUCKET_NAME, "pfs_file1")) .thenReturn(fileMeta); Mockito.when(mClient.getObjectMetadata(BUCKET_NAME, "dir1")) .thenReturn(dirMeta); mOBSUnderFileSystem = new OBSUnderFileSystem(new AlluxioURI(""), mClient, BUCKET_NAME, "obs", UnderFileSystemConfiguration.defaults(Configuration.global())); Assert.assertNotNull(mOBSUnderFileSystem.getObjectStatus("pfs_file1")); Assert.assertNotNull(mOBSUnderFileSystem.getObjectStatus("dir1")); }
// Spring bean factory: registers the cache-backed MetaDataSubscriber under the bean name
// "metaDataCacheSubscriber" (derived from the method name).
@Bean public MetaDataSubscriber metaDataCacheSubscriber() { return new MetaDataCacheSubscriber(); }
// Verifies the bean is registered by name and assignable to the MetaDataSubscriber interface.
@Test public void testMetaDataCacheSubscriber() { applicationContextRunner.run(context -> { MetaDataSubscriber subscriber = context.getBean("metaDataCacheSubscriber", MetaDataSubscriber.class); assertNotNull(subscriber); } ); }
// URL-rewriting modifier: extracts a session value from the previous response using one of five
// extractors (chosen by the pathExtension/noEquals/noQuestionmark flags), optionally caches the
// last non-empty value across samplers (Bug 15025), then rewrites the current HTTP sampler.
// Non-HTTP samplers and missing previous results are ignored.
@Override public void process() { JMeterContext ctx = getThreadContext(); Sampler sampler = ctx.getCurrentSampler(); if (!(sampler instanceof HTTPSamplerBase)) {// Ignore non-HTTP samplers return; } SampleResult responseText = ctx.getPreviousResult(); if (responseText == null) { return; } initRegex(getArgumentName()); String text = responseText.getResponseDataAsString(); String value; if (isPathExtension() && isPathExtensionNoEquals() && isPathExtensionNoQuestionmark()) { value = pathExtensionNoEqualsNoQuestionmarkExtractor.apply(text); } else if (isPathExtension() && isPathExtensionNoEquals()) { // && !isPathExtensionNoQuestionmark() value = pathExtensionNoEqualsQuestionmarkExtractor.apply(text); } else if (isPathExtension() && isPathExtensionNoQuestionmark()) { // && !isPathExtensionNoEquals() value = pathExtensionEqualsNoQuestionmarkExtractor.apply(text); } else if (isPathExtension()) { // && !isPathExtensionNoEquals() && !isPathExtensionNoQuestionmark() value = pathExtensionEqualsQuestionmarkExtractor.apply(text); } else { // if ! isPathExtension() value = parameterExtractor.apply(text); } // Bug 15025 - save session value across samplers if (shouldCache()) { if (value == null || value.isEmpty()) { value = savedValue; } else { savedValue = value; } } modify((HTTPSamplerBase) sampler, value); }
// A non-HTTP sampler must be ignored: process() returns early without throwing.
@Test public void testNonHTTPSampler() throws Exception { Sampler sampler = new NullSampler(); response = new SampleResult(); context.setCurrentSampler(sampler); context.setPreviousResult(response); mod.process(); }
// Chroot wrapper: translates snapshotDir into the underlying filesystem's namespace
// via fullPath() before delegating the snapshot deletion.
@Override public void deleteSnapshot(Path snapshotDir, String snapshotName) throws IOException { super.deleteSnapshot(fullPath(snapshotDir), snapshotName); }
// Deleting "/snapPath" through a chroot at /a/b must reach the raw FS as "/a/b/snapPath".
@Test(timeout = 30000) public void testDeleteSnapshot() throws Exception { Path snapRootPath = new Path("/snapPath"); Path chRootedSnapRootPath = new Path("/a/b/snapPath"); Configuration conf = new Configuration(); conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class); URI chrootUri = URI.create("mockfs://foo/a/b"); ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf); FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem()) .getRawFileSystem(); chrootFs.deleteSnapshot(snapRootPath, "snap1"); verify(mockFs).deleteSnapshot(chRootedSnapRootPath, "snap1"); }
// Convenience overload: Cholesky decomposition without overwriting this matrix (in-place = false).
public Matrix.Cholesky cholesky() { return cholesky(false); }
// Checks the factor L (by absolute value, since the factor's column signs are not unique),
// then solves Ax = b for a vector and AX = B for a matrix against known references.
@Test public void testCholesky() { System.out.println("Cholesky"); float[][] A = { {0.9000f, 0.4000f, 0.7000f}, {0.4000f, 0.5000f, 0.3000f}, {0.7000f, 0.3000f, 0.8000f} }; float[][] L = { {0.9486833f, 0.00000000f, 0.0000000f}, {0.4216370f, 0.56764621f, 0.0000000f}, {0.7378648f, -0.01957401f, 0.5051459f} }; Matrix a = Matrix.of(A); a.uplo(UPLO.LOWER); Matrix.Cholesky cholesky = a.cholesky(); for (int i = 0; i < a.nrow(); i++) { for (int j = 0; j <= i; j++) { assertEquals(Math.abs(L[i][j]), Math.abs(cholesky.lu.get(i, j)), 1E-6f); } } float[] b = {0.5f, 0.5f, 0.5f}; float[] x = {-0.2027027f, 0.8783784f, 0.4729730f}; float[] x2 = cholesky.solve(b); assertEquals(x.length, x2.length); for (int i = 0; i < x.length; i++) { assertEquals(x[i], x2[i], 1E-6f); } float[][] B = { {0.5f, 0.2f}, {0.5f, 0.8f}, {0.5f, 0.3f} }; float[][] X = { {-0.2027027f, -1.2837838f}, { 0.8783784f, 2.2297297f}, { 0.4729730f, 0.6621622f} }; Matrix X2 = Matrix.of(B); cholesky.solve(X2); assertEquals(X.length, X2.nrow()); assertEquals(X[0].length, X2.ncol()); for (int i = 0; i < X.length; i++) { for (int j = 0; j < X[i].length; j++) { assertEquals(X[i][j], X2.get(i, j), 1E-6f); } } }
/**
 * Fetches a single message by id from the broker at {@code address}.
 *
 * Bug fix: the original chained only {@code thenAccept}, so when the underlying
 * {@code remotingClient.invoke} completed exceptionally (timeout, connect failure) the
 * returned future was never completed and callers blocked forever. {@code whenComplete}
 * now propagates transport failures to the caller.
 *
 * @param address       broker address to query
 * @param requestHeader header carrying the message id
 * @param timeoutMillis RPC timeout
 * @return future completed with the decoded message, or exceptionally with the
 *         transport error / MQClientException on a non-SUCCESS response code
 */
@Override public CompletableFuture<MessageExt> viewMessage(String address, ViewMessageRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<MessageExt> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.VIEW_MESSAGE_BY_ID, requestHeader);
    remotingClient.invoke(address, request, timeoutMillis).whenComplete((response, throwable) -> {
        if (throwable != null) {
            // Transport-level failure: without this branch the future would never complete.
            future.completeExceptionally(throwable);
            return;
        }
        if (response.getCode() == ResponseCode.SUCCESS) {
            ByteBuffer byteBuffer = ByteBuffer.wrap(response.getBody());
            MessageExt messageExt = MessageDecoder.clientDecode(byteBuffer, true);
            future.complete(messageExt);
        } else {
            log.warn("viewMessage getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader);
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    });
    return future;
}
// Happy path: a SUCCESS response decodes into a non-null MessageExt with the expected topic.
@Test public void assertViewMessageWithSuccess() throws Exception { setResponseSuccess(getMessageResult()); ViewMessageRequestHeader requestHeader = mock(ViewMessageRequestHeader.class); CompletableFuture<MessageExt> actual = mqClientAdminImpl.viewMessage(defaultBrokerAddr, requestHeader, defaultTimeout); MessageExt result = actual.get(); assertNotNull(result); assertEquals(defaultTopic, result.getTopic()); }
// Accessor for the configured collection period in milliseconds.
@Override public long getPeriodMillis() { return periodMillis; }
// The plugin's default/configured period is expected to be 1000 ms.
@Test public void testGetPeriodMillis() { assertEquals(1000, plugin.getPeriodMillis()); }
// Delegates the per-account template count straight to the mapper.
@Override public long getMailTemplateCountByAccountId(Long accountId) { return mailTemplateMapper.selectCountByAccountId(accountId); }
// Inserts one matching and one non-matching row; only the matching accountId is counted.
@Test public void testCountByAccountId() { // mock 数据 MailTemplateDO dbMailTemplate = randomPojo(MailTemplateDO.class); mailTemplateMapper.insert(dbMailTemplate); // 测试 accountId 不匹配 mailTemplateMapper.insert(cloneIgnoreId(dbMailTemplate, o -> o.setAccountId(2L))); // 准备参数 Long accountId = dbMailTemplate.getAccountId(); // 调用 long count = mailTemplateService.getMailTemplateCountByAccountId(accountId); // 断言 assertEquals(1, count); }
@Override
public String toString() {
  // Same "ConsulConfig{url='…', waitTime=…, watchDelay=…}" shape as before, built explicitly.
  StringBuilder sb = new StringBuilder("ConsulConfig{");
  sb.append("url='").append(url).append('\'');
  sb.append(", waitTime=").append(waitTime);
  sb.append(", watchDelay=").append(watchDelay);
  sb.append('}');
  return sb.toString();
}
// Sanity check only: toString() never returns null.
@Test public void testToString() { assertNotNull(consulConfig.toString()); }
// Convenience constructor: a sliding window of 10 buckets over 1000 ms with the given QPS cap.
public RequestLimiter(double qpsAllowed) { this(new UnaryLeapArray(10, 1000), qpsAllowed); }
// Drives the limiter with mocked time: 9 requests pass under a QPS cap of 10, the 12th is
// rejected, and advancing the clock one second resets the window so requests pass again.
@Test public void testRequestLimiter() { try (MockedStatic<TimeUtil> mocked = super.mockTimeUtil()) { setCurrentMillis(mocked, System.currentTimeMillis()); RequestLimiter limiter = new RequestLimiter(10); limiter.add(3); limiter.add(3); limiter.add(3); assertTrue(limiter.canPass()); assertEquals(9, limiter.getSum()); limiter.add(3); assertFalse(limiter.canPass()); // wait a second to refresh the window sleep(mocked, 1000); limiter.add(3); assertTrue(limiter.tryPass()); assertTrue(limiter.canPass()); assertEquals(4, limiter.getSum()); } }
public void initialize(StorageProvider storageProvider, BackgroundJobServer backgroundJobServer) {
    // Storage-provider metrics are always bound; server metrics only when a server exists.
    storageProviderMetricsBinder = new StorageProviderMetricsBinder(storageProvider, meterRegistry);
    if (backgroundJobServer == null) {
        return;
    }
    backgroundJobServerMetricsBinder = new BackgroundJobServerMetricsBinder(backgroundJobServer, meterRegistry);
}
// With both a storage provider and a server, initialize() binds storage metrics (listener +
// 9 gauges) and close() tears everything down without throwing.
@Test void testWithStorageProviderAndBackgroundJobServerOnly() { // GIVEN JobRunrMicroMeterIntegration jobRunrMicroMeterIntegration = new JobRunrMicroMeterIntegration(meterRegistry); when(meterRegistry.more()).thenReturn(mock(MeterRegistry.More.class)); when(storageProvider.getJobStats()).thenReturn(JobStats.empty()); when(backgroundJobServer.getId()).thenReturn(UUID.randomUUID()); // WHEN jobRunrMicroMeterIntegration.initialize(storageProvider, backgroundJobServer); // THEN verify(storageProvider).getJobStats(); verify(storageProvider).addJobStorageOnChangeListener(any(StorageProviderMetricsBinder.class)); verify(meterRegistry, times(9)).gauge(any(String.class), any(Iterable.class), any(AtomicLong.class)); // WHEN assertThatCode(jobRunrMicroMeterIntegration::close).doesNotThrowAnyException(); }
// Convenience overload: uses the current clock time and this member's node id.
IdBatchAndWaitTime newIdBaseLocal(int batchSize) { return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize); }
// With a fixed timestamp, each successive batch must report a wait time that grows with the
// number of ids already handed out, capped below by the allowed-future window.
@Test public void when_10mIdsInSmallBatches_then_wait() { int batchSize = 1000; for (int numIds = 0; numIds < 10_000_000; numIds += batchSize) { IdBatchAndWaitTime result = gen.newIdBaseLocal(1516028439000L, 1234, batchSize); assertEquals(Math.max(0, (numIds + batchSize) / IDS_PER_SECOND - DEFAULT_ALLOWED_FUTURE_MILLIS), result.waitTimeMillis); } }
@Override
public void collect(long elapsedTime, StatementContext ctx) {
    // Record the statement's elapsed time (given in nanoseconds) on its per-statement timer.
    getTimer(ctx).update(elapsedTime, TimeUnit.NANOSECONDS);
}
// With a SQL-object type but no method on the context, the smart strategy names the timer
// from class + raw SQL, and a 1s sample lands on that timer.
@Test public void updatesTimerForSqlObjectsWithoutMethod() throws Exception { final StatementNameStrategy strategy = new SmartNameStrategy(); final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy); final StatementContext ctx = mock(StatementContext.class); doReturn("SELECT 1").when(ctx).getRawSql(); doReturn(getClass()).when(ctx).getSqlObjectType(); collector.collect(TimeUnit.SECONDS.toNanos(1), ctx); final String name = strategy.getStatementName(ctx); final Timer timer = registry.timer(name); assertThat(name) .isEqualTo(name(getClass(), "SELECT 1")); assertThat(timer.getSnapshot().getMax()) .isEqualTo(1000000000); }
// Conflict-resolution candidate selection: table-service commits (clustering/compaction) use a
// narrower candidate set than regular writer commits. The timeline is reloaded first so the
// decision is based on the latest state.
@Override public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant, Option<HoodieInstant> lastSuccessfulInstant) { HoodieActiveTimeline activeTimeline = metaClient.reloadActiveTimeline(); if (ClusteringUtils.isClusteringInstant(activeTimeline, currentInstant) || COMPACTION_ACTION.equals(currentInstant.getAction())) { return getCandidateInstantsForTableServicesCommits(activeTimeline, currentInstant); } else { return getCandidateInstantsForNonTableServicesCommits(activeTimeline, currentInstant); } }
// A writer starting after a compaction was scheduled must see no conflict candidates.
@Test public void testConcurrentWriteAndCompactionScheduledEarlier() throws Exception { createCommit(metaClient.createNewInstantTime(), metaClient); // consider commits before this are all successful // compaction 1 gets scheduled String newInstantTime = metaClient.createNewInstantTime(); createCompaction(newInstantTime, metaClient); HoodieActiveTimeline timeline = metaClient.getActiveTimeline(); Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant(); // writer 1 starts String currentWriterInstant = metaClient.createNewInstantTime(); createInflightCommit(currentWriterInstant, metaClient); Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant)); PreferWriterConflictResolutionStrategy strategy = new PreferWriterConflictResolutionStrategy(); List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect( Collectors.toList()); // writer 1 should not conflict with an earlier scheduled compaction 1 with the same file ids Assertions.assertEquals(0, candidateInstants.size()); }
// Builds a fresh DefaultConnectClient per call (auth header, custom request headers, SSL from
// ksql.connect-prefixed config). The default auth header is computed lazily, once, under the
// method's synchronization.
@Override public synchronized DefaultConnectClient get( final Optional<String> ksqlAuthHeader, final List<Entry<String, String>> incomingRequestHeaders, final Optional<KsqlPrincipal> userPrincipal ) { if (defaultConnectAuthHeader == null) { defaultConnectAuthHeader = buildDefaultAuthHeader(); } final Map<String, Object> configWithPrefixOverrides = ksqlConfig.valuesWithPrefixOverride(KsqlConfig.KSQL_CONNECT_PREFIX); return new DefaultConnectClient( ksqlConfig.getString(KsqlConfig.CONNECT_URL_PROPERTY), buildAuthHeader(ksqlAuthHeader, incomingRequestHeaders), requestHeadersExtension .map(extension -> extension.getHeaders(userPrincipal)) .orElse(Collections.emptyMap()), Optional.ofNullable(newSslContext(configWithPrefixOverrides)), shouldVerifySslHostname(configWithPrefixOverrides), ksqlConfig.getLong(KsqlConfig.CONNECT_REQUEST_TIMEOUT_MS) ); }
// With credential reloading enabled, rewriting the credentials file must eventually change
// the Authorization header produced by newly created clients.
@Test public void shouldReloadCredentialsOnFileChange() throws Exception { // Given: when(config.getBoolean(KsqlConfig.CONNECT_BASIC_AUTH_CREDENTIALS_RELOAD_PROPERTY)).thenReturn(true); givenCustomBasicAuthHeader(); givenValidCredentialsFile(); // verify auth header is present assertThat(connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.empty()).getRequestHeaders(), arrayContaining(header(AUTH_HEADER_NAME, EXPECTED_HEADER))); // When: credentials file is modified waitForLastModifiedTick(); writeNewCredentialsFile(); // Then: new auth header is present assertThatEventually( "Should load updated credentials", () -> connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.empty()).getRequestHeaders(), arrayContaining(header(AUTH_HEADER_NAME, OTHER_EXPECTED_HEADER)), TimeUnit.SECONDS.toMillis(1), TimeUnit.SECONDS.toMillis(1) ); }
@Override
public String generate(TokenType tokenType) {
    // A token is a type-identifying prefix followed by the random part.
    final String prefix = buildIdentifiablePartOfToken(tokenType);
    final String rawToken = generateRawToken();
    return prefix + rawToken;
}
// Project-analysis tokens carry the "sqp_" prefix followed by a 40-char random part.
@Test public void generated_projectAnalysisToken_should_have_sqp_prefix() { String token = underTest.generate(TokenType.PROJECT_ANALYSIS_TOKEN); assertThat(token).matches("sqp_.{40}"); }
// Decorator: lets requestDecorateFunc transform the delegate producer's request before use.
@Override public HttpRequest generateRequest() throws IOException, HttpException { return requestDecorateFunc.apply(httpAsyncRequestProducer.generateRequest()); }
// The decorator must invoke the supplied function and return its result, not the delegate's.
@Test public void generateRequest() throws HttpException, IOException { final HttpAsyncRequestProducer delegate = Mockito.mock(HttpAsyncRequestProducer.class); AtomicBoolean isExecute = new AtomicBoolean(); final HttpRequest request = Mockito.mock(HttpRequest.class); final Function<HttpRequest, HttpRequest> function = httpRequest -> { isExecute.set(true); return request; }; final HttpAsyncRequestProducerDecorator decorator = new HttpAsyncRequestProducerDecorator( delegate, function, null); final HttpRequest httpRequest = decorator.generateRequest(); Assert.assertEquals(httpRequest, request); Assert.assertTrue(isExecute.get()); }
/**
 * Reads the next Phoenix row into {@code record}.
 *
 * @return true when a row was read and copied into the record; false at end of input
 */
public boolean readRecord(Record record) throws IOException, InterruptedException {
    if (!this.phoenixRecordReader.nextKeyValue()) {
        return false;
    }
    PhoenixRecordWritable phoenixRecordWritable =
            (PhoenixRecordWritable) this.phoenixRecordReader.getCurrentValue();
    Map<String, Object> phoenixRecord = phoenixRecordWritable.getResultMap();
    this.constructRecordFromPhoenix(record, phoenixRecord);
    return true;
}
// Integration-style test: reads every split to exhaustion via readRecord() and expects
// exactly 10000 records in total across all splits.
@Test public void testReadRecord() throws Exception { List<Configuration> splits = this.generateSplitConfig(); int allRecordNum = 0; for (int i = 0; i < splits.size(); i++) { RecordSender recordSender = mock(RecordSender.class); when(recordSender.createRecord()).thenReturn(new DefaultRecord()); Record record = recordSender.createRecord(); HbaseSQLReaderTask hbase11SQLReaderTask = new HbaseSQLReaderTask(splits.get(i)); hbase11SQLReaderTask.init(); hbase11SQLReaderTask.prepare(); int num = 0; while (true) { boolean hasLine = false; try { hasLine = hbase11SQLReaderTask.readRecord(record); } catch (Exception e) { e.printStackTrace(); throw e; } if (!hasLine) break; num++; if (num % 100 == 0) System.out.println("record num is :" + num + ",record is " + record.toString()); when(recordSender.createRecord()).thenReturn(new DefaultRecord()); String recordStr = ""; for (int j = 0; j < record.getColumnNumber(); j++) { recordStr += record.getColumn(j).asString() + ","; } recordSender.sendToWriter(record); record = recordSender.createRecord(); } System.out.println("split id is " + i + ",record num = " + num); allRecordNum += num; recordSender.flush(); hbase11SQLReaderTask.destroy(); } System.out.println("all record num = " + allRecordNum); assertEquals(10000, allRecordNum); }
// Handles the AUTH_CREATE_USER request: rejects blank usernames, forbids non-super callers from
// creating SUPER users, then creates the user synchronously (join() on the async manager).
// NOTE(review): request.getBody() is decoded without a null check — assumes callers always send
// a body; verify against the protocol contract.
private RemotingCommand createUser(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { RemotingCommand response = RemotingCommand.createResponseCommand(null); CreateUserRequestHeader requestHeader = request.decodeCommandCustomHeader(CreateUserRequestHeader.class); if (StringUtils.isEmpty(requestHeader.getUsername())) { response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("The username is blank"); return response; } UserInfo userInfo = RemotingSerializable.decode(request.getBody(), UserInfo.class); userInfo.setUsername(requestHeader.getUsername()); User user = UserConverter.convertUser(userInfo); if (user.getUserType() == UserType.SUPER && isNotSuperUserLogin(request)) { response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("The super user can only be create by super user"); return response; } this.brokerController.getAuthenticationMetadataManager().createUser(user) .thenAccept(nil -> response.setCode(ResponseCode.SUCCESS)) .exceptionally(ex -> { LOGGER.error("create user {} error", user.getUsername(), ex); return handleAuthException(response, ex); }) .join(); return response; }
// Covers: creating a normal user succeeds; creating a SUPER user succeeds when the caller is a
// super user, and fails with SYSTEM_ERROR when the caller is not.
@Test public void testCreateUser() throws RemotingCommandException { when(authenticationMetadataManager.createUser(any(User.class))) .thenReturn(CompletableFuture.completedFuture(null)); CreateUserRequestHeader createUserRequestHeader = new CreateUserRequestHeader(); createUserRequestHeader.setUsername("abc"); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_CREATE_USER, createUserRequestHeader); request.setVersion(441); request.addExtField("AccessKey", "rocketmq"); request.makeCustomHeaderToNet(); UserInfo userInfo = UserInfo.of("abc", "123", UserType.NORMAL.getName()); request.setBody(JSON.toJSONBytes(userInfo)); RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); when(authenticationMetadataManager.isSuperUser(eq("rocketmq"))).thenReturn(CompletableFuture.completedFuture(true)); createUserRequestHeader = new CreateUserRequestHeader(); createUserRequestHeader.setUsername("super"); request = RemotingCommand.createRequestCommand(RequestCode.AUTH_CREATE_USER, createUserRequestHeader); request.setVersion(441); request.addExtField("AccessKey", "rocketmq"); request.makeCustomHeaderToNet(); userInfo = UserInfo.of("super", "123", UserType.SUPER.getName()); request.setBody(JSON.toJSONBytes(userInfo)); response = adminBrokerProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); when(authenticationMetadataManager.isSuperUser(eq("rocketmq"))).thenReturn(CompletableFuture.completedFuture(false)); response = adminBrokerProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR); }
/**
 * A user may view the template if they are an explicit view user (directly or via a role),
 * or if they are a group administrator and the template allows group admins to view it.
 */
public boolean hasViewAccessToTemplate(PipelineTemplateConfig template, CaseInsensitiveString username, List<Role> roles, boolean isGroupAdministrator) {
    if (template.getAuthorization().isViewUser(username, roles)) {
        return true;
    }
    return template.isAllowGroupAdmins() && isGroupAdministrator;
}
// A user whose role is not in the template's view config (and who is not a group admin)
// must be denied view access.
@Test public void shouldReturnFalseIfUserWithinARoleCannotViewTemplate() { CaseInsensitiveString templateViewUser = new CaseInsensitiveString("template-admin"); Role securityConfigRole = getSecurityConfigRole(templateViewUser); List<Role> roles = setupRoles(securityConfigRole); String templateName = "template1"; PipelineTemplateConfig template = PipelineTemplateConfigMother.createTemplate(templateName, StageConfigMother.manualStage("stage")); template.setAuthorization(new Authorization(new ViewConfig(new AdminRole(new CaseInsensitiveString("another-role"))))); TemplatesConfig templates = new TemplatesConfig(template); assertThat(templates.hasViewAccessToTemplate(template, templateViewUser, roles, false), is(false)); }
// Builds a projection of originalSchema containing only fieldNames, matched case-insensitively;
// throws HoodieException listing the available fields when a requested field is absent.
// NOTE(review): the toMap collector has no merge function, so a schema with two fields differing
// only in case would throw IllegalStateException — presumably such schemas never occur; confirm.
public static Schema generateProjectionSchema(Schema originalSchema, List<String> fieldNames) { Map<String, Field> schemaFieldsMap = originalSchema.getFields().stream() .map(r -> Pair.of(r.name().toLowerCase(), r)).collect(Collectors.toMap(Pair::getLeft, Pair::getRight)); List<Schema.Field> projectedFields = new ArrayList<>(); for (String fn : fieldNames) { Schema.Field field = schemaFieldsMap.get(fn.toLowerCase()); if (field == null) { throw new HoodieException("Field " + fn + " not found in log schema. Query cannot proceed! " + "Derived Schema Fields: " + new ArrayList<>(schemaFieldsMap.keySet())); } else { projectedFields.add(new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal())); } } Schema projectedSchema = Schema.createRecord(originalSchema.getName(), originalSchema.getDoc(), originalSchema.getNamespace(), originalSchema.isError()); projectedSchema.setFields(projectedFields); return projectedSchema; }
// Projecting two existing fields keeps exactly those fields; requesting a missing field
// throws a HoodieException naming it.
@Test public void testGenerateProjectionSchema() { Schema originalSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(EXAMPLE_SCHEMA)); Schema schema1 = HoodieAvroUtils.generateProjectionSchema(originalSchema, Arrays.asList("_row_key", "timestamp")); assertEquals(2, schema1.getFields().size()); List<String> fieldNames1 = schema1.getFields().stream().map(Schema.Field::name).collect(Collectors.toList()); assertTrue(fieldNames1.contains("_row_key")); assertTrue(fieldNames1.contains("timestamp")); assertTrue(assertThrows(HoodieException.class, () -> HoodieAvroUtils.generateProjectionSchema(originalSchema, Arrays.asList("_row_key", "timestamp", "fake_field"))) .getMessage().contains("Field fake_field not found in log schema. Query cannot proceed!")); }
// Server-side response compression filter: negotiates an encoding from the request's
// Accept-Encoding (stored as a local attribute), compresses the entity when it exceeds the
// per-request threshold AND compression actually shrinks it, and answers 406 with an empty
// body when no offered encoding is acceptable. Compression errors are logged and the
// response passed through uncompressed.
@Override public void onRestResponse(RestResponse res, RequestContext requestContext, Map<String, String> wireAttrs, NextFilter<RestRequest, RestResponse> nextFilter) { try { if (res.getEntity().length() > 0) { String responseAcceptedEncodings = (String) requestContext.getLocalAttr(HttpConstants.ACCEPT_ENCODING); if (responseAcceptedEncodings == null) { throw new CompressionException(HttpConstants.ACCEPT_ENCODING + " not in local attribute."); } List<AcceptEncoding> parsedEncodings = AcceptEncoding.parseAcceptEncodingHeader(responseAcceptedEncodings, _supportedEncoding); EncodingType selectedEncoding = AcceptEncoding.chooseBest(parsedEncodings); //Check if there exists an acceptable encoding if (selectedEncoding != null) { if (selectedEncoding.hasCompressor() && res.getEntity().length() > (Integer) requestContext.getLocalAttr(HttpConstants.HEADER_RESPONSE_COMPRESSION_THRESHOLD)) { Compressor compressor = selectedEncoding.getCompressor(); ByteString compressed = compressor.deflate(res.getEntity()); if (compressed.length() < res.getEntity().length()) { RestResponseBuilder resCompress = res.builder(); resCompress.removeHeader(HttpConstants.CONTENT_LENGTH); resCompress.addHeaderValue(HttpConstants.CONTENT_ENCODING, compressor.getContentEncodingName()); resCompress.setEntity(compressed); res = resCompress.build(); } } } else { //Not acceptable encoding status res = res.builder().setStatus(HttpConstants.NOT_ACCEPTABLE).setEntity(new byte[0]).build(); } } } catch (CompressionException e) { LOG.error(e.getMessage(), e.getCause()); } nextFilter.onResponse(res, requestContext, wireAttrs); }
/**
 * Drives the server compression filter with various Accept-Encoding headers
 * and thresholds (from the data provider) and verifies, via
 * HeaderCaptureFilter, that the resulting Content-Encoding header and entity
 * length match the expected encoding choice.
 */
@Test(dataProvider = "headersData")
public void testResponseCompressionRules(String acceptEncoding, int compressionThreshold,
                                         EncodingType expectedContentEncoding)
    throws CompressionException, URISyntaxException
{
  ServerCompressionFilter serverCompressionFilter = new ServerCompressionFilter(ACCEPT_COMPRESSIONS);
  RequestContext context = new RequestContext();
  context.putLocalAttr(HttpConstants.ACCEPT_ENCODING, acceptEncoding);
  context.putLocalAttr(HttpConstants.HEADER_RESPONSE_COMPRESSION_THRESHOLD, compressionThreshold);

  // A highly compressible 100-byte entity (all 'A's).
  int originalLength = 100;
  byte[] entity = new byte[originalLength];
  Arrays.fill(entity, (byte) 'A');

  // With no expected encoding the entity must pass through at original size.
  int compressedLength = (expectedContentEncoding == null) ? originalLength
      : expectedContentEncoding.getCompressor().deflate(new ByteArrayInputStream(entity)).length;
  String expectedContentEncodingName = (expectedContentEncoding == null) ? null
      : expectedContentEncoding.getHttpName();

  RestResponse restResponse = new RestResponseBuilder().setEntity(entity).build();
  serverCompressionFilter.onRestResponse(restResponse, context, Collections.<String, String>emptyMap(),
      new HeaderCaptureFilter(HttpConstants.CONTENT_ENCODING, expectedContentEncodingName, compressedLength));
}
/**
 * Converts a byte array into an IPv6 address.
 *
 * @param value the address bytes in network byte order (an IPv6 address is
 *              16 bytes; validation, if any, happens in the constructor)
 * @return the IPv6 address
 */
public static Ip6Address valueOf(byte[] value) {
    return new Ip6Address(value);
}
@Test public void testVersion() { IpAddress ipAddress; // IPv6 ipAddress = IpAddress.valueOf("::"); assertThat(ipAddress.version(), is(IpAddress.Version.INET6)); }
/**
 * Builds a DiscreteResources aggregate from a set of resources.
 *
 * @param resources resources to aggregate; assumed to share the same parent
 * @return the aggregate, or the empty instance when the set is empty
 */
static DiscreteResources of(Set<DiscreteResource> resources) {
    if (resources.isEmpty()) {
        return DiscreteResources.empty();
    }

    // The common parent is taken from an arbitrary element.
    // NOTE(review): parent().get() throws if a root resource (no parent) is
    // passed in — callers are assumed to pass child resources only; confirm.
    DiscreteResource parent = resources.iterator().next().parent().get();
    return of(parent, resources);
}
/**
 * Round-trips 100 port-child resources of a single device through the
 * serializer and checks the decoded aggregate equals the original.
 */
@Test
public void testPortSerialize() {
    DiscreteResource device = Resources.discrete(DeviceId.deviceId("a")).resource();
    Set<DiscreteResource> resources = IntStream.range(0, 100)
            .mapToObj(PortNumber::portNumber)
            .map(device::child)
            .collect(Collectors.toSet());
    DiscreteResources original = EncodableDiscreteResources.of(resources);

    byte[] bytes = serializer.encode(original);
    DiscreteResources decoded = serializer.decode(bytes);
    assertThat(decoded, is(original));
}
/**
 * Schedules cleanup tasks for internal Kafka topics belonging to queries that
 * are no longer running.
 *
 * @param serviceContext      source of the Kafka topic client
 * @param queryApplicationIds candidate application ids; an id is cleaned up
 *                            only if at least one existing topic name starts
 *                            with it (i.e. it left internal topics behind)
 */
public void cleanupOrphanedInternalTopics(
    final ServiceContext serviceContext,
    final Set<String> queryApplicationIds
) {
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  final Set<String> topicNames;
  try {
    topicNames = topicClient.listTopicNames();
  } catch (KafkaResponseGetFailedException e) {
    // Best effort: if we cannot list topics right now, skip this round
    // rather than fail — a later invocation can retry.
    LOG.error("Couldn't fetch topic names", e);
    return;
  }
  // Find any transient query topics: keep only the application ids that have
  // at least one topic prefixed with them.
  final Set<String> orphanedQueryApplicationIds = topicNames.stream()
      .map(topicName -> queryApplicationIds.stream().filter(topicName::startsWith).findFirst())
      .filter(Optional::isPresent)
      .map(Optional::get)
      .collect(Collectors.toSet());
  for (final String queryApplicationId : orphanedQueryApplicationIds) {
    cleanupService.addCleanupTask(
        new QueryCleanupService.QueryCleanupTask(
            serviceContext,
            queryApplicationId,
            Optional.empty(),
            true, // NOTE(review): presumably flags a transient-query cleanup — confirm against QueryCleanupTask
            // State directory: configured value, else the Streams default.
            ksqlConfig.getKsqlStreamConfigProps()
                .getOrDefault(
                    StreamsConfig.STATE_DIR_CONFIG,
                    StreamsConfig.configDef().defaultValues().get(StreamsConfig.STATE_DIR_CONFIG))
                .toString(),
            ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
            ksqlConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG)));
  }
}
@Test public void shouldSkip_exception() { // Given when(topicClient.listTopicNames()) .thenThrow(new KafkaResponseGetFailedException("error!", new Exception())); // When cleaner.cleanupOrphanedInternalTopics(serviceContext, ImmutableSet.of()); // Then verify(queryCleanupService, never()).addCleanupTask(any()); }
/**
 * Parses a "host:port" style address (optionally prefixed with "http://" or
 * "https://") into an unresolved {@link InetSocketAddress}.
 *
 * @param address the raw address string, e.g. "http://127.0.0.1:8080"
 * @return an unresolved socket address for the given host and port
 * @throws ConnectionException if the address has no ":port" part or the port
 *                             is not a valid integer
 */
public static InetSocketAddress getInetAddress(String address) {
    // Strip a leading scheme so only "host:port" remains.
    if (address.startsWith(HTTP)) {
        address = address.replaceFirst(HTTP, "");
    }
    if (address.startsWith(HTTPS)) {
        address = address.replaceFirst(HTTPS, "");
    }
    String[] addressStr = address.split(":");
    if (addressStr.length < 2) {
        throw new ConnectionException("Failed to connect to the server because the IP address is invalid. Procedure");
    }
    try {
        return InetSocketAddress.createUnresolved(addressStr[0], Integer.parseInt(addressStr[1]));
    } catch (NumberFormatException e) {
        // Surface a malformed port through the exception type callers already
        // handle instead of leaking a raw NumberFormatException.
        throw new ConnectionException("Failed to connect to the server because the port is invalid: " + addressStr[1]);
    }
}
/** A malformed address (address3 fixture) must raise ConnectionException. */
@Test(expected = ConnectionException.class)
public void testAddress3() {
    AddressUtil.getInetAddress(address3);
}
public void add(CSQueue queue) { String fullName = queue.getQueuePath(); String shortName = queue.getQueueShortName(); try { modificationLock.writeLock().lock(); fullNameQueues.put(fullName, queue); getMap.put(fullName, queue); //we only update short queue name ambiguity for non root queues if (!shortName.equals(CapacitySchedulerConfiguration.ROOT)) { //getting or creating the ambiguity set for the current queue Set<String> fullNamesSet = this.shortNameToLongNames.getOrDefault(shortName, new HashSet<>()); //adding the full name to the queue fullNamesSet.add(fullName); this.shortNameToLongNames.put(shortName, fullNamesSet); } //updating the getMap references for the queue updateGetMapForShortName(shortName); } finally { modificationLock.writeLock().unlock(); } }
/**
 * Verifies short-name lookup bookkeeping in CSQueueStore: queues whose short
 * name is shared by several queues become ambiguous (full-path access only),
 * while uniquely named queues stay accessible by every name form.
 */
@Test
public void testQueueReferencePrecedence() throws IOException {
    CSQueueStore store = new CSQueueStore();
    // Hierarchies under test:
    //   root.main.A.B
    //   root.second.A.D.B.C
    // "A" is ambiguous — two parent queues share it.
    // "B" is ambiguous — leaf root.main.A.B vs parent root.second.A.D.B.

    //root.main
    CSQueue main = createParentQueue("main", root);
    //root.main.A
    CSQueue mainA = createParentQueue("A", main);
    //root.main.A.B
    CSQueue mainAB = createLeafQueue("B", mainA);

    //root.second
    CSQueue second = createParentQueue("second", root);
    //root.second.A
    CSQueue secondA = createParentQueue("A", second);
    //root.second.A.D
    CSQueue secondAD = createParentQueue("D", secondA);
    //root.second.A.D.B
    CSQueue secondADB = createParentQueue("B", secondAD);
    //root.second.A.D.B.C
    CSQueue secondADBC = createLeafQueue("C", secondADB);

    store.add(main);
    store.add(mainA);
    store.add(mainAB);
    store.add(second);
    store.add(secondA);
    store.add(secondAD);
    store.add(secondADB);
    store.add(secondADBC);

    assertAccessibleByAllNames(store, main);
    assertAccessibleByAllNames(store, second);
    assertAmbiguous(store, mainA);
    assertAmbiguous(store, secondA);
    assertAmbiguous(store, mainAB);
    assertAccessibleByAllNames(store, secondAD);
    assertAmbiguous(store, secondADB);
    assertAccessibleByAllNames(store, secondADBC);
}
/**
 * Returns only the supported analytics whose type is "pipeline"
 * (case-insensitive), preserving their original order.
 */
public List<SupportedAnalytics> supportedPipelineAnalytics() {
    return supportedAnalytics.stream()
            .filter(analytics -> PIPELINE_TYPE.equalsIgnoreCase(analytics.getType()))
            .collect(Collectors.toList());
}
/**
 * The "pipeline" type filter must be case-insensitive and preserve order;
 * an empty capability list yields an empty result.
 */
@Test
public void shouldListSupportedAnalyticsForPipelines() {
    Capabilities capabilities = new Capabilities(List.of(
            new SupportedAnalytics("pipeline", "id1", "title1"),
            new SupportedAnalytics("Pipeline", "id2", "title2")));

    assertThat(capabilities.supportedPipelineAnalytics(), is(List.of(
            new SupportedAnalytics("pipeline", "id1", "title1"),
            new SupportedAnalytics("Pipeline", "id2", "title2"))));

    assertTrue(new Capabilities(Collections.emptyList()).supportedPipelineAnalytics().isEmpty());
}
/**
 * Validates that {@code value} conforms to {@code schema}; delegates to the
 * named variant with no field name for error reporting.
 */
public static void validateValue(Schema schema, Object value) {
    validateValue(null, schema, value);
}
/** A map whose key type does not match the schema's key type must be rejected. */
@Test
public void testValidateValueMismatchMapKey() {
    Map<String, String> badKeyMap = Collections.singletonMap("wrong key type", "value");
    assertThrows(DataException.class,
        () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, badKeyMap));
}
/**
 * Sets the bucket to {@code newValue} and returns the previous value,
 * blocking on the async variant.
 */
@Override
public V getAndSet(V newValue) {
    return get(getAndSetAsync(newValue));
}
/**
 * getAndSet must return the previous value (null on first use) and setting
 * null must delete the bucket entirely.
 */
@Test
public void testGetAndSet() {
    RBucket<List<String>> r1 = redisson.getBucket("testGetAndSet");
    // Nothing stored yet, so the previous value is null.
    assertThat(r1.getAndSet(Arrays.asList("81"))).isNull();
    assertThat(r1.getAndSet(Arrays.asList("1"))).isEqualTo(Arrays.asList("81"));
    assertThat(r1.get()).isEqualTo(Arrays.asList("1"));

    // Setting null removes the bucket and returns the last stored value.
    assertThat(r1.getAndSet(null)).isEqualTo(Arrays.asList("1"));
    assertThat(r1.get()).isNull();
    assertThat(r1.isExists()).isFalse();
}
/**
 * Upserts an OAuth2 approval record for the (user, client, scope) tuple:
 * tries an UPDATE first and falls back to INSERT when no row was touched.
 */
@VisibleForTesting
void saveApprove(Long userId, Integer userType, String clientId, String scope,
                 Boolean approved, LocalDateTime expireTime) {
    // Try an UPDATE first — the approval record may already exist.
    OAuth2ApproveDO approveDO = new OAuth2ApproveDO().setUserId(userId).setUserType(userType)
            .setClientId(clientId).setScope(scope).setApproved(approved).setExpiresTime(expireTime);
    if (oauth2ApproveMapper.update(approveDO) == 1) {
        return;
    }
    // UPDATE touched no row, so the record does not exist yet — INSERT it.
    oauth2ApproveMapper.insert(approveDO);
}
/**
 * When no approval row exists, saveApprove must fall back to INSERT and
 * persist all supplied field values.
 */
@Test
public void testSaveApprove_insert() {
    // Prepare arguments.
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String clientId = randomString();
    String scope = randomString();
    Boolean approved = randomBoolean();
    LocalDateTime expireTime = LocalDateTime.ofInstant(randomDay(1, 30).toInstant(), ZoneId.systemDefault());
    // Mock behavior: none needed — the UPDATE misses, forcing the INSERT path.

    // Invoke.
    oauth2ApproveService.saveApprove(userId, userType, clientId, scope, approved, expireTime);
    // Assert: exactly one row was inserted with the supplied values.
    List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList();
    assertEquals(1, result.size());
    assertEquals(userId, result.get(0).getUserId());
    assertEquals(userType, result.get(0).getUserType());
    assertEquals(clientId, result.get(0).getClientId());
    assertEquals(scope, result.get(0).getScope());
    assertEquals(approved, result.get(0).getApproved());
    assertEquals(expireTime, result.get(0).getExpiresTime());
}
/**
 * Writes a single protobuf {@link Message} to {@code toFile}, overwriting any
 * existing content.
 *
 * @throws ContextException wrapping any I/O or serialization failure, with
 *                          the target file attached as context
 */
public static void write(Message message, File toFile) {
    // try-with-resources both closes the stream and surfaces close()/flush()
    // failures, which the previous closeQuietly() pattern silently dropped —
    // a failed close can mean the message was only partially written.
    try (OutputStream out = new BufferedOutputStream(new FileOutputStream(toFile, false))) {
        message.writeTo(out);
    } catch (Exception e) {
        throw ContextException.of("Unable to write message", e).addContext("file", toFile);
    }
}
/**
 * Writing a message to a path that is a directory must fail and be wrapped
 * in a ContextException carrying the "Unable to write message" message.
 */
@Test
public void fail_to_write_single_message() {
    assertThatThrownBy(() -> {
        // Target is a directory, not a file, so the stream cannot be opened.
        File dir = temp.newFolder();
        Protobuf.write(Fake.getDefaultInstance(), dir);
    }).isInstanceOf(ContextException.class)
        .hasMessageContaining("Unable to write message");
}
/**
 * Persists a new Endpoints object in the store after validating it.
 *
 * @param endpoints the endpoints to create; must be non-null and carry a
 *                  non-empty metadata UID
 */
@Override
public void createEndpoints(Endpoints endpoints) {
    checkNotNull(endpoints, ERR_NULL_ENDPOINTS);
    checkArgument(!Strings.isNullOrEmpty(endpoints.getMetadata().getUid()),
            ERR_NULL_ENDPOINTS_UID);

    k8sEndpointsStore.createEndpoints(endpoints);
    log.info(String.format(MSG_ENDPOINTS, endpoints.getMetadata().getName(), MSG_CREATED));
}
/** Creating null endpoints must be rejected with a NullPointerException. */
@Test(expected = NullPointerException.class)
public void testCreateNullEndpoints() {
    target.createEndpoints(null);
}
/**
 * Returns a transform that pairs every element with the window it appears in
 * (plus timestamp and pane), leaving the element values unchanged.
 */
public static <T> PTransform<PCollection<T>, PCollection<ValueInSingleWindow<T>>> windows() {
  return new Window<>();
}
/**
 * An element in the global window must be reified with its original
 * timestamp, the global window, and no pane firing.
 */
@Test
@Category({NeedsRunner.class, UsesTestStream.class})
public void globalWindowNoKeys() {
  PCollection<ValueInSingleWindow<String>> result =
      pipeline
          .apply(
              TestStream.create(StringUtf8Coder.of())
                  .addElements(TimestampedValue.of("dei", new Instant(123L)))
                  .advanceWatermarkToInfinity())
          .apply(Reify.windows());
  PAssert.that(result)
      .containsInAnyOrder(
          ValueInSingleWindow.of(
              "dei", new Instant(123L), GlobalWindow.INSTANCE, PaneInfo.NO_FIRING));
  pipeline.run();
}
/** Returns the socket factory used to create sockets for this component. */
@Override
protected SocketFactory getSocketFactory() {
    return socketFactory;
}
/**
 * The receiver must start and expose a usable socket factory with no SSL
 * configuration supplied at all (falling back to platform defaults).
 */
@Test
public void testUsingDefaultConfig() throws Exception {
    // should be able to start successfully with no SSL configuration at all
    remote.setRemoteHost(InetAddress.getLocalHost().getHostAddress());
    remote.setPort(6000);
    remote.start();
    assertNotNull(remote.getSocketFactory());
}
/**
 * Drops a Hoodie table partition: first issues a delete-partition commit
 * through the Hudi write client, then removes the partition from the Hive
 * metastore.
 *
 * @param tablePath         database/table containing the partition
 * @param partitionSpec     the partition to drop
 * @param ignoreIfNotExists when false, a missing table or partition raises
 *                          {@link PartitionNotExistException}
 */
@Override
public void dropPartition(
        ObjectPath tablePath, CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
    checkNotNull(tablePath, "Table path cannot be null");
    checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");

    final CatalogBaseTable table;
    try {
        table = getTable(tablePath);
    } catch (TableNotExistException e) {
        // A missing table implies the partition cannot exist either.
        if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
        } else {
            return;
        }
    }

    // The write client is closed automatically after the delete commit.
    try (HoodieFlinkWriteClient<?> writeClient =
            HoodieCatalogUtil.createWriteClient(tablePath, table, hiveConf, this::inferTablePath)) {
        boolean hiveStylePartitioning =
            Boolean.parseBoolean(table.getOptions().get(FlinkOptions.HIVE_STYLE_PARTITIONING.key()));
        // Delete the partition data on the Hudi side; any per-file write error aborts the drop.
        writeClient.deletePartitions(
                Collections.singletonList(
                    HoodieCatalogUtil.inferPartitionPath(hiveStylePartitioning, partitionSpec)),
                writeClient.createNewInstantTime())
            .forEach(writeStatus -> {
                if (writeStatus.hasErrors()) {
                    throw new HoodieMetadataException(String.format(
                        "Failed to commit metadata table records at file id %s.", writeStatus.getFileId()));
                }
            });
        // Then remove the partition from the Hive metastore.
        // NOTE(review): the trailing boolean is presumably HMS deleteData — confirm.
        client.dropPartition(
                tablePath.getDatabaseName(),
                tablePath.getObjectName(),
                HoodieCatalogUtil.getOrderedPartitionValues(
                        getName(), getHiveConf(), partitionSpec,
                        ((CatalogTable) table).getPartitionKeys(), tablePath),
                true);
    } catch (NoSuchObjectException e) {
        // Hive does not know the partition.
        if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
        }
    } catch (MetaException | PartitionSpecInvalidException e) {
        throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
    } catch (Exception e) {
        throw new CatalogException(
                String.format("Failed to drop partition %s of table %s", partitionSpec, tablePath), e);
    }
}
/**
 * End-to-end drop-partition check: dropping a missing partition fails, then a
 * manually registered Hive partition is dropped, leaving a completed replace
 * (delete-partition) commit on the Hudi timeline that covers one partition.
 */
@Test
public void testDropPartition() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put(FactoryUtil.CONNECTOR.key(), "hudi");
    CatalogTable table = new CatalogTableImpl(schema, partitions, options, "hudi table");
    hoodieCatalog.createTable(tablePath, table, false);

    CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(new HashMap<String, String>() {
        {
            put("par1", "20221020");
        }
    });
    // drop non-exist partition
    assertThrows(PartitionNotExistException.class,
        () -> hoodieCatalog.dropPartition(tablePath, partitionSpec, false));

    // Register the partition directly in the Hive metastore so it can be dropped.
    Table hiveTable = hoodieCatalog.getHiveTable(tablePath);
    StorageDescriptor partitionSd = new StorageDescriptor(hiveTable.getSd());
    partitionSd.setLocation(new Path(partitionSd.getLocation(),
        HoodieCatalogUtil.inferPartitionPath(true, partitionSpec)).toString());
    hoodieCatalog.getClient().add_partition(new Partition(Collections.singletonList("20221020"),
        tablePath.getDatabaseName(), tablePath.getObjectName(), 0, 0, partitionSd, null));
    assertNotNull(getHivePartition(partitionSpec));

    // drop partition 'par1'
    hoodieCatalog.dropPartition(tablePath, partitionSpec, false);

    // The drop must leave a completed replace-commit on the Hudi timeline.
    String tablePathStr = hoodieCatalog.inferTablePath(tablePath, hoodieCatalog.getTable(tablePath));
    HoodieTableMetaClient metaClient = HoodieTestUtils.createMetaClient(
        new HadoopStorageConfiguration(hoodieCatalog.getHiveConf()), tablePathStr);
    HoodieInstant latestInstant =
        metaClient.getActiveTimeline().filterCompletedInstants().lastInstant().orElse(null);
    assertNotNull(latestInstant, "Delete partition commit should be completed");
    HoodieCommitMetadata commitMetadata = WriteProfiles.getCommitMetadata(tablePath.getObjectName(),
        new org.apache.flink.core.fs.Path(tablePathStr), latestInstant, metaClient.getActiveTimeline());
    assertThat(commitMetadata, instanceOf(HoodieReplaceCommitMetadata.class));
    HoodieReplaceCommitMetadata replaceCommitMetadata = (HoodieReplaceCommitMetadata) commitMetadata;
    assertThat(replaceCommitMetadata.getPartitionToReplaceFileIds().size(), is(1));
    // The Hive partition must be gone as well.
    assertThrows(NoSuchObjectException.class, () -> getHivePartition(partitionSpec));
}
/**
 * Appends one serialized message to the commit log.
 *
 * @param message the message buffer
 * @return FILE_CLOSED when this flat file has been closed, otherwise the
 *         result of the underlying commit-log append
 */
@Override
public AppendResult appendCommitLog(ByteBuffer message) {
    if (closed) {
        return AppendResult.FILE_CLOSED;
    }
    // The message's own store timestamp is recorded alongside the data.
    return commitLog.append(message, MessageFormatUtil.getStoreTimeStamp(message));
}
/**
 * Exercises the append/commit lifecycle of a FlatMessageFile: metadata flush,
 * commit-log and consume-queue appends, offset accounting before and after
 * commitAsync(), and a read-back of the first message.
 */
@Test
public void testAppendCommitLog() {
    String topic = "CommitLogTest";
    FlatMessageFile flatFile = new FlatMessageFile(flatFileFactory, topic, 0);
    Assert.assertTrue(flatFile.getTopicId() >= 0);
    Assert.assertEquals(topic, flatFile.getMessageQueue().getTopic());
    Assert.assertEquals(0, flatFile.getMessageQueue().getQueueId());
    Assert.assertFalse(flatFile.isFlatFileInit());

    // Flushing metadata registers the queue in the metadata store.
    flatFile.flushMetadata();
    Assert.assertNotNull(metadataStore.getQueue(flatFile.getMessageQueue()));

    long offset = 100;
    flatFile.initOffset(offset);
    // Append five identical mocked messages to both commit log and consume queue.
    for (int i = 0; i < 5; i++) {
        ByteBuffer buffer = MessageFormatUtilTest.buildMockedMessageBuffer();
        DispatchRequest request = new DispatchRequest(
            topic, 0, i, (long) buffer.remaining() * i, buffer.remaining(), 0L);
        flatFile.appendCommitLog(buffer);
        flatFile.appendConsumeQueue(request);
    }
    Assert.assertNotNull(flatFile.getFileLock());

    // All mocked messages carry the same store timestamp.
    long time = MessageFormatUtil.getStoreTimeStamp(MessageFormatUtilTest.buildMockedMessageBuffer());
    Assert.assertEquals(time, flatFile.getMinStoreTimestamp());
    Assert.assertEquals(time, flatFile.getMaxStoreTimestamp());

    // Before commit: data is appended but the commit offsets have not advanced.
    long size = MessageFormatUtilTest.buildMockedMessageBuffer().remaining();
    Assert.assertEquals(-1L, flatFile.getFirstMessageOffset());
    Assert.assertEquals(0L, flatFile.getCommitLogMinOffset());
    Assert.assertEquals(0L, flatFile.getCommitLogCommitOffset());
    Assert.assertEquals(5 * size, flatFile.getCommitLogMaxOffset());
    Assert.assertEquals(offset, flatFile.getConsumeQueueMinOffset());
    Assert.assertEquals(offset, flatFile.getConsumeQueueCommitOffset());
    Assert.assertEquals(offset + 5L, flatFile.getConsumeQueueMaxOffset());

    // After commit: commit offsets catch up to the max offsets.
    Assert.assertTrue(flatFile.commitAsync().join());
    Assert.assertEquals(6L, flatFile.getFirstMessageOffset());
    Assert.assertEquals(0L, flatFile.getCommitLogMinOffset());
    Assert.assertEquals(5 * size, flatFile.getCommitLogCommitOffset());
    Assert.assertEquals(5 * size, flatFile.getCommitLogMaxOffset());
    Assert.assertEquals(offset, flatFile.getConsumeQueueMinOffset());
    Assert.assertEquals(offset + 5L, flatFile.getConsumeQueueCommitOffset());
    Assert.assertEquals(offset + 5L, flatFile.getConsumeQueueMaxOffset());

    // test read
    ByteBuffer buffer = flatFile.getMessageAsync(offset).join();
    Assert.assertNotNull(buffer);
    Assert.assertEquals(size, buffer.remaining());
    Assert.assertEquals(6L, MessageFormatUtil.getQueueOffset(buffer));

    flatFile.destroyExpiredFile(0);
    flatFile.destroy();
}
/**
 * Converts a Beam {@link Schema} into a BigQuery {@link TableSchema} by
 * converting each of its fields.
 */
public static TableSchema toTableSchema(Schema schema) {
  return new TableSchema().setFields(toTableFieldSchema(schema));
}
/**
 * A Beam map type must convert to a single repeated STRUCT field with the
 * expected key/value subfields.
 */
@Test
public void testToTableSchema_map() {
  TableSchema schema = toTableSchema(MAP_MAP_TYPE);

  assertThat(schema.getFields().size(), equalTo(1));
  TableFieldSchema field = schema.getFields().get(0);
  assertThat(field.getName(), equalTo("map"));
  assertThat(field.getType(), equalTo(StandardSQLTypeName.STRUCT.toString()));
  assertThat(field.getMode(), equalTo(Mode.REPEATED.toString()));
  assertThat(field.getFields(), containsInAnyOrder(MAP_KEY, MAP_VALUE));
}
/**
 * Reads the OpenAPI definition of a single resource class, using the resolved
 * application path and empty/default state for all remaining arguments.
 */
public OpenAPI read(Class<?> cls) {
    return read(
            cls,
            resolveApplicationPath(),
            null,
            false,
            null,
            null,
            new LinkedHashSet<>(),
            new ArrayList<>(),
            new HashSet<>());
}
/**
 * OpenAPI 3.1 sibling keywords on a $ref property (description, required,
 * readOnly/writeOnly per media type) must be preserved when reading the
 * annotated resources; the result is compared against the expected YAML.
 */
@Test
public void testSiblingsOnProperty() {
    Reader reader = new Reader(new SwaggerConfiguration().openAPI(new OpenAPI()).openAPI31(true));

    Set<Class<?>> classes = new HashSet<>(Arrays.asList(SiblingPropResource.class, WebHookResource.class));
    OpenAPI openAPI = reader.read(classes);
    // Expected serialization, including per-media-type sibling keywords.
    String yaml = "openapi: 3.1.0\n" +
            "paths:\n" +
            "  /pet:\n" +
            "    put:\n" +
            "      tags:\n" +
            "      - pet\n" +
            "      summary: Update an existing pet\n" +
            "      operationId: updatePet\n" +
            "      requestBody:\n" +
            "        description: Pet object that needs to be updated in the store\n" +
            "        content:\n" +
            "          application/json:\n" +
            "            schema:\n" +
            "              $ref: '#/components/schemas/Pet'\n" +
            "              description: A Pet in JSON Format\n" +
            "              required:\n" +
            "              - id\n" +
            "              writeOnly: true\n" +
            "          application/xml:\n" +
            "            schema:\n" +
            "              $ref: '#/components/schemas/Pet'\n" +
            "              description: A Pet in XML Format\n" +
            "              required:\n" +
            "              - id\n" +
            "              writeOnly: true\n" +
            "        required: true\n" +
            "      responses:\n" +
            "        \"200\":\n" +
            "          description: Successful operation\n" +
            "          content:\n" +
            "            application/xml:\n" +
            "              schema:\n" +
            "                $ref: '#/components/schemas/Pet'\n" +
            "                description: A Pet in XML Format\n" +
            "                readOnly: true\n" +
            "            application/json:\n" +
            "              schema:\n" +
            "                $ref: '#/components/schemas/Pet'\n" +
            "                description: A Pet in JSON Format\n" +
            "                readOnly: true\n" +
            "        \"400\":\n" +
            "          description: Invalid ID supplied\n" +
            "        \"404\":\n" +
            "          description: Pet not found\n" +
            "        \"405\":\n" +
            "          description: Validation exception\n" +
            "      security:\n" +
            "      - petstore_auth:\n" +
            "        - write:pets\n" +
            "        - read:pets\n" +
            "      - mutual_tls: []\n" +
            "components:\n" +
            "  schemas:\n" +
            "    Category:\n" +
            "      description: parent\n" +
            "      properties:\n" +
            "        id:\n" +
            "          type: integer\n" +
            "          format: int64\n" +
            "    Pet:\n" +
            "      description: Pet\n" +
            "      properties:\n" +
            "        category:\n" +
            "          $ref: '#/components/schemas/Category'\n" +
            "          description: child\n" +
            "webhooks:\n" +
            "  newPet:\n" +
            "    post:\n" +
            "      requestBody:\n" +
            "        description: Information about a new pet in the system\n" +
            "        content:\n" +
            "          application/json:\n" +
            "            schema:\n" +
            "              $ref: '#/components/schemas/Pet'\n" +
            "              description: Webhook Pet\n" +
            "      responses:\n" +
            "        \"200\":\n" +
            "          description: Return a 200 status to indicate that the data was received\n" +
            "            successfully\n";
    SerializationMatchers.assertEqualsToYaml31(openAPI, yaml);
}
public static String hashpw(String password, String salt) throws IllegalArgumentException { BCrypt B; String real_salt; byte passwordb[], saltb[], hashed[]; char minor = (char) 0; int rounds, off = 0; StringBuilder rs = new StringBuilder(); if (salt == null) { throw new IllegalArgumentException("salt cannot be null"); } int saltLength = salt.length(); if (saltLength < 28) { throw new IllegalArgumentException("Invalid salt"); } if (salt.charAt(0) != '$' || salt.charAt(1) != '2') { throw new IllegalArgumentException("Invalid salt version"); } if (salt.charAt(2) == '$') { off = 3; } else { minor = salt.charAt(2); if (minor != 'a' || salt.charAt(3) != '$') { throw new IllegalArgumentException("Invalid salt revision"); } off = 4; } if (saltLength - off < 25) { throw new IllegalArgumentException("Invalid salt"); } // Extract number of rounds if (salt.charAt(off + 2) > '$') { throw new IllegalArgumentException("Missing salt rounds"); } rounds = Integer.parseInt(salt.substring(off, off + 2)); real_salt = salt.substring(off + 3, off + 25); try { passwordb = (password + (minor >= 'a' ? "\000" : "")).getBytes("UTF-8"); } catch (UnsupportedEncodingException uee) { throw new AssertionError("UTF-8 is not supported"); } saltb = decode_base64(real_salt, BCRYPT_SALT_LEN); B = new BCrypt(); hashed = B.crypt_raw(passwordb, saltb, rounds); rs.append("$2"); if (minor >= 'a') { rs.append(minor); } rs.append("$"); if (rounds < 10) { rs.append("0"); } rs.append(rounds); rs.append("$"); encode_base64(saltb, saltb.length, rs); encode_base64(hashed, bf_crypt_ciphertext.length * 4 - 1, rs); return rs.toString(); }
/** A cost factor above the supported maximum must be rejected. */
@Test
public void testHashpwTooManyRounds() throws IllegalArgumentException {
    thrown.expect(IllegalArgumentException.class);
    BCrypt.hashpw("foo", "$2a$32$......................");
}
/**
 * Returns the nodes of all active children except the first one in sequence
 * order — i.e. everyone but the master.
 */
@Override
public List<T> slaves() {
    final List<ChildData<T>> ordered = getActiveChildren();
    ordered.sort(sequenceComparator);

    final List<T> result = new ArrayList<>();
    boolean skippedMaster = false;
    for (ChildData<T> child : ordered) {
        if (!skippedMaster) {
            // The lowest-sequence child is the master; it is not a slave.
            skippedMaster = true;
            continue;
        }
        result.add(child.getNode());
    }
    return result;
}
/**
 * With stale duplicate entries per container, only the latest entry per
 * container counts; the slaves are everyone after the master in sequence order.
 */
@Test
public void testSlavesWithStaleNodes1() throws Exception {
    putChildData(group, PATH + "/001", "container1"); // stale
    putChildData(group, PATH + "/002", "container1");
    putChildData(group, PATH + "/003", "container2"); // stale
    putChildData(group, PATH + "/004", "container3"); // stale
    putChildData(group, PATH + "/005", "container2");
    putChildData(group, PATH + "/006", "container3");

    List<NodeState> slaves = group.slaves();
    assertThat(slaves.size(), equalTo(2));
    assertThat(slaves.get(0).getContainer(), equalTo("container2"));
    assertThat(slaves.get(1).getContainer(), equalTo("container3"));
}
/**
 * Returns the nodes of {@code network} in topological order. The network is
 * copied first, so the caller's instance is left unmodified.
 */
public static <NodeT> Iterable<NodeT> topologicalOrder(Network<NodeT, ?> network) {
  return computeTopologicalOrder(Graphs.copyOf(network));
}
/** An empty network must yield an empty topological order. */
@Test
public void testTopologicalSortWithEmptyNetwork() {
  assertThat(Networks.topologicalOrder(createEmptyNetwork()), emptyIterable());
}
/**
 * Publishes an UPDATE topic-policy event for the given topic. Topics in a
 * heartbeat namespace are rejected, since their policies may not be updated.
 */
@Override
public CompletableFuture<Void> updateTopicPoliciesAsync(TopicName topicName, TopicPolicies policies) {
    if (!NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) {
        return sendTopicPolicyEvent(topicName, ActionType.UPDATE, policies);
    }
    return CompletableFuture.failedFuture(new BrokerServiceException.NotAllowedException(
            "Not allowed to update topic policy for the heartbeat topic"));
}
/**
 * Clears the cached reader/init state for NAMESPACE1 (via reflection),
 * schedules a delayed policy update, and verifies that the retrying getter
 * eventually observes the updated policies.
 */
@Test
public void testGetTopicPoliciesWithRetry() throws Exception {
    // Drop the namespace from the init-state cache...
    Field initMapField = SystemTopicBasedTopicPoliciesService.class.getDeclaredField("policyCacheInitMap");
    initMapField.setAccessible(true);
    Map<NamespaceName, Boolean> initMap = (Map) initMapField.get(systemTopicBasedTopicPoliciesService);
    initMap.remove(NamespaceName.get(NAMESPACE1));
    // ...and drop its cached system-topic reader as well.
    Field readerCaches = SystemTopicBasedTopicPoliciesService.class.getDeclaredField("readerCaches");
    readerCaches.setAccessible(true);
    Map<NamespaceName, CompletableFuture<SystemTopicClient.Reader<PulsarEvent>>> readers =
        (Map) readerCaches.get(systemTopicBasedTopicPoliciesService);
    readers.remove(NamespaceName.get(NAMESPACE1));

    Backoff backoff = new BackoffBuilder()
        .setInitialTime(500, TimeUnit.MILLISECONDS)
        .setMandatoryStop(5000, TimeUnit.MILLISECONDS)
        .setMax(1000, TimeUnit.MILLISECONDS)
        .create();
    TopicPolicies initPolicy = TopicPolicies.builder()
        .maxConsumerPerTopic(10)
        .build();
    @Cleanup("shutdownNow")
    ScheduledExecutorService executors = Executors.newScheduledThreadPool(1);
    // Apply the policy only after a delay, so the getter has to retry first.
    executors.schedule(new Runnable() {
        @Override
        public void run() {
            try {
                systemTopicBasedTopicPoliciesService.updateTopicPoliciesAsync(TOPIC1, initPolicy).get();
            } catch (Exception ignore) {
            }
        }
    }, 2000, TimeUnit.MILLISECONDS);
    Awaitility.await().untilAsserted(() -> {
        Optional<TopicPolicies> topicPolicies = systemTopicBasedTopicPoliciesService
            .getTopicPoliciesAsyncWithRetry(TOPIC1, backoff, pulsar.getExecutor(), false).get();
        Assert.assertTrue(topicPolicies.isPresent());
        if (topicPolicies.isPresent()) {
            Assert.assertEquals(topicPolicies.get(), initPolicy);
        }
    });
}
/**
 * Delivers a message to every targeted device of a single recipient.
 *
 * <p>Authorization comes from exactly one of: an authenticated sender, an
 * unidentified-access key, a group-send endorsement token, or nothing at all
 * for story messages. Sync messages (sender sending to itself) skip access
 * checks and exclude the authenticated sending device from delivery.
 */
@Timed
@Path("/{destination}")
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ManagedAsync
@Operation(
    summary = "Send a message",
    description = """
        Deliver a message to a single recipient. May be authenticated or unauthenticated; if unauthenticated,
        an unidentifed-access key or group-send endorsement token must be provided, unless the message is a story.
        """)
@ApiResponse(responseCode="200", description="Message was successfully sent", useReturnTypeSchema=true)
@ApiResponse(
    responseCode="401",
    description="The message is not a story and the authorization, unauthorized access key, or group send endorsement token is missing or incorrect")
@ApiResponse(
    responseCode="404",
    description="The message is not a story and some the recipient service ID does not correspond to a registered Signal user")
@ApiResponse(
    responseCode = "409", description = "Incorrect set of devices supplied for recipient",
    content = @Content(schema = @Schema(implementation = AccountMismatchedDevices[].class)))
@ApiResponse(
    responseCode = "410", description = "Mismatched registration ids supplied for some recipient devices",
    content = @Content(schema = @Schema(implementation = AccountStaleDevices[].class)))
public Response sendMessage(@ReadOnly @Auth Optional<AuthenticatedDevice> source,
    @Parameter(description="The recipient's unidentified access key")
    @HeaderParam(HeaderUtils.UNIDENTIFIED_ACCESS_KEY) Optional<Anonymous> accessKey,
    @Parameter(description="A group send endorsement token covering the recipient. Must not be combined with `Unidentified-Access-Key` or set on a story message.")
    @HeaderParam(HeaderUtils.GROUP_SEND_TOKEN) @Nullable GroupSendTokenHeader groupSendToken,
    @HeaderParam(HttpHeaders.USER_AGENT) String userAgent,
    // NOTE(review): this @Parameter description looks copied from an "online"
    // flag; the path param is the recipient's service identifier — confirm.
    @Parameter(description="If true, deliver the message only to recipients that are online when it is sent")
    @PathParam("destination") ServiceIdentifier destinationIdentifier,
    @Parameter(description="If true, the message is a story; access tokens are not checked and sending to nonexistent recipients is permitted")
    @QueryParam("story") boolean isStory,
    @Parameter(description="The encrypted message payloads for each recipient device")
    @NotNull @Valid IncomingMessageList messages,
    @Context ContainerRequestContext context) throws RateLimitExceededException {

  final Sample sample = Timer.start();
  try {
    // Some form of credential (or the story flag) is mandatory.
    if (source.isEmpty() && accessKey.isEmpty() && groupSendToken == null && !isStory) {
      throw new WebApplicationException(Response.Status.UNAUTHORIZED);
    }

    // Group-send tokens are mutually exclusive with every other auth form.
    if (groupSendToken != null) {
      if (!source.isEmpty() || !accessKey.isEmpty()) {
        throw new BadRequestException("Group send endorsement tokens should not be combined with other authentication");
      } else if (isStory) {
        throw new BadRequestException("Group send endorsement tokens should not be sent for story messages");
      }
    }

    // Classify the sender for metrics/auth decisions: self, identified, or unidentified.
    final String senderType;
    if (source.isPresent()) {
      if (source.get().getAccount().isIdentifiedBy(destinationIdentifier)) {
        senderType = SENDER_TYPE_SELF;
      } else {
        senderType = SENDER_TYPE_IDENTIFIED;
      }
    } else {
      senderType = SENDER_TYPE_UNIDENTIFIED;
    }

    // A sync message is a sender delivering to their own account's other devices.
    boolean isSyncMessage = source.isPresent() && source.get().getAccount().isIdentifiedBy(destinationIdentifier);

    // Sync messages may not target the phone-number identity.
    if (isSyncMessage && destinationIdentifier.identityType() == IdentityType.PNI) {
      throw new WebApplicationException(Status.FORBIDDEN);
    }

    Optional<Account> destination;
    if (!isSyncMessage) {
      destination = accountsManager.getByServiceIdentifier(destinationIdentifier);
    } else {
      destination = source.map(AuthenticatedDevice::getAccount);
    }

    // Spam check may short-circuit the whole request with its own response.
    final Optional<Response> spamCheck = spamChecker.checkForSpam(
        context, source.map(AuthenticatedDevice::getAccount), destination);
    if (spamCheck.isPresent()) {
      return spamCheck.get();
    }

    // Only identified senders get a report-spam token minted for the recipient.
    final Optional<byte[]> spamReportToken = switch (senderType) {
      case SENDER_TYPE_IDENTIFIED ->
          reportSpamTokenProvider.makeReportSpamToken(context, source.get(), destination);
      default -> Optional.empty();
    };

    // Validate each payload and accumulate total size for rate limiting.
    int totalContentLength = 0;

    for (final IncomingMessage message : messages.messages()) {
      int contentLength = 0;

      if (StringUtils.isNotEmpty(message.content())) {
        contentLength += message.content().length();
      }

      validateContentLength(contentLength, false, userAgent);
      validateEnvelopeType(message.type(), userAgent);

      totalContentLength += contentLength;
    }

    try {
      rateLimiters.getInboundMessageBytes().validate(destinationIdentifier.uuid(), totalContentLength);
    } catch (final RateLimitExceededException e) {
      // Only enforce the byte limit when dynamically enabled; otherwise just record.
      if (dynamicConfigurationManager.getConfiguration().getInboundMessageByteLimitConfiguration().enforceInboundLimit()) {
        messageByteLimitEstimator.add(destinationIdentifier.uuid().toString());
        throw e;
      }
    }

    try {
      if (isStory) {
        // Stories will be checked by the client; we bypass access checks here for stories.
      } else if (groupSendToken != null) {
        checkGroupSendToken(List.of(destinationIdentifier.toLibsignal()), groupSendToken);
        if (destination.isEmpty()) {
          throw new NotFoundException();
        }
      } else {
        OptionalAccess.verify(source.map(AuthenticatedDevice::getAccount), accessKey, destination,
            destinationIdentifier);
      }

      // A multi-device sender needs a sync of its own sent message.
      boolean needsSync = !isSyncMessage && source.isPresent()
          && source.get().getAccount().getDevices().size() > 1;

      // We return 200 when stories are sent to a non-existent account. Since story sends bypass OptionalAccess.verify
      // we leak information about whether a destination UUID exists if we return any other code (e.g. 404) from
      // these requests.
      if (isStory && destination.isEmpty()) {
        return Response.ok(new SendMessageResponse(needsSync)).build();
      }

      // if destination is empty we would either throw an exception in OptionalAccess.verify when isStory is false
      // or else return a 200 response when isStory is true.
      assert destination.isPresent();

      if (source.isPresent() && !isSyncMessage) {
        checkMessageRateLimit(source.get(), destination.get(), userAgent);
      }

      if (isStory) {
        rateLimiters.getStoriesLimiter().validate(destination.get().getUuid());
      }

      // Sync messages skip the authenticated sending device itself.
      final Set<Byte> excludedDeviceIds;
      if (isSyncMessage) {
        excludedDeviceIds = Set.of(source.get().getAuthenticatedDevice().getId());
      } else {
        excludedDeviceIds = Collections.emptySet();
      }

      DestinationDeviceValidator.validateCompleteDeviceList(destination.get(),
          messages.messages().stream().map(IncomingMessage::destinationDeviceId).collect(Collectors.toSet()),
          excludedDeviceIds);

      DestinationDeviceValidator.validateRegistrationIds(destination.get(),
          messages.messages(),
          IncomingMessage::destinationDeviceId,
          IncomingMessage::destinationRegistrationId,
          destination.get().getPhoneNumberIdentifier().equals(destinationIdentifier.uuid()));

      // Tag the metrics with the effective authentication mechanism.
      final String authType;
      if (SENDER_TYPE_IDENTIFIED.equals(senderType)) {
        authType = AUTH_TYPE_IDENTIFIED;
      } else if (isStory) {
        authType = AUTH_TYPE_STORY;
      } else if (groupSendToken != null) {
        authType = AUTH_TYPE_GROUP_SEND_TOKEN;
      } else {
        authType = AUTH_TYPE_ACCESS_KEY;
      }

      final List<Tag> tags = List.of(UserAgentTagUtil.getPlatformTag(userAgent),
          Tag.of(ENDPOINT_TYPE_TAG_NAME, ENDPOINT_TYPE_SINGLE),
          Tag.of(EPHEMERAL_TAG_NAME, String.valueOf(messages.online())),
          Tag.of(SENDER_TYPE_TAG_NAME, senderType),
          Tag.of(AUTH_TYPE_TAG_NAME, authType),
          Tag.of(IDENTITY_TYPE_TAG_NAME, destinationIdentifier.identityType().name()));

      // Deliver to each targeted device that actually exists on the account.
      for (IncomingMessage incomingMessage : messages.messages()) {
        Optional<Device> destinationDevice = destination.get().getDevice(incomingMessage.destinationDeviceId());

        if (destinationDevice.isPresent()) {
          Metrics.counter(SENT_MESSAGE_COUNTER_NAME, tags).increment();
          sendIndividualMessage(
              source,
              destination.get(),
              destinationDevice.get(),
              destinationIdentifier,
              messages.timestamp(),
              messages.online(),
              isStory,
              messages.urgent(),
              incomingMessage,
              userAgent,
              spamReportToken);
        }
      }

      return Response.ok(new SendMessageResponse(needsSync)).build();
    } catch (MismatchedDevicesException e) {
      // 409: caller supplied a stale/incomplete device list for the recipient.
      throw new WebApplicationException(Response.status(409)
          .type(MediaType.APPLICATION_JSON_TYPE)
          .entity(new MismatchedDevices(e.getMissingDevices(), e.getExtraDevices()))
          .build());
    } catch (StaleDevicesException e) {
      // 410: registration ids no longer match for some devices.
      throw new WebApplicationException(Response.status(410)
          .type(MediaType.APPLICATION_JSON)
          .entity(new StaleDevices(e.getStaleDevices()))
          .build());
    }
  } finally {
    sample.stop(SEND_MESSAGE_LATENCY_TIMER);
  }
}
@Test
void testSingleDeviceCurrentNotUrgent() throws Exception {
  // PUT a non-urgent message fixture to the single-device destination and
  // verify it is forwarded exactly once with urgent=false.
  final IncomingMessageList payload = SystemMapper.jsonMapper().readValue(
      jsonFixture("fixtures/current_message_single_device_not_urgent.json"),
      IncomingMessageList.class);

  try (final Response httpResponse = resources.getJerseyTest()
      .target(String.format("/v1/messages/%s", SINGLE_DEVICE_UUID))
      .request()
      .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
      .put(Entity.entity(payload, MediaType.APPLICATION_JSON_TYPE))) {

    assertThat("Good Response", httpResponse.getStatus(), is(equalTo(200)));

    final ArgumentCaptor<Envelope> envelopeCaptor = ArgumentCaptor.forClass(Envelope.class);
    verify(messageSender, times(1)).sendMessage(any(Account.class), any(Device.class), envelopeCaptor.capture(), eq(false));

    final Envelope sentEnvelope = envelopeCaptor.getValue();
    assertTrue(sentEnvelope.hasSourceUuid());
    assertTrue(sentEnvelope.hasSourceDevice());
    assertFalse(sentEnvelope.getUrgent());
  }
}
public PoissonDistribution(double lambda) { if (lambda < 0.0) { throw new IllegalArgumentException("Invalid lambda: " + lambda); } this.lambda = lambda; entropy = (Math.log(2 * Math.PI * Math.E) + Math.log(lambda)) / 2 - 1 / (12 * lambda) - 1 / (24 * lambda * lambda) - 19 / (360 * lambda * lambda * lambda); }
@Test public void testPoissonDistribution() { System.out.println("PoissonDistribution"); MathEx.setSeed(19650218); // to get repeatable results. PoissonDistribution instance = new PoissonDistribution(5.5); int[] data = instance.randi(1000); PoissonDistribution est = PoissonDistribution.fit(data); assertEquals(5.52, est.lambda, 1E-2); }
/**
 * Creates a new HazelcastInstance; a null config means "load the default config".
 */
public static HazelcastInstance newHazelcastInstance(Config config) {
    final Config resolvedConfig = (config == null) ? Config.load() : config;
    return newHazelcastInstance(
            resolvedConfig,
            resolvedConfig.getInstanceName(),
            new DefaultNodeContext());
}
@Test
public void mobyNameGeneratedIfPropertyEnabled() {
    // given: moby-style instance naming switched on
    final Config config = new Config();
    config.getProperties().put(ClusterProperty.MOBY_NAMING_ENABLED.getName(), "true");

    // when
    hazelcastInstance = HazelcastInstanceFactory.newHazelcastInstance(config);
    final String instanceName = hazelcastInstance.getName();

    // then: a moby name is used instead of the default "_hzInstance_" prefix
    assertNotNull(instanceName);
    assertNotContains(instanceName, "_hzInstance_");
}
/**
 * Matches a pickle against the configured line filters.
 * <p>
 * A pickle from a file without any filter always matches. Otherwise it matches
 * when any filtered line equals the pickle's own, scenario, examples, rule or
 * feature line.
 */
@Override
public boolean test(Pickle pickle) {
    // Single lookup instead of containsKey + get (also tolerates a null-mapped
    // value, treating it the same as "no filter for this file").
    Iterable<Integer> filteredLines = lineFilters.get(pickle.getUri());
    if (filteredLines == null) {
        return true;
    }
    for (Integer line : filteredLines) {
        if (Objects.equals(line, pickle.getLocation().getLine())
                || Objects.equals(line, pickle.getScenarioLocation().getLine())
                || pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false)
                || pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false)
                || pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false)) {
            return true;
        }
    }
    return false;
}
@Test
void empty() {
    // A feature registered with an empty line list matches no pickle at all.
    LinePredicate emptyLineFilter = new LinePredicate(singletonMap(featurePath, emptyList()));

    assertFalse(emptyLineFilter.test(firstPickle));
    assertFalse(emptyLineFilter.test(secondPickle));
    assertFalse(emptyLineFilter.test(thirdPickle));
    assertFalse(emptyLineFilter.test(fourthPickle));
}
/**
 * Loads schema meta data: columns first, then indexes for the discovered
 * tables, assembled into a single SchemaMetaData for the default schema.
 */
@Override
public Collection<SchemaMetaData> load(final MetaDataLoaderMaterial material) throws SQLException {
    Map<String, Collection<ColumnMetaData>> columnMetaDataMap =
            loadColumnMetaDataMap(material.getDataSource(), material.getActualTableNames());
    Collection<TableMetaData> tables = new LinkedList<>();
    if (!columnMetaDataMap.isEmpty()) {
        // Index lookup is keyed by the same table names as the column map.
        Map<String, Collection<IndexMetaData>> indexMetaDataMap =
                loadIndexMetaData(material.getDataSource(), columnMetaDataMap.keySet());
        columnMetaDataMap.forEach((tableName, columns) -> tables.add(new TableMetaData(
                tableName,
                columns,
                indexMetaDataMap.getOrDefault(tableName, Collections.emptyList()),
                Collections.emptyList())));
    }
    return Collections.singleton(new SchemaMetaData(material.getDefaultSchemaName(), tables));
}
/**
 * Loading from SQL Server with major version 15 must take the "high version"
 * column query path and map both columns and indexes into the table meta data.
 */
@Test
void assertLoadWithTablesWithHighVersion() throws SQLException {
    DataSource dataSource = mockDataSource();
    ResultSet resultSet = mockTableMetaDataResultSet();
    // Stub the high-version column meta data query.
    when(dataSource.getConnection().prepareStatement(LOAD_COLUMN_META_DATA_WITH_TABLES_HIGH_VERSION).executeQuery()).thenReturn(resultSet);
    ResultSet indexResultSet = mockIndexMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(LOAD_INDEX_META_DATA)
            .executeQuery()).thenReturn(indexResultSet);
    // Major version 15 selects the high-version SQL variant.
    when(dataSource.getConnection().getMetaData().getDatabaseMajorVersion()).thenReturn(15);
    Collection<SchemaMetaData> actual = getDialectTableMetaDataLoader().load(
            new MetaDataLoaderMaterial(Collections.singletonList("tbl"), dataSource, new SQLServerDatabaseType(), "sharding_db"));
    assertTableMetaDataMap(actual);
    TableMetaData actualTableMetaData = actual.iterator().next().getTables().iterator().next();
    Iterator<ColumnMetaData> columnsIterator = actualTableMetaData.getColumns().iterator();
    // Column order and the boolean flags come straight from the mocked result set.
    assertThat(columnsIterator.next(), is(new ColumnMetaData("id", Types.INTEGER, false, true, true, true, false, false)));
    assertThat(columnsIterator.next(), is(new ColumnMetaData("name", Types.VARCHAR, false, false, false, false, false, true)));
}
/**
 * Commits the DistCp job: optionally re-assembles chunked files, performs the
 * standard commit, removes temp files, then applies the configured post-copy
 * action (delete-missing / atomic commit / track-missing) and preserves
 * directory attributes before the final cleanup.
 *
 * @param jobContext the job context carrying the DistCp configuration
 * @throws IOException if any commit step fails
 */
@Override
public void commitJob(JobContext jobContext) throws IOException {
    Configuration conf = jobContext.getConfiguration();
    // Snapshot the option flags used by the helper methods invoked below.
    syncFolder = conf.getBoolean(DistCpConstants.CONF_LABEL_SYNC_FOLDERS, false);
    overwrite = conf.getBoolean(DistCpConstants.CONF_LABEL_OVERWRITE, false);
    updateRoot = conf.getBoolean(CONF_LABEL_UPDATE_ROOT, false);
    targetPathExists = conf.getBoolean(
        DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS, true);
    ignoreFailures = conf.getBoolean(
        DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);

    // Chunked copies need their file chunks concatenated before commit.
    if (blocksPerChunk > 0) {
        concatFileChunks(conf);
    }

    super.commitJob(jobContext);

    cleanupTempFiles(jobContext);

    try {
        // The three post-copy modes are mutually exclusive, checked in this order.
        if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
            deleteMissing(conf);
        } else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
            commitData(conf);
        } else if (conf.get(CONF_LABEL_TRACK_MISSING) != null) {
            // save missing information to a directory
            trackMissing(conf);
        }
        // for HDFS-14621, should preserve status after -delete
        String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
        final boolean preserveRawXattrs = conf.getBoolean(
            DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
        if ((attributes != null && !attributes.isEmpty()) || preserveRawXattrs) {
            preserveFileAttributesForDirectories(conf);
        }
        taskAttemptContext.setStatus("Commit Successful");
    } finally {
        // Cleanup always runs, even when a post-copy action failed.
        cleanup(conf);
    }
}
/**
 * Atomic commit must move the work directory to the (previously missing)
 * final path, and committing a second time must leave that state unchanged.
 */
@Test
public void testAtomicCommitMissingFinal() throws IOException {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
        taskAttemptContext.getTaskAttemptID().getJobID());
    Configuration conf = jobContext.getConfiguration();

    // Random sibling paths so repeated runs do not collide.
    String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
    String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        fs.mkdirs(new Path(workPath));

        conf.set(CONF_LABEL_TARGET_WORK_PATH, workPath);
        conf.set(CONF_LABEL_TARGET_FINAL_PATH, finalPath);
        conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);

        assertPathExists(fs, "Work path", new Path(workPath));
        assertPathDoesNotExist(fs, "Final path", new Path(finalPath));

        committer.commitJob(jobContext);
        // After commit the work dir content must live at the final path only.
        assertPathDoesNotExist(fs, "Work path", new Path(workPath));
        assertPathExists(fs, "Final path", new Path(finalPath));

        //Test for idempotent commit
        committer.commitJob(jobContext);
        assertPathDoesNotExist(fs, "Work path", new Path(workPath));
        assertPathExists(fs, "Final path", new Path(finalPath));
    } finally {
        // Best-effort cleanup and reset of the shared config flag.
        TestDistCpUtils.delete(fs, workPath);
        TestDistCpUtils.delete(fs, finalPath);
        conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
    }
}
/**
 * Ensures exactly one client configuration source is set: a file path
 * (XML/YAML) or an inline string (XML/YAML).
 *
 * @throws HazelcastException when zero or more than one source is configured
 */
public void validate(DataConnectionConfig dataConnectionConfig) {
    int configuredSources = getNumberOfSetItems(dataConnectionConfig,
            CLIENT_XML_PATH, CLIENT_YML_PATH, CLIENT_XML, CLIENT_YML);
    if (configuredSources == 1) {
        return;
    }
    throw new HazelcastException("HazelcastDataConnection with name '" + dataConnectionConfig.getName()
            + "' could not be created, "
            + "provide either a file path with one of "
            + "\"client_xml_path\" or \"client_yml_path\" properties "
            + "or a string content with one of \"client_xml\" or \"client_yml\" properties "
            + "for the client configuration.");
}
@Test
public void testValidateEmptyString() {
    // An empty client_xml value counts as "not set", so no source is configured.
    DataConnectionConfig config = new DataConnectionConfig();
    config.setProperty(HazelcastDataConnection.CLIENT_XML, "");

    HazelcastDataConnectionConfigValidator sut = new HazelcastDataConnectionConfigValidator();

    assertThatCode(() -> sut.validate(config)).isInstanceOf(HazelcastException.class);
}
/**
 * Dispatcher for the encrypt module: routes the given method name to the
 * matching local implementation and returns its result.
 * Unknown method names and any thrown exception result in {@code null}.
 *
 * @param methodName one of the {@code Modules.Encrypt.METHOD_*} constants
 * @param argv       positional arguments; expected types depend on the method
 * @return the invocation result cast to {@code T}, or {@code null}
 */
public <T> T invokeModuleFunction(String methodName, Object... argv) {
    try {
        if (Modules.Encrypt.METHOD_ENCRYPT_AES.equals(methodName)) {
            return (T) encryptAES((String) argv[0]);
        } else if (Modules.Encrypt.METHOD_DECRYPT_AES.equals(methodName)) {
            return (T) decryptAES((String) argv[0]);
        } else if (Modules.Encrypt.METHOD_VERIFY_SECRET_KEY.equals(methodName)) {
            return (T) verifySecretKey((Uri) argv[0]);
        } else if (Modules.Encrypt.METHOD_ENCRYPT_EVENT_DATA.equals(methodName)) {
            return (T) encryptEventData(argv[0]);
        } else if (Modules.Encrypt.METHOD_ENCRYPT_EVENT_DATA_WITH_KEY.equals(methodName)) {
            return (T) encryptEventData(argv[0], (SecreteKey) argv[1]);
        } else if (Modules.Encrypt.METHOD_STORE_SECRET_KEY.equals(methodName)) {
            // Store has no result; falls through to the final "return null".
            storeSecretKey((String) argv[0]);
        } else if (Modules.Encrypt.METHOD_LOAD_SECRET_KEY.equals(methodName)) {
            return (T) loadSecretKey();
        } else if (Modules.Encrypt.METHOD_VERIFY_SUPPORT_TRANSPORT.equals(methodName)) {
            return (T) mSecretKeyManager.isSupportTransportEncrypt();
        } else if (Modules.Encrypt.METHOD_STORE_EVENT.equals(methodName)) {
            // Event-record encryption only applies when the configured listener
            // is an AbsSAEncrypt implementation; otherwise null is returned.
            SAEncryptListener encryptListener = mSensorsDataEncrypt.getEncryptListener();
            if (encryptListener instanceof AbsSAEncrypt) {
                return (T) ((AbsSAEncrypt) encryptListener).encryptEventRecord((String) argv[0]);
            }
        } else if (Modules.Encrypt.METHOD_LOAD_EVENT.equals(methodName)) {
            SAEncryptListener encryptListener = mSensorsDataEncrypt.getEncryptListener();
            if (encryptListener instanceof AbsSAEncrypt) {
                return (T) ((AbsSAEncrypt) encryptListener).decryptEventRecord((String) argv[0]);
            }
        }
    } catch (Exception e) {
        // Defensive: dispatch must never propagate; log and return null instead.
        SALog.printStackTrace(e);
    }
    return null;
}
@Test
public void invokeModuleFunction() {
    // Smoke test: loading the secret key through the module dispatcher must not throw.
    SAHelper.initSensors(mApplication);
    SAEncryptAPIImpl api = new SAEncryptAPIImpl(
            SensorsDataAPI.sharedInstance(mApplication).getSAContextManager());
    api.invokeModuleFunction(Modules.Encrypt.METHOD_LOAD_SECRET_KEY);
}
/**
 * Sends the given message to DGL with sender/receiver OIN headers and logs
 * the outcome remotely.
 *
 * @param request the message to deliver
 * @return {@code true} when the JMS send succeeded, {@code false} otherwise
 */
public boolean sendRequest(AfnemersberichtAanDGL request) {
    Map<String, Object> extraHeaders = new HashMap<>();
    extraHeaders.put(Headers.X_AUX_SENDER_ID, digidOIN);
    extraHeaders.put(Headers.X_AUX_RECEIVER_ID, digileveringOIN);
    try {
        digileveringSender.sendMessage(request, HeaderUtil.createAfnemersberichtAanDGLHeaders(extraHeaders));
        // Constant-first equals: a null bericht type no longer throws an NPE
        // after a successful send; it is simply logged as a regular bericht.
        if ("Av01".equals(MessageUtil.getBerichttype(request))) {
            // Av01 messages must not be related to an account in the remote log.
            digidXClient.remoteLogWithoutRelatingToAccount(Log.SEND_SUCCESS, "Av01");
        } else {
            digidXClient.remoteLogBericht(Log.SEND_SUCCESS, request);
        }
        return true;
    } catch (JmsException jmsException) {
        logger.error(jmsException.getMessage());
        digidXClient.remoteLogBericht(Log.SEND_FAILURE, request);
        return false;
    }
}
@Test
public void testSendAv01Correct() {
    // given: an Av01 message wrapped for DGL
    AfnemersberichtAanDGLFactory factory = new AfnemersberichtAanDGLFactory("oin1", "oin2");
    AfnemersberichtAanDGL bericht =
            factory.createAfnemersberichtAanDGL(TestDglMessagesUtil.createTestAv01("aNummer"));

    // when
    classUnderTest.sendRequest(bericht);

    // then: sent once and logged without relating it to an account
    verify(digileveringSender, times(1)).sendMessage(any(), any());
    verify(digidXClient, times(1)).remoteLogWithoutRelatingToAccount(Log.SEND_SUCCESS, "Av01");
}
public void setLanguage(String language) { if(language == null) { // TODO: Log a message? // But how to find the name of the offending GUI element in the case of a TestBean? super.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_NONE); } else { final String style = languageProperties.getProperty(language); if (style == null) { super.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_NONE); } else { super.setSyntaxEditingStyle(style); } } }
@Test public void testSetLanguage() { try { @SuppressWarnings("deprecation") // test code JSyntaxTextArea textArea = new JSyntaxTextArea(30, 50, false); textArea.setLanguage(null); assertEquals(SyntaxConstants.SYNTAX_STYLE_NONE, textArea.getSyntaxEditingStyle()); } catch (HeadlessException he) { // Does not work in headless mode, which depends on value of java.awt.headless property // and the OS (e.g. might work on MacOS and not on Linux due to missing X11). System.out.println("WARNING for JSyntaxTextAreaTest.testSetLanguage test: does not work in headless mode"); } }
/**
 * Lists all namespaces known to this server.
 *
 * @return a successful {@link Result} wrapping the namespace list
 */
@GetMapping("/list")
public Result<List<Namespace>> getNamespaceList() {
    return Result.success(namespaceOperationService.getNamespaceList());
}
@Test
void testGetNamespaceList() {
    // given: the operation service returns a single namespace
    Namespace stored = new Namespace();
    stored.setNamespace(TEST_NAMESPACE_ID);
    stored.setNamespaceShowName(TEST_NAMESPACE_NAME);
    stored.setNamespaceDesc(TEST_NAMESPACE_DESC);
    List<Namespace> stubbedList = Collections.singletonList(stored);
    when(namespaceOperationService.getNamespaceList()).thenReturn(stubbedList);

    // when
    Result<List<Namespace>> response = namespaceControllerV2.getNamespaceList();

    // then: the service is consulted and its data is passed through unchanged
    verify(namespaceOperationService).getNamespaceList();
    assertEquals(ErrorCode.SUCCESS.getCode(), response.getCode());
    List<Namespace> returnedList = response.getData();
    Namespace returned = returnedList.get(0);
    assertEquals(stubbedList.size(), returnedList.size());
    assertEquals(stored.getNamespace(), returned.getNamespace());
    assertEquals(stored.getNamespaceShowName(), returned.getNamespaceShowName());
    assertEquals(stored.getNamespaceDesc(), returned.getNamespaceDesc());
}
byte[] createErrorContent(String requestUri, int statusCode, String message) { String sanitizedString = message != null ? StringUtil.sanitizeXmlString(message) : ""; String statusCodeString = Integer.toString(statusCode); writer.resetWriter(); try { writer.write("<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=ISO-8859-1\"/>\n<title>Error "); writer.write(statusCodeString); writer.write("</title>\n</head>\n<body>\n<h2>HTTP ERROR: "); writer.write(statusCodeString); writer.write("</h2>\n<p>Problem accessing "); writer.write(StringUtil.sanitizeXmlString(requestUri)); writer.write(". Reason:\n<pre> "); writer.write(sanitizedString); writer.write("</pre></p>\n<hr/>\n</body>\n</html>\n"); } catch (IOException e) { // IOException should not be thrown unless writer is constructed using byte[] parameter throw new RuntimeException(e); } return writer.getByteArray(); }
@Test
void response_content_matches_expected_string() {
    // Expected page rendered from the fixed template for status 200.
    String expected = "<html>\n"
            + "<head>\n"
            + "<meta http-equiv=\"Content-Type\" content=\"text/html;charset=ISO-8859-1\"/>\n"
            + "<title>Error 200</title>\n"
            + "</head>\n"
            + "<body>\n"
            + "<h2>HTTP ERROR: 200</h2>\n"
            + "<p>Problem accessing http://foo.bar. Reason:\n"
            + "<pre> My custom error message</pre></p>\n"
            + "<hr/>\n"
            + "</body>\n"
            + "</html>\n";

    ErrorResponseContentCreator creator = new ErrorResponseContentCreator();
    byte[] content = creator.createErrorContent(
            "http://foo.bar", HttpServletResponse.SC_OK, "My custom error message");

    // The page is emitted as ISO-8859-1 bytes.
    assertEquals(expected, new String(content, StandardCharsets.ISO_8859_1));
}
/**
 * Returns the shuffle descriptors for the given cluster partition data set,
 * requesting them from the ResourceManager on first access and caching the
 * result for subsequent calls.
 */
@Override
public List<ShuffleDescriptor> getClusterPartitionShuffleDescriptors(
        IntermediateDataSetID intermediateDataSetID) {
    // computeIfAbsent: only the first lookup per data set hits the ResourceManager.
    return clusterPartitionShuffleDescriptors.computeIfAbsent(
            intermediateDataSetID, this::requestShuffleDescriptorsFromResourceManager);
}
/**
 * Requesting cluster partition shuffle descriptors before the tracker is
 * connected to a ResourceManager must fail with a NullPointerException.
 */
@Test
void testGetShuffleDescriptorsBeforeConnectToResourceManager() {
    final TestingShuffleMaster shuffleMaster = new TestingShuffleMaster();
    IntermediateDataSetID intermediateDataSetId = new IntermediateDataSetID();

    final Queue<ReleaseCall> releaseCalls = new ArrayBlockingQueue<>(4);
    final Queue<PromoteCall> promoteCalls = new ArrayBlockingQueue<>(4);
    final JobMasterPartitionTrackerImpl partitionTracker =
            new JobMasterPartitionTrackerImpl(
                    new JobID(),
                    shuffleMaster,
                    resourceId ->
                            Optional.of(
                                    createTaskExecutorGateway(
                                            resourceId, releaseCalls, promoteCalls)));

    // No ResourceManager connection was established, so the lookup must throw.
    assertThatThrownBy(
                    () ->
                            partitionTracker.getClusterPartitionShuffleDescriptors(
                                    intermediateDataSetId))
            .isInstanceOf(NullPointerException.class);
}
/**
 * Renders this counter as {@code Counter{total=..., start=..., end=...}}.
 * The field order is fixed; tests assert against this exact format.
 */
@Override
public String toString() {
    return toStringHelper(this)
            .add("total", total)
            .add("start", start)
            .add("end", end)
            .toString();
}
@Test
public void toStringTest() {
    // Note: constructor argument order differs from the printed field order.
    Counter first = new Counter(100L, 300L, 200L);
    assertEquals("Counter{total=300, start=100, end=200}", first.toString());

    Counter second = new Counter(200L, 500L, 300L);
    assertEquals("Counter{total=500, start=200, end=300}", second.toString());
}
/**
 * Deep-clones the given object, choosing the cheapest safe strategy:
 * strings are returned as-is; collections/maps holding non-Serializable
 * elements are round-tripped through JSON with a reconstructed generic type;
 * JsonNode uses its own deepCopy(); Serializable objects use Java
 * serialization with a JSON fallback; everything else is JSON-cloned.
 */
@Override
public <T> T clone(T object) {
    if (object instanceof String) {
        // Strings are immutable; no copy needed.
        return object;
    } else if (object instanceof Collection) {
        // The element type is inferred from the first non-null element; if it
        // is not Serializable, clone via JSON with an explicit parametric type.
        Object firstElement = findFirstNonNullElement((Collection) object);
        if (firstElement != null && !(firstElement instanceof Serializable)) {
            JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
            return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
        }
    } else if (object instanceof Map) {
        // Same idea for maps: inspect the first non-null entry's key/value types.
        Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
        if (firstEntry != null) {
            Object key = firstEntry.getKey();
            Object value = firstEntry.getValue();
            if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
                JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
                return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
            }
        }
    } else if (object instanceof JsonNode) {
        // Jackson trees have a dedicated deep-copy operation.
        return (T) ((JsonNode) object).deepCopy();
    }
    if (object instanceof Serializable) {
        try {
            return (T) SerializationHelper.clone((Serializable) object);
        } catch (SerializationException e) {
            //it is possible that object itself implements java.io.Serializable, but underlying structure does not
            //in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
        }
    }
    return jsonClone(object);
}
@Test
public void should_clone_jsonnode() {
    // An ArrayNode must be deep-copied, not merely reference-copied.
    Object source = mapper.getObjectMapper().createArrayNode()
            .add(BigDecimal.ONE)
            .add(1.0)
            .add("string");

    Object copy = serializer.clone(source);

    assertEquals(source, copy);
    assertNotSame(source, copy);
}
/**
 * @return the RPC type handled by this register service, always gRPC
 */
@Override
public String rpcType() {
    return RpcTypeEnum.GRPC.getName();
}
@Test
public void testRpcType() {
    // The register service must always report the gRPC RPC type.
    String expected = RpcTypeEnum.GRPC.getName();
    assertEquals(expected, shenyuClientRegisterGrpcService.rpcType());
}
/**
 * Parses a SnapshotRef from its JSON representation.
 *
 * @param json JSON text; must be non-null and non-empty
 * @throws IllegalArgumentException when the input is null or empty
 */
public static SnapshotRef fromJson(String json) {
    boolean hasContent = json != null && !json.isEmpty();
    Preconditions.checkArgument(hasContent, "Cannot parse snapshot ref from invalid JSON: %s", json);
    return JsonUtil.parse(json, SnapshotRefParser::fromJson);
}
@Test
public void testBranchFromJsonAllFields() {
    // JSON with every optional branch field populated.
    String branchJson = "{\"snapshot-id\":1,\"type\":\"branch\",\"min-snapshots-to-keep\":2,"
            + "\"max-snapshot-age-ms\":3,\"max-ref-age-ms\":4}";
    SnapshotRef expected = SnapshotRef.branchBuilder(1L)
            .minSnapshotsToKeep(2)
            .maxSnapshotAgeMs(3L)
            .maxRefAgeMs(4L)
            .build();
    assertThat(SnapshotRefParser.fromJson(branchJson))
            .as("Should be able to deserialize branch with all fields")
            .isEqualTo(expected);
}
/**
 * Sorts the given PDF files in place according to the selected option.
 *
 * @param option one of DATE_INDEX, NAME_INDEX, SIZE_INCREASING_ORDER_INDEX
 *               or SIZE_DECREASING_ORDER_INDEX
 * @param pdf    the files to sort
 * @throws IllegalArgumentException if {@code option} is not a known sort option
 */
public void performSortOperation(int option, List<File> pdf) {
    switch (option) {
        case DATE_INDEX:
            sortFilesByDateNewestToOldest(pdf);
            break;
        case NAME_INDEX:
            sortByNameAlphabetical(pdf);
            break;
        case SIZE_INCREASING_ORDER_INDEX:
            sortFilesBySizeIncreasingOrder(pdf);
            break;
        case SIZE_DECREASING_ORDER_INDEX:
            sortFilesBySizeDecreasingOrder(pdf);
            break;
        default:
            // Unknown options were previously ignored silently; fail fast instead
            // (the unit test expects a message starting with "Invalid sort option").
            throw new IllegalArgumentException("Invalid sort option: " + option);
    }
}
/**
 * Invalid sort options must be rejected with an IllegalArgumentException
 * whose message starts with "Invalid sort option".
 */
@Test
public void shouldThrowOnInvalidSortOption() {
    // given
    List<Integer> invalidOptions = asList(-1, 14, 65535, 8);
    for (Integer item : invalidOptions) {
        try {
            // when
            mInstance.performSortOperation(item, mFiles);
            // NOTE(review): if no exception is thrown this iteration passes
            // silently — consider failing explicitly here once the production
            // code is confirmed to reject unknown options.
        } catch (IllegalArgumentException ex) {
            // then
            Assert.assertTrue(ex.getMessage().startsWith("Invalid sort option"));
        }
    }
}
/**
 * Executes a void operation with retries by delegating to the value-returning
 * {@code retry} overload, wrapping the operation so it returns {@code null}.
 *
 * @param action     action name (for logging/diagnostics in the delegate)
 * @param path       path the operation acts on (may be null)
 * @param idempotent whether the operation may safely be retried
 * @param retrying   callback invoked on each retry
 * @param operation  the void operation to execute
 * @throws IOException if the operation ultimately fails
 */
@Retries.RetryTranslated
public void retry(String action, String path, boolean idempotent, Retried retrying, InvocationRaisingIOE operation) throws IOException {
    retry(action, path, idempotent, retrying,
        () -> {
            operation.apply();
            return null;
        });
}
/**
 * A 400 (bad request) on a non-idempotent call must not be retried:
 * the invoker is expected to surface it as {@code AWSBadRequestException}.
 */
@Test(expected = AWSBadRequestException.class)
public void testNoRetryOfBadRequestNonIdempotent() throws Throwable {
    invoker.retry("test", null, false,
        () -> {
            throw serviceException(400, "bad request");
        });
}
@Override public void validateDeptList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得科室信息 Map<Long, DeptDO> deptMap = getDeptMap(ids); // 校验 ids.forEach(id -> { DeptDO dept = deptMap.get(id); if (dept == null) { throw exception(DEPT_NOT_FOUND); } if (!CommonStatusEnum.ENABLE.getStatus().equals(dept.getStatus())) { throw exception(DEPT_NOT_ENABLE, dept.getName()); } }); }
@Test public void testValidateDeptList_notFound() { // 准备参数 List<Long> ids = singletonList(randomLongId()); // 调用, 并断言异常 assertServiceException(() -> deptService.validateDeptList(ids), DEPT_NOT_FOUND); }
/**
 * Converts the given value to the specified class.
 * Thin bridge to the {@link Type}-based overload.
 *
 * @param type  the target class
 * @param value the value to convert
 * @return the converted value
 * @throws ConvertException if the conversion fails
 */
public static <T> T convert(Class<T> type, Object value) throws ConvertException {
    return convert((Type) type, value);
}
@Test
public void toClassTest() {
    // Converting a fully-qualified class name string yields the Class object itself.
    final Class<?> result = Convert.convert(Class.class, "cn.hutool.core.convert.ConvertTest.Product");
    assertSame(Product.class, result);
}
/**
 * Deregisters all command-runner gauges: the numeric and textual
 * status/degraded-reason metrics, plus their legacy-named counterparts.
 */
@Override
public void close() {
    metrics.removeMetric(commandRunnerStatusNumMetricName);
    metrics.removeMetric(commandRunnerDegradedReasonNumMetricName);
    metrics.removeMetric(commandRunnerStatusMetricName);
    metrics.removeMetric(commandRunnerDegradedReasonMetricName);
    // Legacy metric names are removed as well.
    metrics.removeMetric(commandRunnerStatusMetricNameLegacy);
    metrics.removeMetric(commandRunnerDegradedReasonMetricNameLegacy);
}
/**
 * Closing the metrics object must remove every registered gauge:
 * current and legacy names, numeric and textual variants alike.
 */
@Test
public void shouldRemoveNoneCommandRunnerDegradedReason() {
    // When:
    commandRunnerMetrics.close();

    // Then:
    verify(metrics).removeMetric(METRIC_NAME_1_LEGACY);
    verify(metrics).removeMetric(METRIC_NAME_2_LEGACY);
    verify(metrics).removeMetric(METRIC_NAME_1);
    verify(metrics).removeMetric(METRIC_NAME_2);
    verify(metrics).removeMetric(NUM_METRIC_NAME_1);
    verify(metrics).removeMetric(NUM_METRIC_NAME_2);
}
/**
 * Closes the stream once: closes and cleans up the wrapped output stream.
 * Failures are logged rather than propagated; repeated calls are no-ops.
 */
@Override
public void close() {
    if (closed) {
        return;
    }
    closed = true;
    try {
        outputStreamWrapper.close();
        outputStreamWrapper.cleanup();
    } catch (Throwable t) {
        LOG.warn("Could not close the state stream for {}.", metadataFilePath, t);
    }
}
@TestTemplate
void testCleanupWhenClosed() throws Exception {
    // Closing the stream must clean up: the metadata file may not remain.
    Path metadataPath = baseFolder();
    FsCheckpointMetadataOutputStream metadataStream = createTestStream(metadataPath, fileSystem);

    metadataStream.close();

    assertThat(fileSystem.exists(metadataPath)).isFalse();
}
/**
 * Computes the metric scope [start, end] for one judgement interval.
 * <p>
 * end = start + judgementNumber * judgementDuration. When no explicit end
 * time is configured both bounds are shifted by the warm-up duration, and
 * when a look-back is configured the start is recomputed back from the end
 * (sliding-window judgements).
 */
protected ScopeTimeConfig calculateStartAndEndForJudgement(
    CanaryAnalysisExecutionRequest config, long judgementNumber, Duration judgementDuration) {

    Duration warmupDuration = config.getBeginCanaryAnalysisAfterAsDuration();
    // Offset of this judgement's end relative to the analysis start.
    Duration offset = judgementDuration.multipliedBy(judgementNumber);
    ScopeTimeConfig scopeTimeConfig = new ScopeTimeConfig();
    // Fall back to "now" when the request does not pin a start time.
    Instant startTime = Optional.ofNullable(config.getStartTime()).orElse(now(clock));
    scopeTimeConfig.start = startTime;
    scopeTimeConfig.end = startTime.plus(offset);

    if (config.getEndTime() == null) {
        // No fixed end time: push the whole window past the warm-up period.
        scopeTimeConfig.start = scopeTimeConfig.start.plus(warmupDuration);
        scopeTimeConfig.end = scopeTimeConfig.end.plus(warmupDuration);
    }

    // If the look back is defined, use it to recalculate the start time, this is used to do sliding
    // window judgements
    if (config.getLookBackAsInstant().isAfter(ZERO_AS_INSTANT)) {
        scopeTimeConfig.start = scopeTimeConfig.end.minus(config.getLookBackAsDuration());
    }

    return scopeTimeConfig;
}
/**
 * With no explicit start/end/look-back in the scopes, judgement i must span
 * [now, now + i * interval].
 */
@Test
public void test_that_calculateStartAndEndForJudgement_has_expected_start_and_end_when_nothing_is_set_in_the_scopes() {
    CanaryAnalysisExecutionRequest request =
        CanaryAnalysisExecutionRequest.builder()
            .scopes(ImmutableList.of(CanaryAnalysisExecutionRequestScope.builder().build()))
            .build();

    Duration intervalDuration = Duration.ofMinutes(3);
    // Check several judgement numbers: the end must scale linearly with i.
    for (int i = 1; i < 6; i++) {
        SetupAndExecuteCanariesStage.ScopeTimeConfig conf =
            stage.calculateStartAndEndForJudgement(request, i, intervalDuration);
        assertEquals(now, conf.getStart());
        assertEquals(now.plus(i * 3, ChronoUnit.MINUTES), conf.getEnd());
    }
}
/**
 * Returns the string form of the given UUID, generating a fresh random UUID
 * when none is supplied.
 */
public static String getOrCreateUuid(@Nullable UUID uuid) {
    return (uuid != null) ? uuid.toString() : UUID.randomUUID().toString();
}
@Test
public void testGetOrCreateUuid() {
    // A supplied UUID is returned verbatim; no fresh UUID is generated.
    UUID supplied = UUID.randomUUID();
    Assert.assertEquals(supplied.toString(), IdHelper.getOrCreateUuid(supplied));
}
/**
 * Builds the paged query for base config info filtered by group and tenant.
 * Pagination bounds are inlined into the LIMIT clause; only the WHERE values
 * are bound as parameters.
 */
@Override
public MapperResult findConfigInfoBaseByGroupFetchRows(MapperContext context) {
    String sql = String.format(
            "SELECT id,data_id,group_id,content FROM config_info WHERE group_id=? AND tenant_id=? LIMIT %s,%s",
            context.getStartRow(), context.getPageSize());
    return new MapperResult(sql, CollectionUtils.list(
            context.getWhereParameter(FieldConstant.GROUP_ID),
            context.getWhereParameter(FieldConstant.TENANT_ID)));
}
@Test
void testFindConfigInfoBaseByGroupFetchRows() {
    // given
    context.putWhereParameter(FieldConstant.GROUP_ID, groupId);

    // when
    MapperResult result = configInfoMapperByMySql.findConfigInfoBaseByGroupFetchRows(context);

    // then: SQL filters by group and tenant, pagination is inlined, parameters follow WHERE order
    assertEquals(result.getSql(),
            "SELECT id,data_id,group_id,content FROM config_info WHERE group_id=? AND tenant_id=? LIMIT "
                    + startRow + "," + pageSize);
    assertArrayEquals(new Object[] {groupId, tenantId}, result.getParamList().toArray());
}
/**
 * Two properties are equal when key, user, entity and value all match.
 */
@Override
public boolean equals(Object obj) {
    // Fast path for self-comparison.
    if (obj == this) {
        return true;
    }
    if (obj == null) {
        return false;
    }
    // Strict class check (not instanceof): subclasses never equal a PropertyDto.
    if (getClass() != obj.getClass()) {
        return false;
    }
    PropertyDto other = (PropertyDto) obj;
    return Objects.equals(this.key, other.key)
            && Objects.equals(this.userUuid, other.userUuid)
            && Objects.equals(this.entityUuid, other.entityUuid)
            && Objects.equals(this.value, other.value);
}
@Test
void testEquals() {
    // Same key and entity -> equal.
    assertThat(new PropertyDto().setKey("123").setEntityUuid("uuid123"))
            .isEqualTo(new PropertyDto().setKey("123").setEntityUuid("uuid123"));
    // Different key -> not equal.
    assertThat(new PropertyDto().setKey("1234").setEntityUuid("uuid123"))
            .isNotEqualTo(new PropertyDto().setKey("123").setEntityUuid("uuid123"));
    assertThat(new PropertyDto().setKey("1234").setEntityUuid("uuid123")).isNotNull();
    // Different type -> not equal.
    assertThat(new PropertyDto().setKey("1234").setEntityUuid("uuid123")).isNotEqualTo(new Object());
}
/**
 * Resolves variable interpolation in the given string via the context's
 * secret source resolver.
 *
 * @deprecated call {@code context.getSecretSourceResolver().resolve(...)} directly
 */
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
    return context.getSecretSourceResolver().resolve(toInterpolate);
}
@Test
public void resolve_noInterpolatorStringLookupForARegularFileOrBase64Variable() {
    // Variables that merely happen to be named "base64" or "file" must resolve
    // as plain environment variables, not trigger special lookups.
    environment.set("base64", "foo");
    environment.set("file", "foo");

    assertThat(resolve("${base64}"), equalTo("foo"));
    assertThat(resolve("${file}"), equalTo("foo"));
}
/**
 * Extracts the file name from the repository URL: everything after the last
 * '/'. If there is no slash, lastIndexOf returns -1 and the whole URL is
 * returned. The headers parameter is not used by this implementation.
 */
@Override
public String getFileName(String baseRepositoryURL, Map<String, List<String>> headers) {
    int lastSlash = baseRepositoryURL.lastIndexOf('/');
    return baseRepositoryURL.substring(lastSlash + 1);
}
@Test
void testGetFileName() {
    SimpleReferenceURLBuilder urlBuilder = new SimpleReferenceURLBuilder();
    // Only the last path segment of the repository URL is kept; headers are unused.
    assertEquals("API_Pastry_1.0.0-openapi.yaml", urlBuilder.getFileName(BASE_URL, null));
}
/**
 * Creates a step expression for the given step definition.
 * <p>
 * Parameterless definitions get an expression whose resolver reports that the
 * definition takes no parameter. Otherwise the LAST parameter's type resolver
 * and transposition flag are used when building the expression.
 */
public StepExpression createExpression(StepDefinition stepDefinition) {
    List<ParameterInfo> parameterInfos = stepDefinition.parameterInfos();
    if (parameterInfos.isEmpty()) {
        return createExpression(
            stepDefinition.getPattern(), stepDefinitionDoesNotTakeAnyParameter(stepDefinition), false);
    }
    // Trailing arguments bind to the last declared parameter.
    ParameterInfo parameterInfo = parameterInfos.get(parameterInfos.size() - 1);
    return createExpression(
        stepDefinition.getPattern(), parameterInfo.getTypeResolver()::resolve, parameterInfo.isTransposed());
}
@Test
void table_expression_with_type_creates_table_from_table() {
    StepDefinition stepDefinition = new StubStepDefinition("Given some stuff:", DataTable.class);
    StepExpression tableExpression = stepExpressionFactory.createExpression(stepDefinition);

    // Matching the step text plus a table must yield a single DataTable argument.
    List<Argument> arguments = tableExpression.match("Given some stuff:", table);
    DataTable actualTable = (DataTable) arguments.get(0).getValue();

    assertThat(actualTable.cells(), is(equalTo(table)));
}
/**
 * Runs the field-value alert check: fetches statistics for the configured
 * field over the last {@code time} minutes and compares the selected
 * statistic (mean/min/max/sum/stddev) against the threshold.
 *
 * @return a positive CheckResult (optionally carrying a message backlog) when
 *         the threshold condition is met, a NegativeCheckResult otherwise, or
 *         {@code null} only for the (unreachable) invalid-range case
 */
@Override
public CheckResult runCheck() {
    try {
        // Restrict the search to this alert's stream; 'query' narrows it further.
        final String filter = buildQueryFilter(stream.getId(), query);
        // TODO we don't support cardinality yet
        final FieldStatsResult fieldStatsResult =
            searches.fieldStats(field, "*", filter, RelativeRange.create(time * 60), false, true, false);
        // No matching messages at all -> nothing to evaluate.
        if (fieldStatsResult.count() == 0) {
            LOG.debug("Alert check <{}> did not match any messages. Returning not triggered.", type);
            return new NegativeCheckResult();
        }

        // Pick the statistic configured for this condition.
        final double result;
        switch (type) {
            case MEAN:
                result = fieldStatsResult.mean();
                break;
            case MIN:
                result = fieldStatsResult.min();
                break;
            case MAX:
                result = fieldStatsResult.max();
                break;
            case SUM:
                result = fieldStatsResult.sum();
                break;
            case STDDEV:
                result = fieldStatsResult.stdDeviation();
                break;
            default:
                LOG.error("No such field value check type: [{}]. Returning not triggered.", type);
                return new NegativeCheckResult();
        }

        LOG.debug("Alert check <{}> result: [{}]", id, result);

        if (Double.isInfinite(result)) {
            // This happens when there are no ES results/docs.
            LOG.debug("Infinite value. Returning not triggered.");
            return new NegativeCheckResult();
        }

        // Compare against the threshold in the configured direction.
        final boolean triggered;
        switch (thresholdType) {
            case HIGHER:
                triggered = result > threshold.doubleValue();
                break;
            case LOWER:
                triggered = result < threshold.doubleValue();
                break;
            default:
                triggered = false;
        }

        if (triggered) {
            final String resultDescription = "Field " + field + " had a " + type + " of "
                + decimalFormat.format(result) + " in the last " + time + " minutes with trigger condition "
                + thresholdType + " than " + decimalFormat.format(threshold) + ". "
                + "(Current grace time: " + grace + " minutes)";

            // Attach matching messages as supporting evidence when a backlog is configured.
            final List<MessageSummary> summaries;
            if (getBacklog() > 0) {
                final List<ResultMessage> searchResult = fieldStatsResult.searchHits();
                summaries = Lists.newArrayListWithCapacity(searchResult.size());
                for (ResultMessage resultMessage : searchResult) {
                    final Message msg = resultMessage.getMessage();
                    summaries.add(new MessageSummary(resultMessage.getIndex(), msg));
                }
            } else {
                summaries = Collections.emptyList();
            }

            return new CheckResult(true, this, resultDescription, Tools.nowUTC(), summaries);
        } else {
            return new NegativeCheckResult();
        }
    } catch (InvalidRangeParametersException e) {
        // cannot happen lol
        LOG.error("Invalid timerange.", e);
        return null;
    } catch (FieldTypeException e) {
        LOG.debug("Field [{}] seems not to have a numerical type or doesn't even exist at all. Returning not triggered.", field, e);
        return new NegativeCheckResult();
    }
}
/**
 * For every check type, a result strictly below the threshold combined with
 * ThresholdType.LOWER must trigger the alert.
 */
@Test
public void testRunCheckLowerPositive() throws Exception {
    for (FieldValueAlertCondition.CheckType checkType : FieldValueAlertCondition.CheckType.values()) {
        final double threshold = 50.0;
        final double lowerThanThreshold = threshold - 10;
        FieldValueAlertCondition fieldValueAlertCondition = getFieldValueAlertCondition(
            getParametersMap(0, 0, FieldValueAlertCondition.ThresholdType.LOWER, checkType, threshold, "response_time"),
            alertConditionTitle);

        // Stub the field stats so the checked statistic comes back below the threshold.
        fieldStatsShouldReturn(getFieldStatsResult(checkType, lowerThanThreshold));

        AlertCondition.CheckResult result = fieldValueAlertCondition.runCheck();

        assertTriggered(fieldValueAlertCondition, result);
    }
}
/**
 * Splits the value argument into a key/value map.
 * Pair and key/value delimiters, trimming, empty-value handling and the
 * duplicate-key policy are all configurable through the function arguments.
 *
 * @return an empty map for a null/empty value, otherwise the parsed pairs
 */
@Override
public Map<String, String> evaluate(FunctionArgs args, EvaluationContext context) {
    final String value = valueParam.required(args, context);
    if (Strings.isNullOrEmpty(value)) {
        return Collections.emptyMap();
    }

    // Defaults: pairs split on whitespace, key/value split on '='.
    final CharMatcher kvPairsMatcher = splitParam.optional(args, context).orElse(CharMatcher.whitespace());
    final CharMatcher kvDelimMatcher = valueSplitParam.optional(args, context).orElse(CharMatcher.anyOf("="));

    // The outer splitter is quote-aware so delimiters inside quotes are kept.
    Splitter outerSplitter = Splitter.on(DelimiterCharMatcher.withQuoteHandling(kvPairsMatcher))
            .omitEmptyStrings()
            .trimResults();

    // limit(2): the value part may itself contain the key/value delimiter.
    final Splitter entrySplitter = Splitter.on(kvDelimMatcher)
            .omitEmptyStrings()
            .limit(2)
            .trimResults();
    return new MapSplitter(outerSplitter,
            entrySplitter,
            ignoreEmptyValuesParam.optional(args, context).orElse(true),
            trimCharactersParam.optional(args, context).orElse(CharMatcher.none()),
            trimValueCharactersParam.optional(args, context).orElse(CharMatcher.none()),
            allowDupeKeysParam.optional(args, context).orElse(true),
            duplicateHandlingParam.optional(args, context).orElse(TAKE_FIRST))
            .split(value);
}
// With ignore_empty_values=false, an entry lacking a value must be rejected
// rather than silently dropped.
@Test
void testDisableIgnoreEmptyValues() {
    final Map<String, Expression> arguments = Map.of(
            "value", valueExpression,
            "ignore_empty_values", new BooleanExpression(new CommonToken(0), false));
    Assertions.assertThrows(IllegalArgumentException.class,
            () -> classUnderTest.evaluate(new FunctionArgs(classUnderTest, arguments), evaluationContext));
}
/**
 * Validates document references: every referred document must exist in
 * {@code documentDefinitions}, and every referred document must also be in the
 * globally distributed set. Each check throws on violation.
 */
public void validate(Map<String, NewDocumentType> documentDefinitions,
                     Set<NewDocumentType> globallyDistributedDocuments) {
    verifyReferredDocumentsArePresent(documentDefinitions);
    verifyReferredDocumentsAreGlobal(documentDefinitions, globallyDistributedDocuments);
}
// An empty document model has nothing to validate, so no exception is expected.
@Test
void validation_succeeds_on_no_documents() {
    final GlobalDistributionValidator validator = new GlobalDistributionValidator();
    validator.validate(Map.of(), Set.of());
}
/**
 * Convenience overload: builds a batch ExecutionEnvironment from pipeline options,
 * defaulting files-to-stage to an empty list when unset.
 */
public static ExecutionEnvironment createBatchExecutionEnvironment(FlinkPipelineOptions options) {
    return createBatchExecutionEnvironment(
        options,
        MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
        options.getFlinkConfDir());
}
// A master address without an explicit port must fall back to the REST default port.
@Test
public void shouldAllowPortOmissionForRemoteEnvironmentBatch() {
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setRunner(FlinkRunner.class);
    options.setFlinkMaster("host");

    ExecutionEnvironment bev = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);

    assertThat(bev, instanceOf(RemoteEnvironment.class));
    checkHostAndPort(bev, "host", RestOptions.PORT.defaultValue());
}
/**
 * Applies a PartitionChangeRecord on top of this registration and returns the
 * merged result. Null record fields mean "unchanged". The partition epoch is
 * always bumped by one; the leader epoch is bumped only on a leader change.
 */
public PartitionRegistration merge(PartitionChangeRecord record) {
    int[] newReplicas = (record.replicas() == null)
        ? replicas : Replicas.toArray(record.replicas());
    // Directories must stay aligned with the replica array length.
    Uuid[] newDirectories = defaultToMigrating(
            (record.directories() == null)
                ? directories : Uuid.toArray(checkDirectories(record)),
            newReplicas.length
    );
    int[] newIsr = (record.isr() == null) ? isr : Replicas.toArray(record.isr());
    int[] newRemovingReplicas = (record.removingReplicas() == null)
        ? removingReplicas : Replicas.toArray(record.removingReplicas());
    int[] newAddingReplicas = (record.addingReplicas() == null)
        ? addingReplicas : Replicas.toArray(record.addingReplicas());
    int newLeader;
    int newLeaderEpoch;
    if (record.leader() == NO_LEADER_CHANGE) {
        newLeader = leader;
        newLeaderEpoch = leaderEpoch;
    } else {
        // A leader change implies a new leader epoch.
        newLeader = record.leader();
        newLeaderEpoch = leaderEpoch + 1;
    }
    LeaderRecoveryState newLeaderRecoveryState = leaderRecoveryState.changeTo(record.leaderRecoveryState());
    int[] newElr = (record.eligibleLeaderReplicas() == null)
        ? elr : Replicas.toArray(record.eligibleLeaderReplicas());
    int[] newLastKnownElr = (record.lastKnownElr() == null)
        ? lastKnownElr : Replicas.toArray(record.lastKnownElr());
    return new PartitionRegistration(newReplicas,
        newDirectories,
        newIsr,
        newRemovingReplicas,
        newAddingReplicas,
        newLeader,
        newLeaderRecoveryState,
        newLeaderEpoch,
        partitionEpoch + 1,  // every applied change bumps the partition epoch
        newElr,
        newLastKnownElr);
}
// Exercises merge() through a reassignment lifecycle: first add replica 4 / mark 3
// removing (epoch 200 -> 201), then complete the reassignment (epoch 201 -> 202).
// The leader epoch stays at 100 since the leader never changes.
@Test
public void testMergePartitionChangeRecordWithReassignmentData() {
    Uuid dir1 = Uuid.fromString("FbRuu7CeQtq5YFreEzg16g");
    Uuid dir2 = Uuid.fromString("4rtHTelWSSStAFMODOg3cQ");
    Uuid dir3 = Uuid.fromString("Id1WXzHURROilVxZWJNZlw");
    PartitionRegistration partition0 = new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).
        setDirectories(new Uuid[]{dir1, dir2, dir3}).
        setIsr(new int[] {1, 2, 3}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build();
    // Step 1: start the reassignment — replica 3 removing, replica 4 adding.
    PartitionRegistration partition1 = partition0.merge(new PartitionChangeRecord().
        setRemovingReplicas(Collections.singletonList(3)).
        setAddingReplicas(Collections.singletonList(4)).
        setReplicas(Arrays.asList(1, 2, 3, 4)).
        setDirectories(Arrays.asList(dir1, dir2, dir3, DirectoryId.UNASSIGNED)));
    assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3, 4}).
        setDirectories(new Uuid[]{dir1, dir2, dir3, DirectoryId.UNASSIGNED}).
        setIsr(new int[] {1, 2, 3}).setRemovingReplicas(new int[] {3}).setAddingReplicas(new int[] {4}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(201).build(), partition1);
    // Step 2: finish the reassignment — 3 dropped, 4 now in the ISR.
    PartitionRegistration partition2 = partition1.merge(new PartitionChangeRecord().
        setIsr(Arrays.asList(1, 2, 4)).
        setRemovingReplicas(Collections.emptyList()).
        setAddingReplicas(Collections.emptyList()).
        setReplicas(Arrays.asList(1, 2, 4)).
        setDirectories(Arrays.asList(dir1, dir2, DirectoryId.UNASSIGNED)));
    assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).
        setDirectories(new Uuid[]{dir1, dir2, DirectoryId.UNASSIGNED}).
        setIsr(new int[] {1, 2, 4}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(202).build(), partition2);
}
/**
 * Expands {@code timeSlot} outward so both endpoints fall on boundaries of the
 * given segment unit. Delegates to {@link SlotToNormalizedSlot}.
 */
public static TimeSlot normalizeToSegmentBoundaries(TimeSlot timeSlot, SegmentInMinutes unit) {
    return new SlotToNormalizedSlot().apply(timeSlot, unit);
}
@Test void normalizingATimeSlot() { //given Instant start = Instant.parse("2023-09-09T00:10:00Z"); Instant end = Instant.parse("2023-09-09T01:00:00Z"); TimeSlot timeSlot = new TimeSlot(start, end); //when TimeSlot segment = Segments.normalizeToSegmentBoundaries(timeSlot, SegmentInMinutes.of(90, FIFTEEN_MINUTES_SEGMENT_DURATION)); //then assertEquals(Instant.parse("2023-09-09T00:00:00Z"), segment.from()); assertEquals(Instant.parse("2023-09-09T01:30:00Z"), segment.to()); }
private static Schema optional(Schema original) { // null is first in the union because Parquet's default is always null return Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), original)); }
// Round-trips an Avro array of optional records and checks the resulting Parquet
// 3-level LIST layout where the element group itself is optional.
@Test
public void testArrayOfOptionalRecords() throws Exception {
    Schema innerRecord = Schema.createRecord("element", null, null, false);
    Schema optionalString = optional(Schema.create(Schema.Type.STRING));
    innerRecord.setFields(Lists.newArrayList(
        new Schema.Field("s1", optionalString, null, JsonProperties.NULL_VALUE),
        new Schema.Field("s2", optionalString, null, JsonProperties.NULL_VALUE)));
    Schema schema = Schema.createRecord("HasArray", null, null, false);
    schema.setFields(
        Lists.newArrayList(new Schema.Field("myarray", Schema.createArray(optional(innerRecord)), null, null)));
    System.err.println("Avro schema: " + schema.toString(true));

    testRoundTripConversion(
        NEW_BEHAVIOR,
        schema,
        "message HasArray {\n" + "  required group myarray (LIST) {\n"
            + "    repeated group list {\n"
            + "      optional group element {\n"
            + "        optional binary s1 (UTF8);\n"
            + "        optional binary s2 (UTF8);\n"
            + "      }\n"
            + "    }\n"
            + "  }\n"
            + "}\n");
}
/**
 * Parses a JCA cipher transformation string ("ALG", or "ALG/MODE/PADDING") into an
 * Algorithm model. PBE algorithms are detected first and returned as-is; otherwise
 * the base algorithm is mapped and any parsed mode/padding are attached as children.
 *
 * NOTE(review): a two-part form "ALG/MODE" (single slash) falls through without
 * parsing the mode — only the three-part form attaches mode/padding. Presumably
 * intentional per JCA's transformation grammar, but worth confirming.
 */
@Nonnull
@Override
public Optional<? extends Algorithm> parse(
        @Nullable final String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }

    String algorithmStr;
    Optional<Mode> modeOptional = Optional.empty();
    Optional<? extends Padding> paddingOptional = Optional.empty();

    if (str.contains("/")) {
        int slashIndex = str.indexOf("/");
        algorithmStr = str.substring(0, slashIndex);
        String rest = str.substring(slashIndex + 1);
        if (rest.contains("/")) {
            slashIndex = rest.indexOf("/");
            // mode
            final String modeStr = rest.substring(0, slashIndex);
            final JcaModeMapper jcaModeMapper = new JcaModeMapper();
            modeOptional = jcaModeMapper.parse(modeStr, detectionLocation);
            // padding
            String paddingStr = rest.substring(slashIndex + 1);
            final JcaPaddingMapper jcaPaddingMapper = new JcaPaddingMapper();
            paddingOptional = jcaPaddingMapper.parse(paddingStr, detectionLocation);
        }
    } else {
        algorithmStr = str;
    }

    // check if it is pbe (password-based encryption names like "PBEWithMD5AndDES")
    JcaPasswordBasedEncryptionMapper pbeMapper = new JcaPasswordBasedEncryptionMapper();
    Optional<PasswordBasedEncryption> pbeOptional =
            pbeMapper.parse(algorithmStr, detectionLocation);
    if (pbeOptional.isPresent()) {
        // pbe
        return pbeOptional;
    }

    Optional<? extends Algorithm> possibleCipher = map(algorithmStr, detectionLocation);
    if (possibleCipher.isEmpty()) {
        return Optional.empty();
    }

    // Attach parsed mode/padding as child nodes of the algorithm.
    final Algorithm algorithm = possibleCipher.get();
    modeOptional.ifPresent(algorithm::put);
    paddingOptional.ifPresent(algorithm::put);
    return Optional.of(algorithm);
}
// "PBEWithMD5AndDES" must parse as a PasswordBasedEncryption node with an MD5
// digest child (128-bit digest, 512-bit block) and a DES cipher child.
@Test
void pbe() {
    DetectionLocation testDetectionLocation =
        new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");

    JcaCipherMapper jcaCipherMapper = new JcaCipherMapper();
    Optional<? extends Algorithm> cipherOptional =
        jcaCipherMapper.parse("PBEWithMD5AndDES", testDetectionLocation);
    assertThat(cipherOptional).isPresent();

    assertThat(cipherOptional.get().is(PasswordBasedEncryption.class)).isTrue();
    PasswordBasedEncryption pbe = (PasswordBasedEncryption) cipherOptional.get();
    assertThat(pbe.getName()).isEqualTo("PBEWithMD5AndDES");
    assertThat(pbe.hasChildren()).isTrue();
    assertThat(pbe.getChildren().values()).hasSize(2);

    assertThat(pbe.getDigest()).isPresent();
    MessageDigest digest = pbe.getDigest().get();
    assertThat(digest).isInstanceOf(MD5.class);
    assertThat(digest.getDigestSize()).isPresent();
    assertThat(digest.getDigestSize().get().getValue()).isEqualTo(128);
    assertThat(digest.getBlockSize()).isPresent();
    assertThat(digest.getBlockSize().get().getValue()).isEqualTo(512);

    assertThat(pbe.getCipher()).isPresent();
    IPrimitive encryptionAlgorithm = pbe.getCipher().get();
    assertThat(encryptionAlgorithm).isInstanceOf(DES.class);
}
// Looks up a registered DataTableType for the given type without any key
// transformation (identity).
DataTableType lookupTableTypeByType(Type type) {
    return lookupTableTypeByType(type, Function.identity());
}
// A null cell in a table of Short must stay null after transformation,
// not become 0 or throw.
@Test
void null_short_transformed_to_null() {
    DataTableTypeRegistry registry = new DataTableTypeRegistry(Locale.ENGLISH);
    DataTableType dataTableType = registry.lookupTableTypeByType(LIST_OF_LIST_OF_SHORT);
    assertEquals(
        singletonList(singletonList(null)),
        dataTableType.transform(singletonList(singletonList(null))));
}
/**
 * Returns the first value stored under {@code name}, or {@code null} when the
 * name has no values at all.
 */
public String get(final String name) {
    final String[] stored = metadata.get(name);
    return stored == null ? null : stored[0];
}
// Runs several MetadataDateAdder tasks concurrently and waits for all of them,
// propagating any task failure. Fixes: the executor was never shut down (thread
// leak across tests), take() never returns null so the null/isDone checks were
// dead code, and the fetched result was assigned to an unused variable.
@Test
public void testMultithreadedDates() throws Exception {
    int numThreads = 10;
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    try {
        ExecutorCompletionService<Integer> executorCompletionService =
            new ExecutorCompletionService<>(executorService);
        for (int i = 0; i < numThreads; i++) {
            executorCompletionService.submit(new MetadataDateAdder());
        }
        for (int finished = 0; finished < numThreads; finished++) {
            // take() blocks until a task completes; get() rethrows any task failure.
            executorCompletionService.take().get();
        }
    } finally {
        executorService.shutdown();
    }
}
@Override public UUID generateId() { long counterValue = counter.incrementAndGet(); if (counterValue == MAX_COUNTER_VALUE) { throw new CucumberException( "Out of " + IncrementingUuidGenerator.class.getSimpleName() + " capacity. Please generate using a new instance or use another " + UuidGenerator.class.getSimpleName() + "implementation."); } long leastSigBits = counterValue | 0x8000000000000000L; // set variant return new UUID(msb, leastSigBits); }
// Generators loaded via distinct classloaders must still produce UUIDs that
// collectively satisfy the uniqueness/format properties.
@Test
void different_classloaders_generators() {
    // Given/When
    List<UUID> uuids = IntStream.rangeClosed(1, 10)
            .mapToObj(i -> getUuidGeneratorFromOtherClassloader(i).generateId())
            .collect(Collectors.toList());

    // Then
    checkUuidProperties(uuids);
}
/**
 * Runs every registered health check (no filtering) and returns results keyed
 * by check name, sorted by name.
 */
public SortedMap<String, HealthCheck.Result> runHealthChecks() {
    return runHealthChecks(HealthCheckFilter.ALL);
}
// All registered checks — including the async one — must appear in the result map.
@Test
public void runsRegisteredHealthChecks() {
    final Map<String, HealthCheck.Result> results = registry.runHealthChecks();

    assertThat(results).contains(entry("hc1", r1));
    assertThat(results).contains(entry("hc2", r2));
    assertThat(results).containsKey("ahc");
}
/**
 * Extracts the split's watermark as the minimum lower-bound statistic of the
 * event-time column across all files in the split, converted to milliseconds.
 *
 * Fix: the Preconditions message had two %s placeholders but three arguments
 * (field name, field id, file), so Guava's lenient formatting garbled the error;
 * the message now has a placeholder for each argument.
 */
@Override
public long extractWatermark(IcebergSourceSplit split) {
    return split.task().files().stream()
        .map(
            scanTask -> {
              // Every file must carry a lower-bound statistic for the event-time column.
              Preconditions.checkArgument(
                  scanTask.file().lowerBounds() != null
                      && scanTask.file().lowerBounds().get(eventTimeFieldId) != null,
                  "Missing statistics for column name = %s (id = %s) in file = %s",
                  eventTimeFieldName,
                  eventTimeFieldId,
                  scanTask.file());
              return timeUnit.toMillis(
                  Conversions.fromByteBuffer(
                      Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId)));
            })
        .min(Comparator.comparingLong(l -> l))
        .get();
}
// For a combined split spanning two files, the watermark must be the minimum of
// the per-file column minima.
@TestTemplate
public void testMultipleFiles() throws IOException {
    assumeThat(columnName).isEqualTo("timestamp_column");
    IcebergSourceSplit combinedSplit =
        IcebergSourceSplit.fromCombinedScanTask(
            ReaderUtil.createCombinedScanTask(
                TEST_RECORDS, temporaryFolder, FileFormat.PARQUET, APPENDER_FACTORY));

    ColumnStatsWatermarkExtractor extractor =
        new ColumnStatsWatermarkExtractor(SCHEMA, columnName, null);

    assertThat(extractor.extractWatermark(split(0)))
        .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue());
    assertThat(extractor.extractWatermark(split(1)))
        .isEqualTo(MIN_VALUES.get(1).get(columnName).longValue());
    assertThat(extractor.extractWatermark(combinedSplit))
        .isEqualTo(Math.min(MIN_VALUES.get(0).get(columnName), MIN_VALUES.get(1).get(columnName)));
}
/**
 * Copies the bean properties of {@code source} into a freshly created instance
 * of {@code tClass}, skipping the named properties.
 *
 * @return the populated target instance, or {@code null} when source is null
 */
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
    if (source == null) {
        return null;
    }
    final T target = ReflectUtil.newInstanceIfPossible(tClass);
    final CopyOptions options = CopyOptions.create().setIgnoreProperties(ignoreProperties);
    copyProperties(source, target, options);
    return target;
}
// Stress test: clones and bean-copies a student list from 5000 threads to probe
// for thread-safety issues in BeanUtil. Fix: the second fixture block mutated
// `student` instead of `student2` (copy-paste bug), so student2 stayed empty and
// student's first values were overwritten.
@Test
@Disabled
public void multiThreadTest() {
    final Student student = new Student();
    student.setName("张三");
    student.setAge(123);
    student.setNo(3158L);

    final Student student2 = new Student();
    student2.setName("李四");
    student2.setAge(125);
    student2.setNo(8848L);

    final List<Student> studentList = ListUtil.of(student, student2);

    for (int i = 0; i < 5000; i++) {
        new Thread(() -> {
            final List<Student> list = ObjectUtil.clone(studentList);
            final List<Student> listReps = list.stream().map(s1 -> {
                final Student s2 = new Student();
                BeanUtil.copyProperties(s1, s2);
                return s2;
            }).collect(Collectors.toList());

            System.out.println(listReps);
        }).start();
    }

    ThreadUtil.waitForDie();
}
/**
 * Renders a JobId as its canonical string form. Throws NullPointerException for
 * a null id, matching direct {@code jid.toString()} behavior.
 */
public static String toString(JobId jid) {
    final String rendered = jid.toString();
    return rendered;
}
// A zeroed application id must render as the canonical "job_0_0000" string.
@Test
@Timeout(120000)
public void testJobIDtoString() {
    JobId jobId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
    jobId.setAppId(ApplicationId.newInstance(0, 0));
    assertEquals("job_0_0000", MRApps.toString(jobId));
}
/**
 * Returns the frame buffer currently selected for display and logs which
 * buffer index is active.
 */
public Buffer getBuffer() {
    final int active = current;
    LOGGER.info("Get current buffer: " + active);
    return frameBuffers[active];
}
// Injects the private `current` index and `frameBuffers` array via reflection,
// then checks getBuffer() returns the buffer at the current index.
@Test
void testGetBuffer() {
    try {
        var scene = new Scene();
        var field1 = Scene.class.getDeclaredField("current");
        field1.setAccessible(true);
        field1.set(scene, 0);
        var frameBuffers = new FrameBuffer[2];
        var frameBuffer = new FrameBuffer();
        frameBuffer.draw(0, 0);
        frameBuffers[0] = frameBuffer;
        var field2 = Scene.class.getDeclaredField("frameBuffers");
        field2.setAccessible(true);
        field2.set(scene, frameBuffers);
        assertEquals(frameBuffer, scene.getBuffer());
    } catch (NoSuchFieldException | IllegalAccessException e) {
        fail("Fail to access private field.");
    }
}
@Override public void checkTopicAccess( final KsqlSecurityContext securityContext, final String topicName, final AclOperation operation ) { final Set<AclOperation> authorizedOperations = securityContext.getServiceContext() .getTopicClient().describeTopic(topicName).authorizedOperations(); // Kakfa 2.2 or lower do not support authorizedOperations(). In case of running on a // unsupported broker version, then the authorizeOperation will be null. if (authorizedOperations != null && !authorizedOperations.contains(operation)) { // This error message is similar to what Kafka throws when it cannot access the topic // due to an authorization error. I used this message to keep a consistent message. throw new KsqlTopicAuthorizationException(operation, Collections.singleton(topicName)); } }
// A failure while describing the topic must propagate as-is rather than being
// converted into an authorization error.
@Test
public void shouldThrowExceptionWhenDescribeTopicFails() {
    // Given:
    when(kafkaTopicClient.describeTopic(TOPIC_NAME_1))
        .thenThrow(KafkaResponseGetFailedException.class);

    // When:
    assertThrows(
        KafkaResponseGetFailedException.class,
        () -> accessValidator.checkTopicAccess(securityContext, TOPIC_NAME_1, AclOperation.READ)
    );
}
/**
 * Parses a colon-separated hex byte string (e.g. "01:02:ff") into a long,
 * accumulating each byte from most- to least-significant.
 *
 * @throws NumberFormatException when there are more than 8 components, a
 *         component has more than 2 hex digits, or a component is not valid hex
 */
public static long toLong(String value) {
    final String[] components = value.split(":");
    if (components.length > 8) {
        throw new NumberFormatException("Input string is too big to fit in long: " + value);
    }
    long result = 0;
    for (int i = 0; i < components.length; i++) {
        final String component = components[i];
        if (component.length() > 2) {
            throw new NumberFormatException(
                "Each colon-separated byte component must consist of 1 or 2 hex digits: " + value);
        }
        result = (result << 8) + Short.parseShort(component, 16);
    }
    return result;
}
// Nine byte components cannot fit in a 64-bit long, so parsing must fail.
@Test(expected = NumberFormatException.class)
public void testToLongError() {
    HexString.toLong("09:08:07:06:05:04:03:02:01");
    fail("HexString.toLong() should have thrown a NumberFormatException");
}
/**
 * Point lookup of {@code key} in the materialized table's state store for the
 * given partition. Returns an empty iterator when the key is absent, otherwise
 * a single-row iterator. Any store failure is wrapped in a
 * MaterializationException.
 */
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
  try {
    final ReadOnlyKeyValueStore<GenericKey, ValueAndTimestamp<GenericRow>> store =
        stateStore.store(QueryableStoreTypes.timestampedKeyValueStore(), partition);
    final ValueAndTimestamp<GenericRow> entry = store.get(key);
    if (entry != null) {
      final Row row = Row.of(stateStore.schema(), key, entry.value(), entry.timestamp());
      return KsMaterializedQueryResult.rowIterator(ImmutableList.of(row).iterator());
    }
    return KsMaterializedQueryResult.rowIterator(Collections.emptyIterator());
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
// A full table scan must request the timestamped key-value store type from the
// state store.
@Test
public void shouldGetStoreWithCorrectParams_fullTableScan() {
    // Given:
    when(tableStore.all()).thenReturn(keyValueIterator);

    // When:
    table.get(PARTITION);

    // Then:
    verify(stateStore).store(storeTypeCaptor.capture(), anyInt());
    assertThat(storeTypeCaptor.getValue().getClass().getSimpleName(),
        is("TimestampedKeyValueStoreType"));
}
// Evaluates the (strict) metrics expression against a content file's statistics.
// TODO: detect the case where a column is missing from the file using file's max field id.
public boolean eval(ContentFile<?> file) {
    return new MetricsEvalVisitor().eval(file);
}
// not(false-predicate) must strictly match, not(true-predicate) must not.
// This test case must use a real predicate, not alwaysTrue(), or binding will simplify it out.
@Test
public void testNot() {
    boolean shouldRead =
        new StrictMetricsEvaluator(SCHEMA, not(lessThan("id", INT_MIN_VALUE - 25))).eval(FILE);
    assertThat(shouldRead).as("Should not match: not(false)").isTrue();

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(greaterThan("id", INT_MIN_VALUE - 25))).eval(FILE);
    assertThat(shouldRead).as("Should match: not(true)").isFalse();
}
// Builds the ZooKeeper path for a service's instances under the base path.
// Empty/null service names collapse to the base path itself (ZKPaths.makePath
// semantics — see the accompanying test).
@Override
public String getPathForInstances(String serviceName) {
    return ZKPaths.makePath(basePath, serviceName);
}
// A service name appends a path segment; empty or null names must yield the
// bare base path.
@Test
public void testGetPathForInstances() {
    final DiscoveryPathConstructorImpl paths = new DiscoveryPathConstructorImpl("/foo/bar");
    assertThat(paths.getPathForInstances("baz")).isEqualTo("/foo/bar/baz");
    assertThat(paths.getPathForInstances("")).isEqualTo("/foo/bar");
    assertThat(paths.getPathForInstances(null)).isEqualTo("/foo/bar");
}
/**
 * Builds the joined stream for this join node. Partition counts of the two sides
 * must match for co-partitioned joins, so they are validated first — except for
 * foreign-key joins, which do not require co-partitioning.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    if (!joinKey.isForeignKey()) {
        ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
    }

    final JoinerFactory joinerFactory = new JoinerFactory(
        buildContext,
        this,
        buildContext.buildNodeContext(getId().toString()));

    // Dispatch to the joiner matching the (left, right) node output types.
    return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join();
}
// A stream-stream inner join with a grace period must forward the full
// within-expression (including grace) to the left stream's innerJoin call.
@Test
public void shouldPerformStreamToStreamInnerJoinWithGracePeriod() {
    // Given:
    setupStream(left, leftSchemaKStream);
    setupStream(right, rightSchemaKStream);
    final JoinNode joinNode = new JoinNode(nodeId, INNER, joinKey, true, left, right,
        WITHIN_EXPRESSION_WITH_GRACE, "KAFKA");

    // When:
    joinNode.buildStream(planBuildContext);

    // Then:
    verify(leftSchemaKStream).innerJoin(
        rightSchemaKStream,
        SYNTH_KEY,
        WITHIN_EXPRESSION_WITH_GRACE.get(),
        VALUE_FORMAT.getFormatInfo(),
        OTHER_FORMAT.getFormatInfo(),
        CONTEXT_STACKER
    );
}
/**
 * Renames {@code file} to {@code renamed} via the Manta client and returns the
 * renamed path carrying the original file's attributes. Each Manta/HTTP/IO
 * failure is mapped to a backend-specific BackgroundException; the catch order
 * matters (most specific first).
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status,
                 final Delete.Callback deleteCallback,
                 final ConnectionCallback connectionCallback) throws BackgroundException {
    try {
        session.getClient().move(file.getAbsolute(), renamed.getAbsolute());
        // Copy original file attributes
        return renamed.withAttributes(file.attributes());
    }
    catch(MantaException e) {
        throw new MantaExceptionMappingService().map("Cannot rename {0}", e, file);
    }
    catch(MantaClientHttpResponseException e) {
        throw new MantaHttpExceptionMappingService().map("Cannot rename {0}", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
// End-to-end move: create a file, move it into a sub-directory, and verify the
// source no longer exists while the target does. Cleans up the directory tree.
@Test
public void testMoveRename() throws BackgroundException {
    final Directory directory = new MantaDirectoryFeature(session);
    final Touch touch = new MantaTouchFeature(session);
    final Move move = new MantaMoveFeature(session);
    final Delete delete = new MantaDeleteFeature(session);
    final AttributesFinder attributesFinder = new MantaAttributesFinderFeature(session);
    final Path drive = new MantaDirectoryFeature(session).mkdir(randomDirectory(), new TransferStatus());

    Path targetDirectory = new Path(drive, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    directory.mkdir(targetDirectory, null);
    assertNotNull(attributesFinder.find(targetDirectory));

    Path touchedFile = new Path(drive, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    touch.touch(touchedFile, new TransferStatus().withMime("x-application/cyberduck"));
    assertNotNull(attributesFinder.find(touchedFile));

    Path rename = new Path(targetDirectory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    assertTrue(move.isSupported(touchedFile, rename));
    // move() must return the renamed path (with the source's attributes).
    assertEquals(rename, move.move(touchedFile, rename, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()));

    assertNotNull(attributesFinder.find(rename));
    assertFalse(new MantaFindFeature(session).find(touchedFile));
    assertTrue(new MantaFindFeature(session).find(rename));

    delete.delete(Collections.singletonList(targetDirectory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
@VisibleForTesting AccessType extractAccessType(Method endpointMethod) { if (endpointMethod.isAnnotationPresent(Authenticate.class)) { return endpointMethod.getAnnotation(Authenticate.class).value(); } else { // heuristically infer access type via javax.ws.rs annotations if (endpointMethod.getAnnotation(POST.class) != null) { return AccessType.CREATE; } else if (endpointMethod.getAnnotation(PUT.class) != null) { return AccessType.UPDATE; } else if (endpointMethod.getAnnotation(DELETE.class) != null) { return AccessType.DELETE; } } return AccessType.READ; }
// Without @Authenticate, access type must be inferred from the HTTP-method
// annotation: GET->READ, POST->CREATE, PUT->UPDATE, DELETE->DELETE.
@Test
public void testExtractAccessTypeWithMissingAuthAnnotation() throws Exception {
    Method method = AuthenticationFilterTest.class.getMethod("methodWithGet");
    assertEquals(AccessType.READ, _authFilter.extractAccessType(method));

    method = AuthenticationFilterTest.class.getMethod("methodWithPost");
    assertEquals(AccessType.CREATE, _authFilter.extractAccessType(method));

    method = AuthenticationFilterTest.class.getMethod("methodWithPut");
    assertEquals(AccessType.UPDATE, _authFilter.extractAccessType(method));

    method = AuthenticationFilterTest.class.getMethod("methodWithDelete");
    assertEquals(AccessType.DELETE, _authFilter.extractAccessType(method));
}