focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Description("Encode binary data as base32") @ScalarFunction @SqlType(StandardTypes.VARCHAR) public static Slice toBase32(@SqlType(StandardTypes.VARBINARY) Slice slice) { String encoded; if (slice.hasByteArray()) { encoded = BaseEncoding.base32().encode(slice.byteArray(), slice.byteArrayOffset(), slice.length()); } else { encoded = BaseEncoding.base32().encode(slice.getBytes()); } return Slices.utf8Slice(encoded); }
@Test public void testToBase32() { assertFunction("to_base32(CAST('' AS VARBINARY))", VARCHAR, encodeBase32("")); assertFunction("to_base32(CAST('a' AS VARBINARY))", VARCHAR, encodeBase32("a")); assertFunction("to_base32(CAST('abc' AS VARBINARY))", VARCHAR, encodeBase32("abc")); assertFunction("to_base32(CAST('hello world' AS VARBINARY))", VARCHAR, "NBSWY3DPEB3W64TMMQ======"); assertFunction("to_base32(NULL)", VARCHAR, null); }
static boolean isTableUsingInstancePoolAndReplicaGroup(@Nonnull TableConfig tableConfig) { boolean status = true; Map<String, InstanceAssignmentConfig> instanceAssignmentConfigMap = tableConfig.getInstanceAssignmentConfigMap(); if (instanceAssignmentConfigMap != null) { for (InstanceAssignmentConfig instanceAssignmentConfig : instanceAssignmentConfigMap.values()) { if (instanceAssignmentConfig != null) { status &= (instanceAssignmentConfig.getTagPoolConfig().isPoolBased() && instanceAssignmentConfig.getReplicaGroupPartitionConfig().isReplicaGroupBased()); } else { status = false; } } } else { status = false; } return status; }
@Test public void testNoRgOfflineTable() { InstanceAssignmentConfig config = new InstanceAssignmentConfig(new InstanceTagPoolConfig("DefaultTenant", true, 0, null), null, new InstanceReplicaGroupPartitionConfig(false, 0, 0, 0, 0, 0, false, null), null, false); TableConfig tableConfig = new TableConfig("table", TableType.OFFLINE.name(), new SegmentsValidationAndRetentionConfig(), new TenantConfig("DefaultTenant", "DefaultTenant", null), new IndexingConfig(), new TableCustomConfig(null), null, null, null, null, Map.of("OFFLINE", config), null, null, null, null, null, null, false, null, null, null); Assert.assertFalse(TableConfigUtils.isTableUsingInstancePoolAndReplicaGroup(tableConfig)); }
public static boolean isEnableAuthentication() { return enableAuthentication; }
@Test public void assertGetEnableAuthentication() { Assert.isTrue(AuthUtil.isEnableAuthentication()); }
public Set<String> getMatchKeys() { return routerConfig.isUseRequestRouter() ? requestTags : RuleUtils.getMatchKeys(); }
@Test public void testGetMatchKeys() { config.setUseRequestRouter(false); Match match = new Match(); match.setHeaders(Collections.singletonMap("bar", Collections.singletonList(new MatchRule()))); Rule rule = new Rule(); rule.setMatch(match); EntireRule entireRule = new EntireRule(); entireRule.setKind(RouterConstant.FLOW_MATCH_KIND); entireRule.setRules(Collections.singletonList(rule)); RouterConfiguration configuration = new RouterConfiguration(); configuration.updateServiceRule("foo", Collections.singletonList(entireRule)); RuleUtils.initKeys(configuration); DubboConfigServiceImpl dubboConfigService = new DubboConfigServiceImpl(); Set<String> headerKeys = dubboConfigService.getMatchKeys(); Assert.assertEquals(1, headerKeys.size()); // 清除缓存 RuleUtils.initMatchKeys(new RouterConfiguration()); RuleUtils.initKeys(configuration); SpringConfigServiceImpl springConfigService = new SpringConfigServiceImpl(); headerKeys = springConfigService.getMatchKeys(); Assert.assertEquals(1, headerKeys.size()); }
@Override public Set<Entry<K, V>> entrySet() { Set<Entry<K, V>> entries = Sets.newHashSet(); items.entrySet(). forEach(e -> entries.add(Maps.immutableEntry(serializer.decode(e.getKey()), serializer.decode(e.getValue())))); return entries; }
@Test public void testEntrySet() throws Exception { //Test entry set generation (violates abstraction by knowing the type of the returned entries) fillMap(10); Set<Map.Entry<Integer, Integer>> entries = map.entrySet(); for (int i = 0; i < 10; i++) { assertTrue("The key set doesn't contain all keys 0-9", entries.contains(Maps.immutableEntry(i, i))); } assertEquals("The key set has an incorrect number of entries", 10, entries.size()); }
Set<String> getPackageIdentifiersFromComponentOf(JavaClass javaClass) { return getPackageIdentifiersFromComponentOf(getComponentOf(javaClass)); }
@Test public void get_package_identifier_associated_with_class() { String expectedPackageIdentifier = SomeOriginClass.class.getPackage().getName().replaceAll(".*\\.", ".."); JavaClassDiagramAssociation javaClassDiagramAssociation = createAssociation(TestDiagram.in(temporaryFolder) .component("A").withStereoTypes(expectedPackageIdentifier) .component("B").withStereoTypes("..noclasshere") .write()); JavaClass clazz = importClassWithContext(SomeOriginClass.class); assertThat(javaClassDiagramAssociation.getPackageIdentifiersFromComponentOf(clazz)) .as("package identifiers of " + clazz.getName()) .containsOnly(expectedPackageIdentifier); }
public static DataSource createDataSource(final ModeConfiguration modeConfig) throws SQLException { return createDataSource(DefaultDatabase.LOGIC_NAME, modeConfig); }
@Test void assertCreateDataSourceWithAllParametersForMultipleDataSources() throws SQLException { assertDataSource(ShardingSphereDataSourceFactory.createDataSource( "test_db", new ModeConfiguration("Standalone", null), Collections.emptyMap(), new LinkedList<>(), new Properties()), "test_db"); }
@Override public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final Credentials credentials = authentication.get(); if(credentials.isAnonymousLogin()) { if(log.isDebugEnabled()) { log.debug(String.format("Connect with no credentials to %s", host)); } client.setProviderCredentials(null); } else { if(credentials.getTokens().validate()) { if(log.isDebugEnabled()) { log.debug(String.format("Connect with session credentials to %s", host)); } client.setProviderCredentials(new AWSSessionCredentials( credentials.getTokens().getAccessKeyId(), credentials.getTokens().getSecretAccessKey(), credentials.getTokens().getSessionToken())); } else { if(log.isDebugEnabled()) { log.debug(String.format("Connect with basic credentials to %s", host)); } client.setProviderCredentials(new AWSCredentials(credentials.getUsername(), credentials.getPassword())); } } if(host.getCredentials().isPassed()) { log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this)); return; } try { final Path home = new DelegatingHomeFeature(new DefaultPathHomeFeature(host)).find(); final Location.Name location = new S3LocationFeature(S3Session.this, regions).getLocation(home); if(log.isDebugEnabled()) { log.debug(String.format("Retrieved region %s", location)); } if(!Location.unknown.equals(location)) { if(log.isDebugEnabled()) { log.debug(String.format("Set default region to %s determined from %s", location, home)); } // host.setProperty("s3.location", location.getIdentifier()); } } catch(AccessDeniedException | InteroperabilityException e) { log.warn(String.format("Failure %s querying region", e)); final Path home = new DefaultHomeFinderService(this).find(); if(log.isDebugEnabled()) { log.debug(String.format("Retrieved %s", home)); } } }
@Test(expected = LoginFailureException.class) public void testConnectCn_North_1() throws Exception { final Host host = new Host(new S3Protocol(), "s3.cn-north-1.amazonaws.com.cn", new Credentials("AWS-QWEZUKJHGVCVBJHG", "uztfjkjnbvcf")); final S3Session session = new S3Session(host); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); session.close(); }
protected void writeOneBatch() throws InterruptedException { // waiting if necessary until an element becomes available currentJournal = journalQueue.take(); long nextJournalId = nextVisibleJournalId; initBatch(); try { this.journal.batchWriteBegin(); while (true) { journal.batchWriteAppend(nextJournalId, currentJournal.getBuffer()); currentBatchTasks.add(currentJournal); nextJournalId += 1; if (shouldCommitNow()) { break; } currentJournal = journalQueue.take(); } } catch (JournalException e) { // abort current task LOG.warn("failed to write batch, will abort current journal {} and commit", currentJournal, e); abortJournalTask(currentJournal, e.getMessage()); } finally { try { // commit journal.batchWriteCommit(); LOG.debug("batch write commit success, from {} - {}", nextVisibleJournalId, nextJournalId); nextVisibleJournalId = nextJournalId; markCurrentBatchSucceed(); } catch (JournalException e) { // abort LOG.warn("failed to commit batch, will abort current {} journals.", currentBatchTasks.size(), e); try { journal.batchWriteAbort(); } catch (JournalException e2) { LOG.warn("failed to abort batch, will ignore and continue.", e); } abortCurrentBatch(e.getMessage()); } } rollJournalAfterBatch(); updateBatchMetrics(); }
@Ignore @Test public void testBatchWriteBeginException() throws Exception { JournalTask task1 = new JournalTask(System.nanoTime(), makeBuffer(10), -1); JournalTask task2 = new JournalTask(System.nanoTime(), makeBuffer(11), -1); journalQueue.add(task1); journalQueue.add(task2); new Expectations(journal) { { journal.batchWriteBegin(); times = 1; result = new JournalException("mock batchWriteBegin exception"); journal.batchWriteAbort(); times = 1; } }; Assert.assertEquals(1, abortedWriter.nextVisibleJournalId); abortedWriter.writeOneBatch(); Assert.assertEquals(1, abortedWriter.nextVisibleJournalId); Assert.assertFalse(task1.get()); Assert.assertEquals(1, journalQueue.size()); Assert.assertEquals(task2, journalQueue.take()); }
static String[] getClassPathElements() { return System.getProperty("java.class.path", ".").split(System.getProperty("path.separator")); }
@Test public void getClassPathElements() { String[] retrieved = ResourceHelper.getClassPathElements(); assertThat(retrieved).isNotNull(); assertThat(retrieved.length == 0).isFalse(); }
public static Object invokeMethod(final Object object, final String method, final Consumer<ReflectiveOperationException> errorCallBack, final Object... args) { try { return MethodUtils.invokeMethod(object, method, args); } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { errorCallBack.accept(e); } return null; }
@Test public void testInvokeMethod() { final Reflect reflect = new Reflect(); assertEquals("1", ReflectUtils.invokeMethod(reflect, "methodA")); }
@Operation(summary = "queryClusterListPaging", description = "QUERY_CLUSTER_LIST_PAGING_NOTES") @Parameters({ @Parameter(name = "searchVal", description = "SEARCH_VAL", schema = @Schema(implementation = String.class)), @Parameter(name = "pageSize", description = "PAGE_SIZE", required = true, schema = @Schema(implementation = int.class, example = "20")), @Parameter(name = "pageNo", description = "PAGE_NO", required = true, schema = @Schema(implementation = int.class, example = "1")) }) @GetMapping(value = "/list-paging") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_CLUSTER_ERROR) public Result<PageInfo<ClusterDto>> queryClusterListPaging(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "searchVal", required = false) String searchVal, @RequestParam("pageSize") Integer pageSize, @RequestParam("pageNo") Integer pageNo) { checkPageParams(pageNo, pageSize); searchVal = ParameterUtils.handleEscapes(searchVal); PageInfo<ClusterDto> clusterDtoPageInfo = clusterService.queryClusterListPaging(pageNo, pageSize, searchVal); return Result.success(clusterDtoPageInfo); }
@Test public void testQueryClusterListPaging() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("searchVal", "test"); paramsMap.add("pageSize", "2"); paramsMap.add("pageNo", "2"); MvcResult mvcResult = mockMvc.perform(get("/cluster/list-paging") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); logger.info(result.toString()); Assertions.assertTrue(result != null && result.isSuccess()); logger.info("query list-paging cluster return result:{}", mvcResult.getResponse().getContentAsString()); }
@VisibleForTesting public void validateDictDataExists(Long id) { if (id == null) { return; } DictDataDO dictData = dictDataMapper.selectById(id); if (dictData == null) { throw exception(DICT_DATA_NOT_EXISTS); } }
@Test public void testValidateDictDataExists_notExists() { assertServiceException(() -> dictDataService.validateDictDataExists(randomLongId()), DICT_DATA_NOT_EXISTS); }
public BundleProcessor getProcessor( BeamFnApi.ProcessBundleDescriptor descriptor, List<RemoteInputDestination> remoteInputDesinations) { checkState( !descriptor.hasStateApiServiceDescriptor(), "The %s cannot support a %s containing a state %s.", BundleProcessor.class.getSimpleName(), BeamFnApi.ProcessBundleDescriptor.class.getSimpleName(), Endpoints.ApiServiceDescriptor.class.getSimpleName()); return getProcessor(descriptor, remoteInputDesinations, NoOpStateDelegator.INSTANCE); }
@Test public void handleCleanupWhenProcessingBundleFails() throws Exception { RuntimeException testException = new RuntimeException(); BeamFnDataOutboundAggregator mockInputSender = mock(BeamFnDataOutboundAggregator.class); CompletableFuture<InstructionResponse> processBundleResponseFuture = new CompletableFuture<>(); when(fnApiControlClient.handle(any(BeamFnApi.InstructionRequest.class))) .thenReturn(processBundleResponseFuture); FullWindowedValueCoder<String> coder = FullWindowedValueCoder.of(StringUtf8Coder.of(), Coder.INSTANCE); BundleProcessor processor = sdkHarnessClient.getProcessor( descriptor, Collections.singletonList( RemoteInputDestination.of( (FullWindowedValueCoder) coder, SDK_GRPC_READ_TRANSFORM))); when(dataService.createOutboundAggregator(any(), anyBoolean())).thenReturn(mockInputSender); BundleProgressHandler mockProgressHandler = mock(BundleProgressHandler.class); try { try (RemoteBundle activeBundle = processor.newBundle( ImmutableMap.of( SDK_GRPC_WRITE_TRANSFORM, RemoteOutputReceiver.of(ByteArrayCoder.of(), mock(FnDataReceiver.class))), mockProgressHandler)) { processBundleResponseFuture.completeExceptionally(testException); } fail("Exception expected"); } catch (ExecutionException e) { assertEquals(testException, e.getCause()); // We expect that we don't register the receiver and the next accept call will raise an error // making the data service aware of the error. verify(dataService, never()).unregisterReceiver(any()); assertThrows( "Inbound observer closed.", Exception.class, () -> outputReceiverCaptor.getValue().accept(Elements.getDefaultInstance())); } }
public synchronized File buildPackage(HeliumPackage pkg, boolean rebuild, boolean recopyLocalModule) throws IOException { if (pkg == null) { return null; } String[] moduleNameVersion = getNpmModuleNameAndVersion(pkg); if (moduleNameVersion == null) { LOGGER.warn("Can't get module name and version of package {}", pkg.getName()); return null; } String pkgName = pkg.getName(); File bundleDir = getHeliumPackageDirectory(pkgName); File bundleCache = getHeliumPackageBundleCache(pkgName); if (!rebuild && bundleCache.exists() && !bundleCache.isDirectory()) { return bundleCache; } // 0. install node, npm (should be called before `downloadPackage` try { installNodeAndNpm(); } catch (TaskRunnerException e) { throw new IOException(e); } // 1. prepare directories if (!heliumLocalRepoDirectory.exists() || !heliumLocalRepoDirectory.isDirectory()) { FileUtils.deleteQuietly(heliumLocalRepoDirectory); FileUtils.forceMkdir(heliumLocalRepoDirectory); } FrontendPluginFactory fpf = new FrontendPluginFactory( bundleDir, nodeInstallationDirectory); // resources: webpack.js, package.json String templateWebpackConfig = Resources.toString( Resources.getResource("helium/webpack.config.js"), StandardCharsets.UTF_8); String templatePackageJson = Resources.toString( Resources.getResource("helium/" + PACKAGE_JSON), StandardCharsets.UTF_8); // 2. download helium package using `npm pack` String mainFileName = null; try { mainFileName = downloadPackage(pkg, moduleNameVersion, bundleDir, templateWebpackConfig, templatePackageJson, fpf); } catch (TaskRunnerException e) { throw new IOException(e); } // 3. prepare bundle source prepareSource(pkg, moduleNameVersion, mainFileName); // 4. install node and local modules for a bundle copyFrameworkModulesToInstallPath(recopyLocalModule); // should copy local modules first installNodeModules(fpf); // 5. 
let's bundle and update cache File heliumBundle = bundleHeliumPackage(fpf, bundleDir); bundleCache.delete(); FileUtils.moveFile(heliumBundle, bundleCache); return bundleCache; }
@Test void bundleLocalPackage() throws IOException, TaskRunnerException { URL res = Resources.getResource("helium/webpack.config.js"); String resDir = new File(res.getFile()).getParent(); String localPkg = resDir + "/../../../src/test/resources/helium/vis1"; HeliumPackage pkg = newHeliumPackage( HeliumType.VISUALIZATION, "vis1", "vis1", localPkg, "", null, "license", "fa fa-coffee"); File bundle = hbf.buildPackage(pkg, true, true); assertTrue(bundle.isFile()); }
public AveragingCombiner() {}
@Test public void averagingCombinerTest() { Dataset<Regressor> abcDataset = abcDataset(); Model<Regressor> fiveModel = fiveTrainer.train(abcDataset); Model<Regressor> threeModel = threeTrainer.train(abcDataset); Model<Regressor> oneModel = oneTrainer.train(abcDataset); List<Model<Regressor>> modelList = new ArrayList<>(); Example<Regressor> testExample = new ArrayExample<>(new Regressor(abc,new double[]{-1,-1,-1}),new String[]{"X_0","X_1","X_2"}, new double[]{1,2,3}); // Combiner predicts the average modelList.add(fiveModel); modelList.add(threeModel); modelList.add(oneModel); WeightedEnsembleModel<Regressor> ensemble = WeightedEnsembleModel.createEnsembleFromExistingModels("average",modelList, averagingCombiner); assertEquals(3,ensemble.getNumModels()); Prediction<Regressor> prediction = ensemble.predict(testExample); Regressor target = new Regressor(abc,new double[]{3,3,3}); assertArrayEquals(prediction.getOutput().getValues(),target.getValues()); modelList.clear(); // Weights affect the averaging modelList.add(fiveModel); modelList.add(threeModel); modelList.add(oneModel); ensemble = WeightedEnsembleModel.createEnsembleFromExistingModels("weighted",modelList, averagingCombiner,new float[]{3,1,1}); assertEquals(3,ensemble.getNumModels()); prediction = ensemble.predict(testExample); target = new Regressor(abc,new double[]{3.8,3.8,3.8}); assertArrayEquals(prediction.getOutput().getValues(),target.getValues()); modelList.clear(); Helpers.testModelSerialization(ensemble,Regressor.class); }
public void loadProperties(Properties properties) { Set<Entry<Object, Object>> entries = properties.entrySet(); for (Entry entry : entries) { String key = (String) entry.getKey(); Object value = entry.getValue(); String[] keySplit = key.split("[.]"); Map<String, Object> target = this; for (int i = 0; i < keySplit.length - 1; i++) { if (!target.containsKey(keySplit[i])) { HashMap subEntry = new HashMap(); target.put(keySplit[i], subEntry); target = subEntry; } else { Object subEntry = target.get(keySplit[i]); if (!(subEntry instanceof Map)) { HashMap replace = new HashMap(); replace.put("_", subEntry); target.put(keySplit[i], replace); } target = (Map<String, Object>) target.get(keySplit[i]); } } if (target.get(keySplit[keySplit.length - 1]) instanceof Map) { ((Map) target.get(keySplit[keySplit.length - 1])).put("_", value); } else { target.put(keySplit[keySplit.length - 1], value); } } }
@Test void testLoadPropertyOverrideDict() { // given K8sSpecTemplate template = new K8sSpecTemplate(); Properties p = new Properties(); p.put("k8s.key1", "v2"); p.put("k8s", "v1"); // when template.loadProperties(p); // then assertEquals("v1", ((Map) template.get("k8s")).get("_")); assertEquals("v2", ((Map) template.get("k8s")).get("key1")); }
@Override public NullsOrderType getDefaultNullsOrderType() { return NullsOrderType.FIRST; }
@Test void assertGetDefaultNullsOrderType() { assertThat(dialectDatabaseMetaData.getDefaultNullsOrderType(), is(NullsOrderType.FIRST)); }
@Override public void onProjectBranchesChanged(Set<Project> projects, Set<String> impactedBranches) { checkNotNull(projects, "projects can't be null"); if (projects.isEmpty()) { return; } Arrays.stream(listeners) .forEach(safelyCallListener(listener -> listener.onProjectBranchesChanged(projects, impactedBranches))); }
@Test @UseDataProvider("oneOrManyProjects") public void onProjectBranchesChanged_calls_all_listeners_in_order_of_addition_to_constructor(Set<Project> projects) { InOrder inOrder = Mockito.inOrder(listener1, listener2, listener3); underTestWithListeners.onProjectBranchesChanged(projects, emptySet()); inOrder.verify(listener1).onProjectBranchesChanged(same(projects), eq(emptySet())); inOrder.verify(listener2).onProjectBranchesChanged(same(projects), eq(emptySet())); inOrder.verify(listener3).onProjectBranchesChanged(same(projects), eq(emptySet())); inOrder.verifyNoMoreInteractions(); }
@Override public boolean matches(ExpressionTree expressionTree, VisitorState state) { if (expressionTree instanceof LiteralTree) { LiteralTree literalTree = (LiteralTree) expressionTree; Object actualValue = literalTree.getValue(); return actualValue instanceof String && matcher.test((String) actualValue); } else { return false; } }
@Test public void matches() { // TODO(b/67738557): consolidate helpers for creating fake trees LiteralTree tree = new LiteralTree() { @Override public Kind getKind() { throw new UnsupportedOperationException(); } @Override public <R, D> R accept(TreeVisitor<R, D> visitor, D data) { throw new UnsupportedOperationException(); } @Override public Object getValue() { return "a string literal"; } }; assertThat(new StringLiteral("a string literal").matches(tree, null)).isTrue(); }
@Override public HadoopConf buildHadoopConfWithReadOnlyConfig(ReadonlyConfig readonlyConfig) { Configuration configuration = loadHiveBaseHadoopConfig(readonlyConfig); Config config = fillBucket(readonlyConfig, configuration); config = config.withValue( S3ConfigOptions.S3A_AWS_CREDENTIALS_PROVIDER.key(), ConfigValueFactory.fromAnyRef( configuration.get( S3ConfigOptions.S3A_AWS_CREDENTIALS_PROVIDER.key()))); config = config.withValue( S3ConfigOptions.FS_S3A_ENDPOINT.key(), ConfigValueFactory.fromAnyRef( configuration.get(S3ConfigOptions.FS_S3A_ENDPOINT.key()))); HadoopConf hadoopConf = HiveOnS3Conf.buildWithReadOnlyConfig(ReadonlyConfig.fromConfig(config)); Map<String, String> propsWithPrefix = configuration.getPropsWithPrefix(StringUtils.EMPTY); hadoopConf.setExtraOptions(propsWithPrefix); return hadoopConf; }
@Test void fillBucketInHadoopConf() { S3Storage s3Storage = new S3Storage(); HadoopConf s3aConf = s3Storage.buildHadoopConfWithReadOnlyConfig(S3A); assertHadoopConfForS3a(s3aConf); HadoopConf s3Conf = s3Storage.buildHadoopConfWithReadOnlyConfig(S3); Assertions.assertTrue(s3Conf instanceof HiveOnS3Conf); Assertions.assertEquals(s3Conf.getSchema(), "s3"); Assertions.assertEquals( s3Conf.getFsHdfsImpl(), "com.amazon.ws.emr.hadoop.fs.EmrFileSystem"); }
public void handle(SeckillWebMockRequestDTO request) { prePreRequestHandlers.stream().sorted(Comparator.comparing(Ordered::getOrder)) .forEach(it -> { try { it.handle(request); } catch (Exception e) { log.warn("pre request handler error", e); } }); }
@Test public void shouldHandleRequestSuccessfully() { SeckillWebMockRequestDTO request = new SeckillWebMockRequestDTO(); doNothing().when(handler1).handle(request); doNothing().when(handler2).handle(request); preRequestPipeline.handle(request); verify(handler1, times(1)).handle(request); verify(handler2, times(1)).handle(request); }
public <R> TraceContext.Injector<R> newInjector(Setter<R, String> setter) { if (setter == null) throw new NullPointerException("setter == null"); if (setter instanceof RemoteSetter) { RemoteSetter<?> remoteSetter = (RemoteSetter<?>) setter; switch (remoteSetter.spanKind()) { case CLIENT: return new RemoteInjector<R>(setter, clientInjectorFunction); case PRODUCER: return new RemoteInjector<R>(setter, producerInjectorFunction); case CONSUMER: return new RemoteInjector<R>(setter, consumerInjectorFunction); default: // SERVER is nonsense as it cannot be injected } } return new DeferredInjector<R>(setter, this); }
@Test void twoFunctions_injects_deferred() { DeferredInjector<Object> deferredInjector = (DeferredInjector<Object>) twoFunctions.newInjector(setter); for (Kind kind : injectableKinds) { when(request.spanKind()).thenReturn(kind); deferredInjector.inject(context, request); assertThat(oneCount.getAndSet(0)).isOne(); assertThat(twoCount.getAndSet(0)).isOne(); } deferredInjector.inject(context, notRequest); assertThat(oneCount.getAndSet(0)).isOne(); assertThat(twoCount.getAndSet(0)).isOne(); // works with nonsense when(request.spanKind()).thenReturn(Kind.SERVER); deferredInjector.inject(context, request); assertThat(oneCount.getAndSet(0)).isOne(); assertThat(twoCount.getAndSet(0)).isOne(); }
public static String[] splitToSteps(String path, boolean preserveRootAsStep) { if (path == null) { return null; } if (preserveRootAsStep && path.equals(SHARE_ROOT)) { return new String[] { SHARE_ROOT }; } var includeRoot = preserveRootAsStep && path.startsWith(SHARE_ROOT); if (!includeRoot) { path = ensureRelative(path); } // no ambiguity such as "/|\\\\" var pathSteps = path.split("" + PATH_SEPARATOR); if (includeRoot) { pathSteps[0] = SHARE_ROOT; // replace leading "" } return pathSteps; }
@Test void splitRelativePreservingRootShouldReturnStepsOnly() { assertArrayEquals(new String[] { "1", "2" }, FilesPath.splitToSteps("1/2", true)); }
protected static synchronized void registerAdaptiveProtocol(ProtocolInfo protocolInfo) { // 取最大偏移量 maxMagicOffset = Math.max(maxMagicOffset, protocolInfo.magicFieldOffset() + protocolInfo.magicFieldLength()); }
@Test public void registerAdaptiveProtocol() throws Exception { }
public static ViewMetadata fromJson(String metadataLocation, String json) { return JsonUtil.parse(json, node -> ViewMetadataParser.fromJson(metadataLocation, node)); }
@Test public void viewMetadataWithMultipleSQLsForDialectShouldBeReadable() throws Exception { ViewVersion viewVersion = ImmutableViewVersion.builder() .versionId(1) .timestampMillis(4353L) .summary(ImmutableMap.of("user", "some-user")) .schemaId(0) .defaultCatalog("some-catalog") .defaultNamespace(Namespace.empty()) .addRepresentations( ImmutableSQLViewRepresentation.builder() .sql("select 'foo' foo") .dialect("spark-sql") .build()) .addRepresentations( ImmutableSQLViewRepresentation.builder() .sql("select * from foo") .dialect("spark-sql") .build()) .build(); String json = readViewMetadataInputFile( "org/apache/iceberg/view/ViewMetadataMultipleSQLsForDialect.json"); // builder will throw an exception due to having multiple SQLs for the same dialect, thus // construct the expected view metadata directly ViewMetadata expectedViewMetadata = ImmutableViewMetadata.of( "fa6506c3-7681-40c8-86dc-e36561f83385", 1, "s3://bucket/test/location", ImmutableList.of(TEST_SCHEMA), 1, ImmutableList.of(viewVersion), ImmutableList.of( ImmutableViewHistoryEntry.builder().versionId(1).timestampMillis(4353).build()), ImmutableMap.of("some-key", "some-value"), ImmutableList.of(), null); // reading view metadata with multiple SQLs for the same dialects shouldn't fail ViewMetadata actual = ViewMetadataParser.fromJson(json); assertThat(actual) .usingRecursiveComparison() .ignoringFieldsOfTypes(Schema.class) .isEqualTo(expectedViewMetadata); }
public void executor(final ConfigGroupEnum type, final String json, final String eventType) { ENUM_MAP.get(type).handle(json, eventType); }
@Test public void testPluginCreateExecutor() { String json = getJson(); websocketDataHandler.executor(ConfigGroupEnum.PLUGIN, json, DataEventTypeEnum.CREATE.name()); List<PluginData> pluginDataList = new PluginDataHandler(pluginDataSubscriber).convert(json); pluginDataList.forEach(verify(pluginDataSubscriber)::onSubscribe); }
public FEELFnResult<Map<String, Object>> invoke(@ParameterName("entries") List<Object> entries) { if (entries == null) { return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entries", "cannot be null")); } Map<String, Object> result = new HashMap<>(); for (int i = 0; i < entries.size(); i++) { final int h_index = i + 1; if (entries.get(i) instanceof Map) { Map<?, ?> map = (Map<?, ?>) entries.get(i); String key; Object value; if (map.get("key") instanceof String) { key = (String) map.get("key"); } else { return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " is missing a `key` entry")); } if (map.containsKey("value")) { value = map.get("value"); } else { return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " is missing a `value` entry")); } if (result.containsKey(key)) { return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " contains duplicate key")); } result.put(key, value); } else { return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " is not a valid context")); } } return FEELFnResult.ofResult(result); }
@Test void invokeDuplicateKey() { FunctionTestUtil.assertResultError(contextFunction.invoke(List.of( Map.of("key", "name", "value", "John Doe"), Map.of("key", "name", "value", "John Doe"))), InvalidParametersEvent.class); FunctionTestUtil.assertResultNotError(contextFunction.invoke(List.of( Map.of("key", "name", "value", "John Doe"), Map.of("key", "age", "value", 12)))); }
@GET @Produces(MediaType.APPLICATION_JSON) @Path("{networkId}/devices/{deviceId}/ports") public Response getVirtualPorts(@PathParam("networkId") long networkId, @PathParam("deviceId") String deviceId) { NetworkId nid = NetworkId.networkId(networkId); Iterable<VirtualPort> vports = vnetService.getVirtualPorts(nid, DeviceId.deviceId(deviceId)); return ok(encodeArray(VirtualPort.class, "ports", vports)).build(); }
@Test public void testGetVirtualPortsEmptyArray() { NetworkId networkId = networkId4; DeviceId deviceId = devId2; expect(mockVnetService.getVirtualPorts(networkId, deviceId)) .andReturn(ImmutableSet.of()).anyTimes(); replay(mockVnetService); WebTarget wt = target(); String location = "vnets/" + networkId.toString() + "/devices/" + deviceId.toString() + "/ports"; String response = wt.path(location).request().get(String.class); assertThat(response, is("{\"ports\":[]}")); verify(mockVnetService); }
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); final Calendar calendar = readDateTime(data, 0); if (calendar == null) { onInvalidDataReceived(device, data); return; } onDateTimeReceived(device, calendar); }
@Test public void onInvalidDataReceived_dataTooLong() { final DataReceivedCallback callback = new DateTimeDataCallback() { @Override public void onDateTimeReceived(@NonNull final BluetoothDevice device, @NonNull final Calendar calendar) { assertEquals("Incorrect Date and Time reported as correct", 1, 2); } @Override public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { assertEquals("Incorrect Date and Time size", 0, data.size()); } }; final Data data = new Data(); callback.onDataReceived(null, data); }
static <T> void compactNulls(ArrayList<T> list) { list.removeIf(Objects::isNull); }
@Test public void shouldCompactNulls2() { ArrayList<Integer> listBad = (ArrayList) Lists.newArrayList(1); MutableFSTImpl.compactNulls(listBad); assertEquals(Lists.newArrayList(1), listBad); }
static SegmentStatus getSegmentStatus(Path path) { try (RecordIOReader ioReader = new RecordIOReader(path)) { boolean moreEvents = true; SegmentStatus segmentStatus = SegmentStatus.EMPTY; while (moreEvents) { // If all events in the segment can be read, then assume that this is a valid segment moreEvents = (ioReader.readEvent() != null); if (moreEvents) segmentStatus = SegmentStatus.VALID; } return segmentStatus; } catch (IOException | IllegalStateException e) { logger.warn("Error reading segment file {}", path, e); return SegmentStatus.INVALID; } }
@Test
public void testPartiallyWrittenSegment() throws Exception {
    // Write only a record header (claiming a 100-byte COMPLETE record) with no
    // payload; reading such a truncated segment must classify it as INVALID.
    try (RecordIOWriter writer = new RecordIOWriter(file)) {
        writer.writeRecordHeader(new RecordHeader(RecordType.COMPLETE, 100, OptionalInt.empty(), 0));
    }
    assertThat(RecordIOReader.getSegmentStatus(file), is(RecordIOReader.SegmentStatus.INVALID));
}
/**
 * Sets the monitor sampling interval.
 *
 * @param interval interval expression (not validated here)
 * @return this builder, for chaining
 */
public MonitorBuilder interval(String interval) {
    this.interval = interval;
    return getThis();
}
@Test
void interval() {
    // The interval handed to the builder must survive into the built config.
    final MonitorBuilder builder = MonitorBuilder.newBuilder().interval("interval");
    Assertions.assertEquals("interval", builder.build().getInterval());
}
/**
 * Computes which node indices must be implicitly marked down for the given
 * cluster state. Delegates the group-availability computation to
 * {@code calculate} and returns only the resulting node index set.
 */
public Set<Integer> nodesThatShouldBeDown(ClusterState state) {
    return calculate(state).nodesThatShouldBeDown();
}
@Test
void group_node_down_edge_implicitly_marks_down_rest_of_nodes_in_group() {
    // 3 groups of 2 nodes, take down node #4 (1st node in last group). Since we require
    // at least 51% of group capacity to be available, implicitly take down the last group
    // entirely.
    GroupAvailabilityCalculator calc = calcForHierarchicCluster(
            DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
    // Node 4 is already explicitly down (".4.s:d"), so only its group-mate,
    // node 5, is expected in the implicit take-down set.
    assertThat(calc.nodesThatShouldBeDown(clusterState(
            "distributor:6 storage:6 .4.s:d")), equalTo(indices(5)));
}
@Override
public float getFloat(final int columnIndex) throws SQLException {
    // Fetch the merged column value, then normalize it to a primitive float.
    final Object raw = mergeResultSet.getValue(columnIndex, float.class);
    return (float) ResultSetUtils.convertValue(raw, float.class);
}
@Test
void assertGetFloatWithColumnIndex() {
    // The merged value must be passed through unchanged as a float.
    when(mergeResultSet.getValue(1, float.class)).thenReturn(1.0F);
    final float actual = shardingSphereResultSet.getFloat(1);
    assertThat(actual, is(1.0F));
}
/** Returns true when the resource's value type has a registered codec. */
boolean isEncodable(DiscreteResource resource) {
    return resource.valueAs(Object.class)
            .map(value -> codecs.containsKey(value.getClass()))
            .orElse(false);
}
@Test
public void isOchNonEncodable() {
    // OCH values have no registered codec, so the resource must not be encodable.
    final DiscreteResource ochResource = Resources.discrete(DID, PN, OCH).resource();
    assertThat(sut.isEncodable(ochResource), is(false));
}
/** Creates an uninitialized {@code Write} transform backed by {@code writeRecords()}. */
public static <K, V> Write<K, V> write() {
    final AutoValue_KafkaIO_Write.Builder<K, V> builder = new AutoValue_KafkaIO_Write.Builder<>();
    return builder.setWriteRecordsTransform(writeRecords()).build();
}
@Test
public void testSinkWithSendErrors() throws Throwable {
    // similar to testSink(), except that up to 10 of the send calls to producer will fail
    // asynchronously.
    // TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail.
    // We limit the number of errors injected to 10 below. This would reflect a real streaming
    // pipeline. But I am not sure how to achieve that. For now expect an exception:
    thrown.expect(InjectedErrorException.class);
    thrown.expectMessage("Injected Error #1");

    int numElements = 1000;
    try (MockProducerWrapper producerWrapper = new MockProducerWrapper(new LongSerializer())) {
        // Completion thread that acks sends but injects 10 asynchronous failures,
        // checking every 100ms.
        ProducerSendCompletionThread completionThreadWithErrors =
            new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start();

        String topic = "test";
        p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
            .apply(
                KafkaIO.<Integer, Long>write()
                    .withBootstrapServers("none")
                    .withTopic(topic)
                    .withKeySerializer(IntegerSerializer.class)
                    .withValueSerializer(LongSerializer.class)
                    .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
        try {
            p.run();
        } catch (PipelineExecutionException e) {
            // throwing inner exception helps assert that first exception is thrown from the Sink
            throw e.getCause().getCause();
        } finally {
            completionThreadWithErrors.shutdown();
        }
    }
}
/**
 * Performs task assignment as the leader during a rebalance and serializes the
 * per-member assignments into the Connect protocol wire format.
 */
protected Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset,
                                                        Map<String, ExtendedWorkerState> memberConfigs,
                                                        WorkerCoordinator coordinator,
                                                        short protocolVersion) {
    log.debug("Performing task assignment during generation: {} with memberId: {}",
            coordinator.generationId(), coordinator.memberId());
    // Snapshot each member's currently-owned connectors and tasks from its
    // reported worker state.
    Map<String, ConnectorsAndTasks> memberAssignments = transformValues(
            memberConfigs,
            memberConfig -> new ConnectorsAndTasks.Builder()
                    .with(memberConfig.assignment().connectors(), memberConfig.assignment().tasks())
                    .build()
    );
    ClusterAssignment clusterAssignment = performTaskAssignment(
            coordinator.configSnapshot(),
            coordinator.lastCompletedGenerationId(),
            coordinator.generationId(),
            memberAssignments
    );

    // Record the computed ownership on the coordinator before building responses,
    // so the leader's view matches what is handed out below.
    coordinator.leaderState(new LeaderState(memberConfigs,
            clusterAssignment.allAssignedConnectors(),
            clusterAssignment.allAssignedTasks()));

    Map<String, ExtendedAssignment> assignments = fillAssignments(
            memberConfigs.keySet(), Assignment.NO_ERROR, leaderId,
            memberConfigs.get(leaderId).url(), maxOffset,
            clusterAssignment, delay, protocolVersion);

    log.debug("Actual assignments: {}", assignments);
    // Serialize the per-member assignments for the requested protocol version.
    return serializeAssignments(assignments, protocolVersion);
}
@Test
public void testLeaderStateUpdated() {
    // Sanity test to make sure that the coordinator's leader state is actually updated after a rebalance
    connectors.clear();
    String leader = "followMe";
    Map<String, ExtendedWorkerState> workerStates = new HashMap<>();
    workerStates.put(leader, new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, ExtendedAssignment.empty()));
    WorkerCoordinator coordinator = mock(WorkerCoordinator.class);
    when(coordinator.configSnapshot()).thenReturn(configState());
    assignor.performTaskAssignment(
        leader,
        CONFIG_OFFSET,
        workerStates,
        coordinator,
        IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2
    );
    // The assignor must publish the computed ownership back onto the coordinator.
    verify(coordinator).leaderState(notNull());
}
/**
 * Serializes the given data object in YAML map form to the file at {@code path}
 * using UTF-8. Any failure is rethrown as an {@code AclException} with the
 * original cause attached.
 *
 * @return always {@code true}; failures are reported via exception
 */
public static boolean writeDataObject(String path, Object dataMap) {
    final Yaml yaml = new Yaml();
    try (PrintWriter writer = new PrintWriter(path, "UTF-8")) {
        writer.print(yaml.dumpAsMap(dataMap));
        writer.flush();
    } catch (Exception e) {
        throw new AclException(e.getMessage(), e);
    }
    return true;
}
@Test
public void writeDataObject2YamlFileTest() throws IOException {
    // Serialize a small ACL config (global white-list plus one admin account)
    // to a temp YAML file and verify the write reports success.
    String targetFileName = randomTmpFile();
    File transport = new File(targetFileName);
    Assert.assertTrue(transport.createNewFile());
    transport.deleteOnExit();

    PlainAccessData aclYamlMap = new PlainAccessData();
    // For globalWhiteRemoteAddrs element in acl yaml config file
    List<String> globalWhiteRemoteAddrs = new ArrayList<>();
    globalWhiteRemoteAddrs.add("10.10.103.*");
    globalWhiteRemoteAddrs.add("192.168.0.*");
    aclYamlMap.setGlobalWhiteRemoteAddresses(globalWhiteRemoteAddrs);

    // For accounts element in acl yaml config file
    List<PlainAccessConfig> accounts = new ArrayList<>();
    PlainAccessConfig accountsMap = new PlainAccessConfig() {
        {
            setAccessKey("RocketMQ");
            setSecretKey("12345678");
            setWhiteRemoteAddress("whiteRemoteAddress");
            setAdmin(true);
        }
    };
    accounts.add(accountsMap);
    aclYamlMap.setAccounts(accounts);
    Assert.assertTrue(AclUtils.writeDataObject(targetFileName, aclYamlMap));
}
/**
 * Renders the node's processor and store configuration for debugging;
 * delegates to {@code super.toString()} for the shared fields.
 */
@Override
public String toString() {
    // Fixed: the original emitted "TableProcessorNode{, processorParameters=..."
    // — a stray ", " immediately after the opening brace.
    return "TableProcessorNode{" +
        "processorParameters=" + processorParameters +
        ", storeFactory=" + (storeFactory == null ? "null" : storeFactory.name()) +
        ", storeNames=" + Arrays.toString(storeNames) +
        "} " + super.toString();
}
@Test
public void shouldConvertToStringWithNullStoreBuilder() {
    // toString must render a null store factory as the literal "storeFactory=null"
    // rather than throwing a NullPointerException.
    final TableProcessorNode<String, String> node = new TableProcessorNode<>(
        "name",
        new ProcessorParameters<>(TestProcessor::new, "processor"),
        null,
        new String[]{"store1", "store2"}
    );

    final String asString = node.toString();
    final String expected = "storeFactory=null";
    assertTrue(
        asString.contains(expected),
        String.format(
            "Expected toString to return string with \"%s\", received: %s", expected, asString)
    );
}
/**
 * Exported-key metadata is not provided by this implementation; the method
 * returns {@code null} instead of a result set (the paired test pins this).
 */
@Override
public ResultSet getExportedKeys(final String catalog, final String schema, final String table) {
    return null;
}
@Test
void assertGetExportedKeys() {
    // The stub implementation pins exported-key metadata to null.
    assertNull(metaData.getExportedKeys("", "", ""));
}
/**
 * Deserializes {@code bs} into the protobuf message registered under
 * {@code className}, using the pre-resolved parse method handle.
 *
 * @throws MessageClassNotFoundException when no parser is registered for the name
 * @throws SerializationException when the parser invocation fails
 */
@SuppressWarnings("unchecked")
public static <T extends Message> T newMessageByProtoClassName(final String className, final byte[] bs) {
    final MethodHandle parser = PARSE_METHODS_4PROTO.get(className);
    if (parser == null) {
        throw new MessageClassNotFoundException(className + " not found");
    }
    try {
        return (T) parser.invoke(bs);
    } catch (final Throwable t) {
        throw new SerializationException(t);
    }
}
@Test
public void testNewMessage() {
    // Round-trip a SnapshotMeta through its serialized bytes via the registered
    // proto parser; the result must be an equal but distinct instance.
    SnapshotMeta meta = SnapshotMeta.newBuilder().setLastIncludedIndex(99).setLastIncludedTerm(1).build();
    SnapshotMeta pMeta = ProtobufMsgFactory.newMessageByProtoClassName("jraft.SnapshotMeta", meta.toByteArray());
    assertNotNull(pMeta);
    assertNotSame(pMeta, meta);
    assertEquals(pMeta, meta);
}
/**
 * Builds the update requirements for replacing a view: the view UUID must match
 * the base metadata, plus any requirements implied by the individual updates.
 *
 * @throws IllegalArgumentException when base metadata or updates are null
 */
public static List<UpdateRequirement> forReplaceView(
    ViewMetadata base, List<MetadataUpdate> metadataUpdates) {
  Preconditions.checkArgument(null != base, "Invalid view metadata: null");
  Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");

  Builder builder = new Builder(null, false);
  builder.require(new UpdateRequirement.AssertViewUUID(base.uuid()));
  for (MetadataUpdate update : metadataUpdates) {
    builder.update(update);
  }
  return builder.build();
}
@Test
public void addViewVersion() {
    // Multiple AddViewVersion updates on a view replace must still collapse to a
    // single AssertViewUUID requirement — view versions add no extra requirements.
    List<UpdateRequirement> requirements =
        UpdateRequirements.forReplaceView(
            viewMetadata,
            ImmutableList.of(
                new MetadataUpdate.AddViewVersion(
                    ImmutableViewVersion.builder()
                        .versionId(1)
                        .schemaId(1)
                        .timestampMillis(System.currentTimeMillis())
                        .defaultNamespace(Namespace.of("ns"))
                        .build()),
                new MetadataUpdate.AddViewVersion(
                    ImmutableViewVersion.builder()
                        .versionId(2)
                        .schemaId(1)
                        .timestampMillis(System.currentTimeMillis())
                        .defaultNamespace(Namespace.of("ns"))
                        .build()),
                new MetadataUpdate.AddViewVersion(
                    ImmutableViewVersion.builder()
                        .versionId(3)
                        .schemaId(1)
                        .timestampMillis(System.currentTimeMillis())
                        .defaultNamespace(Namespace.of("ns"))
                        .build())));
    requirements.forEach(req -> req.validate(viewMetadata));

    assertThat(requirements)
        .hasSize(1)
        .hasOnlyElementsOfTypes(UpdateRequirement.AssertViewUUID.class);
    assertViewUUID(requirements);
}
public static WorkflowInstanceAggregatedInfo computeAggregatedView( WorkflowInstance workflowInstance, boolean statusKnown) { if (workflowInstance == null) { // returning empty object since cannot access state of the current instance run return new WorkflowInstanceAggregatedInfo(); } WorkflowInstanceAggregatedInfo instanceAggregated = computeAggregatedViewNoStatus(workflowInstance); if (statusKnown || workflowInstance.getAggregatedInfo() == null) { instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus()); } else { computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated); } return instanceAggregated; }
@Test
public void testAggregatedViewWithTwoBranchesFailed() {
    // Run 1: fresh run where step1 succeeded and step2/step3 fatally failed.
    WorkflowInstance run1 = getGenericWorkflowInstance(
        1, WorkflowInstance.Status.FAILED, RunPolicy.START_FRESH_NEW_RUN, null);
    Workflow runtimeWorkflow = mock(Workflow.class);
    Map<String, StepRuntimeState> decodedOverview = new LinkedHashMap<>();
    decodedOverview.put("step1", generateStepState(StepInstance.Status.SUCCEEDED, 1L, 2L));
    decodedOverview.put("step2", generateStepState(StepInstance.Status.FATALLY_FAILED, 3L, 4L));
    decodedOverview.put("step3", generateStepState(StepInstance.Status.FATALLY_FAILED, 5L, 6L));
    WorkflowRuntimeOverview overview = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview).when(overview).decodeStepOverview(run1.getRuntimeDag());
    run1.setRuntimeOverview(overview);
    run1.setRuntimeWorkflow(runtimeWorkflow);
    // The run-1 aggregate reflects the raw step states and the FAILED status.
    WorkflowInstanceAggregatedInfo aggregated =
        AggregatedViewHelper.computeAggregatedView(run1, false);
    assertEquals(1L, aggregated.getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(3L, aggregated.getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(5L, aggregated.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(WorkflowInstance.Status.FAILED, aggregated.getWorkflowInstanceStatus());

    // Run 2: restart from step2 only; its DAG covers step2 and step3, and only
    // step2 completes (SUCCEEDED) in this run.
    WorkflowInstance run2 = getGenericWorkflowInstance(
        2, WorkflowInstance.Status.SUCCEEDED, RunPolicy.RESTART_FROM_SPECIFIC,
        RestartPolicy.RESTART_FROM_BEGINNING);
    RestartConfig config =
        RestartConfig.builder()
            .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
            .addRestartNode(run2.getWorkflowId(), run2.getWorkflowInstanceId(), "step2")
            .build();
    run2.getRunConfig().setRestartConfig(config);
    Map<String, StepRuntimeState> decodedOverview2 = new LinkedHashMap<>();
    decodedOverview2.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 11L, 12L));
    Map<String, StepTransition> run2Dag = new LinkedHashMap<>();
    run2Dag.put("step2", new StepTransition());
    run2Dag.put("step3", new StepTransition());
    run2.setRuntimeDag(run2Dag);
    doReturn(run1)
        .when(workflowInstanceDao)
        .getWorkflowInstanceRun(run2.getWorkflowId(), run2.getWorkflowInstanceId(), 1L);
    // Before run 2 executes, its stored aggregate equals run 1's view.
    run2.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(run1, false));
    assertEquals(3, run2.getAggregatedInfo().getStepAggregatedViews().size());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStatus());
    assertEquals(
        StepInstance.Status.FATALLY_FAILED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStatus());
    assertEquals(
        StepInstance.Status.FATALLY_FAILED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step3").getStatus());
    assertEquals(
        1L, run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(
        3L, run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(5L, aggregated.getStepAggregatedViews().get("step3").getStartTime().longValue());
    WorkflowRuntimeOverview wro2 = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview2).when(wro2).decodeStepOverview(run2.getRuntimeDag());
    run2.setRuntimeOverview(wro2);
    run2.setRuntimeWorkflow(runtimeWorkflow);
    // After run 2: step2 is overridden to SUCCEEDED (new start time 11L), step1
    // carries over from run 1, step3 stays FATALLY_FAILED — so the aggregated
    // instance status remains FAILED.
    WorkflowInstanceAggregatedInfo aggregated2 =
        AggregatedViewHelper.computeAggregatedView(run2, false);
    assertEquals(3, aggregated2.getStepAggregatedViews().size());
    assertEquals(
        StepInstance.Status.SUCCEEDED, aggregated2.getStepAggregatedViews().get("step1").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED, aggregated2.getStepAggregatedViews().get("step2").getStatus());
    assertEquals(
        StepInstance.Status.FATALLY_FAILED,
        aggregated2.getStepAggregatedViews().get("step3").getStatus());
    assertEquals(1L, aggregated2.getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(11L, aggregated2.getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(5L,
        aggregated2.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(WorkflowInstance.Status.FAILED, aggregated2.getWorkflowInstanceStatus());
}
/**
 * Walks every issue in the proto cache and records the keys of those whose
 * index entry must be refreshed.
 */
@Override
public void execute(Context context) {
    try (CloseableIterator<DefaultIssue> issueIterator = protoIssueCache.traverse()) {
        while (issueIterator.hasNext()) {
            final DefaultIssue current = issueIterator.next();
            if (shouldUpdateIndexForIssue(current)) {
                changedIssuesRepository.addIssueKey(current.key());
            }
        }
    }
}
@Test
public void execute_whenIssueIsToBeMigratedAsNewCodeReferenceIssue_shouldLoadIssue() {
    // An unchanged, non-new issue sitting on a changed line under a
    // REFERENCE_BRANCH new-code period must still be queued for re-indexing.
    when(periodHolder.hasPeriod()).thenReturn(true);
    when(periodHolder.getPeriod()).thenReturn(new Period("REFERENCE_BRANCH", null, null));
    protoIssueCache.newAppender()
        .append(newDefaultIssue()
            .setIsOnChangedLine(true)
            .setIsNewCodeReferenceIssue(false)
            .setIsNoLongerNewCodeReferenceIssue(false)
            .setNew(false)
            .setCopied(false)
            .setChanged(false))
        .close();

    underTest.execute(mock(ComputationStep.Context.class));

    verify(changedIssuesRepository).addIssueKey("issueKey1");
}
/**
 * Attaches a reactive subscriber to this result. Subscribing and polling are
 * mutually exclusive consumption modes; subscribing after polling has started
 * is rejected.
 *
 * @throws IllegalStateException when the result is already being polled
 */
@Override
public void subscribe(final Subscriber<? super Row> subscriber) {
    // NOTE(review): this flag is read outside the synchronized block below, so a
    // poll that begins concurrently could race with subscribe — presumably the
    // callers fix the consumption mode before threads diverge; confirm.
    if (polling) {
        throw new IllegalStateException("Cannot set subscriber if polling");
    }
    synchronized (this) {
        // Mark the mode and delegate the actual subscription while holding the lock.
        subscribing = true;
        super.subscribe(subscriber);
    }
}
@Test
public void shouldAllowSubscribeIfComplete() throws Exception {
    // Given: the underlying query has already completed before anyone subscribes.
    givenPublisherAcceptsOneRow();
    completeQueryResult();

    // When: a late subscriber attaches and requests one row.
    subscribe();
    subscription.request(1);

    // Then: the buffered row is delivered and completion is propagated.
    assertThatEventually(() -> subscriberReceivedRow, is(true));
    assertThatEventually(() -> subscriberCompleted, is(true));
}
/**
 * Returns the value at the given JSON path as a list.
 * Thin typed alias for {@code get(String)}; the unchecked conversion happens at
 * the caller's assignment, so a non-list value would only fail on use.
 */
public <T> List<T> getList(String path) {
    return get(path);
}
@Test
public void getList() {
    // All four book categories are returned, including the expected samples.
    final JsonPath jsonPath = new JsonPath(JSON);
    final List<String> categories = jsonPath.get("store.book.category");
    assertThat(categories.size(), equalTo(4));
    assertThat(categories, hasItems("reference", "fiction"));
}
@VisibleForTesting TransMeta filterPrivateDatabases( TransMeta transMeta ) { Set<String> privateDatabases = transMeta.getPrivateDatabases(); if ( privateDatabases != null ) { // keep only private transformation databases for ( Iterator<DatabaseMeta> it = transMeta.getDatabases().iterator(); it.hasNext(); ) { DatabaseMeta databaseMeta = it.next(); String databaseName = databaseMeta.getName(); if ( !privateDatabases.contains( databaseName ) && !transMeta.isDatabaseConnectionUsed( databaseMeta ) ) { it.remove(); } } } return transMeta; }
@Test
public void filterPrivateDatabasesNoPrivateDatabaseTest() {
    // With an empty (non-null) private set and no used connections, every
    // database must be filtered out.
    final IUnifiedRepository repository = mock( IUnifiedRepository.class );
    final TransMeta transMeta = new TransMeta();
    transMeta.setDatabases( getDummyDatabases() );
    transMeta.setPrivateDatabases( new HashSet<>() );
    final StreamToTransNodeConverter converter = new StreamToTransNodeConverter( repository );
    assertEquals( 0, converter.filterPrivateDatabases( transMeta ).getDatabases().size() );
}
@Override
public synchronized void init(ProcessingEnvironment processingEnv) {
    super.init(processingEnv);
    // If @KoraApp is not on the processing classpath there is nothing for this
    // processor to do: bail out early and leave `initialized` false.
    this.koraAppElement = this.elements.getTypeElement(CommonClassNames.koraApp.canonicalName());
    if (this.koraAppElement == null) {
        return;
    }
    // Resolve the remaining marker annotations used during graph processing.
    this.moduleElement = this.elements.getTypeElement(CommonClassNames.module.canonicalName());
    this.koraSubmoduleElement = this.elements.getTypeElement(CommonClassNames.koraSubmodule.canonicalName());
    this.componentElement = this.elements.getTypeElement(CommonClassNames.component.canonicalName());
    this.initialized = true;
    this.ctx = new ProcessingContext(processingEnv);
    log.info("@KoraApp processor started");
}
@Test
void testGenericCase() throws Throwable {
    // Build the application graph for AppWithComponents and check it wires up
    // the expected number of nodes.
    var graphDraw = testClass(AppWithComponents.class);
    // NOTE(review): `graph` is unused; init() appears to be called for its side
    // effect (verifying the graph initializes without throwing) — confirm.
    var graph = graphDraw.init();
    Assertions.assertThat(graphDraw.getNodes()).hasSize(5);
}
/**
 * No-op override: this API variant does not record profile "set once"
 * properties, so no event is emitted (the paired test verifies that no track
 * callback fires).
 */
@Override
public void profileSetOnce(JSONObject properties) {

}
@Test
public void testProfileSetOnce() {
    // profileSetOnce is a no-op in this variant: register a callback that fails
    // on ANY tracked event to prove nothing gets emitted.
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            Assert.fail();
            return false;
        }
    });
    JSONObject jsonObject = new JSONObject();
    try {
        jsonObject.put("abcd", "123");
        jsonObject.put("abcd2", "1232");
    } catch (JSONException e) {
        e.printStackTrace();
    }
    mSensorsAPI.profileSetOnce(jsonObject);
}
/**
 * Null-aware, direction-aware comparison of two values for ORDER BY handling.
 * Nulls sort according to {@code nullsOrderType}; strings may be compared
 * case-insensitively; non-null values honor the requested order direction.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public static int compareTo(final Comparable thisValue, final Comparable otherValue, final OrderDirection orderDirection,
                            final NullsOrderType nullsOrderType, final boolean caseSensitive) {
    if (null == thisValue && null == otherValue) {
        return 0;
    }
    if (null == thisValue) {
        return NullsOrderType.FIRST == nullsOrderType ? -1 : 1;
    }
    if (null == otherValue) {
        return NullsOrderType.FIRST == nullsOrderType ? 1 : -1;
    }
    if (!caseSensitive && thisValue instanceof String && otherValue instanceof String) {
        return compareToCaseInsensitiveString((String) thisValue, (String) otherValue, orderDirection);
    }
    // Fixed: descending order previously negated compareTo's result, which
    // overflows (stays negative) when the result is Integer.MIN_VALUE. Swapping
    // the receiver is sign-correct per the Comparable contract.
    return OrderDirection.ASC == orderDirection ? thisValue.compareTo(otherValue) : otherValue.compareTo(thisValue);
}
@Test
void assertCompareToWhenAsc() {
    // Ascending order: 1 sorts before 2, so the comparison yields -1.
    final int result = CompareUtils.compareTo(1, 2, OrderDirection.ASC, NullsOrderType.FIRST, caseSensitive);
    assertThat(result, is(-1));
}
/**
 * Validates that the given internal topics exist on the brokers with the
 * expected partition count and cleanup policy, retrying until
 * {@code retryTimeoutMs} elapses.
 *
 * @return the accumulated validation result (missing topics and misconfigurations)
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());

    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;

    final ValidationResult validationResult = new ValidationResult();
    // Two independent work lists: topic descriptions validate partition counts,
    // topic configs validate cleanup policies.
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult =
                adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet())
            );
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }

        // Drain the outstanding futures; topics whose futures are not yet done
        // remain in the maps and are polled again after a short sleep.
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
                );
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
                );
            }
            // Give up (with a descriptive error) once the overall deadline passes.
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
            );
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }

        // Back off before re-issuing describe requests for any topics that
        // failed transiently and are still pending validation.
        maybeSleep(
            Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
            deadline,
            "validated"
        );
    }

    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
@Test
public void shouldRetryWhenCallsThrowTimeoutExceptionDuringValidation() {
    // The first two admin requests time out; validate() must retry until the
    // broker answers and then report the topic as fully valid.
    setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
    mockAdminClient.timeoutNextRequest(2);
    final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);

    final ValidationResult validationResult =
        internalTopicManager.validate(Collections.singletonMap(topic1, internalTopicConfig));

    assertThat(validationResult.missingTopics(), empty());
    assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
}
/**
 * Parses a single migration statement: substitutes {@code ${...}} variables,
 * re-parses the substituted text, and attaches a built AST only for commands
 * that are not plain "statements" (those are forwarded as raw text).
 *
 * @throws MigrationException when the original statement fails to parse
 */
public static ParsedCommand parse( // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    final String sql, final Map<String, String> variables) {
  validateSupportedStatementType(sql);
  final String substituted;
  try {
    // First parse: needed so substitution operates on a parsed statement.
    substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables);
  } catch (ParseFailedException e) {
    throw new MigrationException(String.format(
        "Failed to parse the statement. Statement: %s. Reason: %s", sql, e.getMessage()));
  }
  // Second parse: run on the substituted text so the AST reflects resolved values.
  final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted)
      .get(0).getStatement();
  final boolean isStatement =
      StatementType.get(statementContext.statement().getClass()) == StatementType.STATEMENT;
  return new ParsedCommand(substituted,
      isStatement ? Optional.empty()
          : Optional.of(new AstBuilder(TypeRegistry.EMPTY).buildStatement(statementContext)));
}
@Test
public void shouldParseAssertSchema() {
    // Given: three ASSERT SCHEMA variants — by id only, NOT EXISTS by subject
    // with a timeout, and by subject+id via ${...} variable substitution.
    final String assertTopics = "assert schema id 3; assert not exists schema subject 'abcd' timeout 4 minutes;"
        + "assert schema subject ${subject} id ${id} timeout 10 seconds;";

    // When:
    List<CommandParser.ParsedCommand> commands = parse(assertTopics, ImmutableMap.of("subject", "name", "id", "4"));

    // Then:
    assertThat(commands.size(), is(3));

    // First command: id only, existence check, no subject/timeout.
    assertThat(commands.get(0).getCommand(), is("assert schema id 3;"));
    assertThat(commands.get(0).getStatement().isPresent(), is (true));
    assertThat(commands.get(0).getStatement().get(), instanceOf(AssertSchema.class));
    assertThat(((AssertSchema) commands.get(0).getStatement().get()).getSubject(), is(Optional.empty()));
    assertThat(((AssertSchema) commands.get(0).getStatement().get()).getId().get(), is(3));
    assertThat(((AssertSchema) commands.get(0).getStatement().get()).checkExists(), is(true));
    assertThat(((AssertSchema) commands.get(0).getStatement().get()).getTimeout(), is(Optional.empty()));

    // Second command: NOT EXISTS by subject, with a 4-minute timeout.
    assertThat(commands.get(1).getCommand(), is(
        "assert not exists schema subject 'abcd' timeout 4 minutes;"));
    assertThat(commands.get(1).getStatement().isPresent(), is (true));
    assertThat(commands.get(1).getStatement().get(), instanceOf(AssertSchema.class));
    assertThat(((AssertSchema) commands.get(1).getStatement().get()).getSubject().get(), is("abcd"));
    assertThat(((AssertSchema) commands.get(1).getStatement().get()).getId(), is(Optional.empty()));
    assertThat(((AssertSchema) commands.get(1).getStatement().get()).checkExists(), is(false));
    assertThat(((AssertSchema) commands.get(1).getStatement().get()).getTimeout().get(),
        is(WindowTimeClause.of(4, TimeUnit.MINUTES.name())));

    // Third command: ${subject}/${id} resolved to "name"/4 before parsing.
    assertThat(commands.get(2).getCommand(), is(
        "assert schema subject name id 4 timeout 10 seconds;"));
    assertThat(commands.get(2).getStatement().isPresent(), is (true));
    assertThat(commands.get(2).getStatement().get(), instanceOf(AssertSchema.class));
    assertThat(((AssertSchema) commands.get(2).getStatement().get()).getSubject().get(), is("name"));
    assertThat(((AssertSchema) commands.get(2).getStatement().get()).getId().get(), is(4));
    assertThat(((AssertSchema) commands.get(2).getStatement().get()).checkExists(), is(true));
    assertThat(((AssertSchema) commands.get(2).getStatement().get()).getTimeout().get(),
        is(WindowTimeClause.of(10, TimeUnit.SECONDS.name())));
}
/**
 * Column length is not supported for int8 array binary protocol values.
 *
 * @throws UnsupportedSQLOperationException always
 */
@Override
public int getColumnLength(final Object value) {
    throw new UnsupportedSQLOperationException("PostgreSQLInt8ArrayBinaryProtocolValue.getColumnLength()");
}
@Test
void assertGetColumnLength() {
    // getColumnLength is unsupported for int8 arrays and must always throw.
    assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().getColumnLength("val"));
}
public WorkflowInstance createWorkflowInstance( Workflow workflowDef, Long internalId, long workflowVersionId, RunProperties runProperties, RunRequest runRequest) { WorkflowInstance instance = new WorkflowInstance(); instance.setWorkflowId(workflowDef.getId()); instance.setInternalId(internalId); instance.setWorkflowVersionId(workflowVersionId); // latest workflow instance id is unknown, update it later. instance.setWorkflowInstanceId(Constants.LATEST_ONE); // set correlation id if request contains it, otherwise, update it later inside DAO instance.setCorrelationId(runRequest.getCorrelationId()); instance.setRunProperties(runProperties); // it includes runtime params and tags. Its dag is versioned dag. Workflow workflow = overrideWorkflowConfig(workflowDef, runRequest); instance.setRuntimeWorkflow(workflow); // update newly created workflow instance updateWorkflowInstance(instance, runRequest); return instance; }
@Test
public void testCreateWorkflowInstance() {
    // A fresh-run request must yield a CREATED instance that carries the
    // definition id, the supplied internal id, generated params and uuid, and
    // exactly one params merge through the params manager.
    RunRequest request = RunRequest.builder()
        .initiator(new ManualInitiator())
        .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
        .build();
    Workflow workflow = definition.getWorkflow();
    WorkflowInstance instance =
        workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
    assertEquals(workflow.getId(), instance.getWorkflowId());
    assertEquals(WorkflowInstance.Status.CREATED, instance.getStatus());
    assertEquals(12345L, instance.getInternalId().longValue());
    assertNotNull(instance.getParams());
    assertNotNull(instance.getWorkflowUuid());
    Mockito.verify(paramsManager, Mockito.times(1)).generateMergedWorkflowParams(any(), any());
}
/**
 * Dissects an archive control request event into human-readable text.
 * Decodes the SBE message header, then dispatches on the event code to wrap the
 * matching request decoder over the message body and append its fields to
 * {@code builder}. Unknown codes append ": unknown command" instead of throwing.
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // Append the common log header first; every decoder below starts right
    // after the SBE message header.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);

    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    // Every case follows the same shape: wrap the request-specific flyweight
    // decoder (identical wrap arguments throughout) and append its fields.
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            builder.append(": unknown command");
    }
}
@Test
void controlRequestStopRecordingSubscription()
{
    // Given: a log buffer whose header reports 12 bytes captured of 32 total,
    // with a fixed 10s (10_000_000_000ns) timestamp supplier.
    internalEncodeLogHeader(buffer, 0, 12, 32, () -> 10_000_000_000L);
    final StopRecordingSubscriptionRequestEncoder requestEncoder = new StopRecordingSubscriptionRequestEncoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(22)
        .correlationId(33)
        .subscriptionId(888);

    // When: the dissector renders the encoded control request.
    dissectControlRequest(CMD_IN_STOP_RECORDING_SUBSCRIPTION, buffer, 0, builder);

    // Then: the output carries the timestamp, context, command name,
    // captured/total lengths and every field of the request.
    assertEquals("[10.000000000] " + CONTEXT + ": " + CMD_IN_STOP_RECORDING_SUBSCRIPTION.name() + " [12/32]:" +
        " controlSessionId=22" +
        " correlationId=33" +
        " subscriptionId=888",
        builder.toString());
}
/**
 * Executes an ASSERT SCHEMA statement by delegating to {@code AssertExecutor}
 * with schema-specific check and response-building callbacks.
 *
 * @param statement        the configured ASSERT SCHEMA statement
 * @param sessionProperties session properties (unused by this executor)
 * @param executionContext supplies the config holding the assertion timeout
 * @param serviceContext   supplies the schema registry client
 * @return the executor response wrapping an {@code AssertSchemaEntity}
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<AssertSchema> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  // Timeout governs how long the assertion may wait for the schema to (not) appear.
  final int timeoutMs =
      executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS);

  return AssertExecutor.execute(
      statement.getMaskedStatementText(),
      statement.getStatement(),
      timeoutMs,
      serviceContext,
      // Performs the actual registry lookup for subject/id existence.
      (assertStmt, sc) -> assertSchema(
          sc.getSchemaRegistryClient(),
          ((AssertSchema) assertStmt).getSubject(),
          ((AssertSchema) assertStmt).getId(),
          assertStmt.checkExists()),
      // Builds the entity echoed back to the client on success.
      (text, assertStmt) -> new AssertSchemaEntity(
          text,
          ((AssertSchema) assertStmt).getSubject(),
          ((AssertSchema) assertStmt).getId(),
          assertStmt.checkExists())
  );
}
@Test
public void shouldAssertSchemaById() {
    // Given: an ASSERT SCHEMA statement that identifies the schema by id only.
    final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.empty(), Optional.of(44), Optional.empty(), true);
    final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
        .of(KsqlParser.PreparedStatement.of("", assertSchema),
            SessionConfig.of(ksqlConfig, ImmutableMap.of()));

    // When:
    final Optional<KsqlEntity> entity = AssertSchemaExecutor
        .execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity();

    // Then: the entity echoes back the id (no subject) and reports existence.
    assertThat("expected response!", entity.isPresent());
    assertThat(((AssertSchemaEntity) entity.get()).getSubject(), is(Optional.empty()));
    assertThat(((AssertSchemaEntity) entity.get()).getId(), is(Optional.of(44)));
    assertThat(((AssertSchemaEntity) entity.get()).getExists(), is(true));
}
@Override public Map<K, V> loadAll(Collection<K> keys) { awaitSuccessfulInit(); Object[] keysArray = keys.toArray(); String sql = queries.loadAll(keys.size()); try (SqlResult queryResult = sqlService.execute(sql, keysArray)) { Iterator<SqlRow> it = queryResult.iterator(); Map<K, V> result = new HashMap<>(); while (it.hasNext()) { SqlRow sqlRow = it.next(); // If there is a single column as the value, return that column as the value if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) { K id = sqlRow.getObject(genericMapStoreProperties.idColumn); result.put(id, sqlRow.getObject(1)); } else { K id = sqlRow.getObject(genericMapStoreProperties.idColumn); //noinspection unchecked V record = (V) toGenericRecord(sqlRow, genericMapStoreProperties); result.put(id, record); } } return result; } }
@Test
public void givenRowAndIdColumn_whenLoadAllMultipleItems_thenReturnGenericRecords() {
    // Given: a table with two rows, configured with a non-default id column name.
    ObjectSpec spec = objectProvider.createObject(mapName, true);
    objectProvider.insertItems(spec, 2);

    Properties properties = new Properties();
    properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
    properties.setProperty(ID_COLUMN_PROPERTY, "person-id");
    mapLoader = createMapLoader(properties, hz);

    // When: both keys are loaded in a single call.
    Map<Integer, GenericRecord> records = mapLoader.loadAll(newArrayList(0, 1));

    // Then: one GenericRecord per inserted row is returned.
    assertThat(records).hasSize(2);
}
public static boolean isIp4Address(String ipAddress) { try { final InetAddress address = InetAddresses.forString(ipAddress); if (address instanceof Inet4Address) { return true; } } catch (IllegalArgumentException e) { // Absorb exception. } return false; }
@Test
public void testValidIp4Address() {
    // Literal IPv4 addresses are accepted.
    assertTrue(DnsClient.isIp4Address("8.8.8.8"));
    assertTrue(DnsClient.isIp4Address("127.0.0.1"));
    // Malformed literals and hostnames are rejected.
    assertFalse(DnsClient.isIp4Address("t127.0.0.1"));
    assertFalse(DnsClient.isIp4Address("google.com"));
}
/**
 * Converts a Hudi (Avro) schema into the engine's {@code Type}.
 *
 * Primitive Avro types map to their primitive counterparts, with Avro logical
 * types refining INT/LONG/FIXED/BYTES into DATE/TIME/DATETIME/DECIMAL.
 * Complex types (ARRAY/RECORD/MAP/UNION) convert recursively. Any unsupported
 * shape (ENUM, multi-branch non-null unions, unconvertible element/field/value
 * types) collapses to UNKNOWN_TYPE instead of throwing.
 */
public static Type fromHudiType(Schema avroSchema) {
    Schema.Type columnType = avroSchema.getType();
    LogicalType logicalType = avroSchema.getLogicalType();
    PrimitiveType primitiveType = null;
    // Failure flag instead of early return, so every unsupported branch funnels
    // into the single UNKNOWN_TYPE exit at the bottom.
    boolean isConvertedFailed = false;
    switch (columnType) {
        case BOOLEAN:
            primitiveType = PrimitiveType.BOOLEAN;
            break;
        case INT:
            // Logical types refine a raw Avro int into date/time-of-day values.
            if (logicalType instanceof LogicalTypes.Date) {
                primitiveType = PrimitiveType.DATE;
            } else if (logicalType instanceof LogicalTypes.TimeMillis) {
                primitiveType = PrimitiveType.TIME;
            } else {
                primitiveType = PrimitiveType.INT;
            }
            break;
        case LONG:
            // Logical types refine a raw Avro long into time/timestamp values.
            if (logicalType instanceof LogicalTypes.TimeMicros) {
                primitiveType = PrimitiveType.TIME;
            } else if (logicalType instanceof LogicalTypes.TimestampMillis
                    || logicalType instanceof LogicalTypes.TimestampMicros) {
                primitiveType = PrimitiveType.DATETIME;
            } else {
                primitiveType = PrimitiveType.BIGINT;
            }
            break;
        case FLOAT:
            primitiveType = PrimitiveType.FLOAT;
            break;
        case DOUBLE:
            primitiveType = PrimitiveType.DOUBLE;
            break;
        case STRING:
            return ScalarType.createDefaultCatalogString();
        case ARRAY:
            // Recursive conversion; an unknown element type yields a non-array result.
            Type type = new ArrayType(fromHudiType(avroSchema.getElementType()));
            if (type.isArrayType()) {
                return type;
            } else {
                isConvertedFailed = true;
                break;
            }
        case FIXED:
        case BYTES:
            if (logicalType instanceof LogicalTypes.Decimal) {
                int precision = ((LogicalTypes.Decimal) logicalType).getPrecision();
                int scale = ((LogicalTypes.Decimal) logicalType).getScale();
                return ScalarType.createUnifiedDecimalType(precision, scale);
            } else {
                // Raw binary without a decimal annotation is surfaced as VARCHAR.
                primitiveType = PrimitiveType.VARCHAR;
                break;
            }
        case RECORD:
            // Struct type
            List<Schema.Field> fields = avroSchema.getFields();
            Preconditions.checkArgument(fields.size() > 0);
            ArrayList<StructField> structFields = new ArrayList<>(fields.size());
            for (Schema.Field field : fields) {
                String fieldName = field.name();
                Type fieldType = fromHudiType(field.schema());
                if (fieldType.isUnknown()) {
                    // One unconvertible field invalidates the whole struct.
                    isConvertedFailed = true;
                    break;
                }
                structFields.add(new StructField(fieldName, fieldType));
            }
            if (!isConvertedFailed) {
                return new StructType(structFields);
            }
            break;
        case MAP:
            Schema value = avroSchema.getValueType();
            Type valueType = fromHudiType(value);
            if (valueType.isUnknown()) {
                isConvertedFailed = true;
                break;
            }
            if (!isConvertedFailed) {
                // Hudi map's key must be string
                return new MapType(ScalarType.createDefaultCatalogString(), valueType);
            }
            break;
        case UNION:
            // Only nullable unions (NULL plus exactly one concrete type) are supported.
            List<Schema> nonNullMembers = avroSchema.getTypes().stream()
                    .filter(schema -> !Schema.Type.NULL.equals(schema.getType()))
                    .collect(Collectors.toList());
            if (nonNullMembers.size() == 1) {
                return fromHudiType(nonNullMembers.get(0));
            } else {
                isConvertedFailed = true;
                break;
            }
        case ENUM:
        default:
            isConvertedFailed = true;
            break;
    }
    if (isConvertedFailed) {
        primitiveType = PrimitiveType.UNKNOWN_TYPE;
    }
    return ScalarType.createType(primitiveType);
}
@Test
public void testMapHudiSchema() {
    // Build an Avro map whose values are a record {field1: int, field2: string}.
    Schema.Field field1 = new Schema.Field("field1", Schema.create(Schema.Type.INT), null, null);
    Schema.Field field2 = new Schema.Field("field2", Schema.create(Schema.Type.STRING), null, null);
    List<Schema.Field> fields = new LinkedList<>();
    fields.add(field1);
    fields.add(field2);
    Schema structSchema = Schema.createRecord(fields);
    Schema mapSchema = Schema.createMap(structSchema);

    // Expected: MAP<VARCHAR, STRUCT<field1 INT, field2 VARCHAR>> — Hudi map
    // keys are always the default catalog string type.
    StructField structField1 = new StructField("field1", ScalarType.createType(PrimitiveType.INT));
    StructField structField2 = new StructField("field2", ScalarType.createDefaultCatalogString());
    ArrayList<StructField> structFields = new ArrayList<>();
    structFields.add(structField1);
    structFields.add(structField2);
    StructType structType = new StructType(structFields);
    MapType mapType = new MapType(ScalarType.createDefaultCatalogString(), structType);
    Assert.assertEquals(mapType, fromHudiType(mapSchema));

    // A map whose value type cannot be converted collapses to UNKNOWN_TYPE.
    mapSchema = Schema.createMap(Schema.create(Schema.Type.NULL));
    Assert.assertEquals(UNKNOWN_TYPE, fromHudiType(mapSchema));
}
/**
 * Resolves the key schema for the given topic/schema-id from the registry.
 *
 * Delegates to the shared resolution path, selecting the key side.
 *
 * @param topicName      topic whose key schema to resolve, if known
 * @param schemaId       explicit schema id to use, if provided
 * @param expectedFormat format the schema must be compatible with
 * @param serdeFeatures  serde features to apply during translation
 * @return the resolved schema, or a failure-carrying result
 */
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
  // Trailing flag picks key (true) vs value (false) schema resolution.
  return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, true);
}
@Test
public void shouldReturnErrorFromGetKeySchemaIfCanNotConvertToConnectSchema() {
    // Given: schema translation fails with an arbitrary runtime error.
    when(schemaTranslator.toColumns(any(), any(), anyBoolean()))
        .thenThrow(new RuntimeException("it went boom"));

    // When:
    final SchemaResult result = supplier.getKeySchema(Optional.of(TOPIC_NAME), Optional.empty(), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));

    // Then: no schema is returned, and the failure message carries the topic
    // name, the underlying error and the raw schema text.
    assertThat(result.schemaAndId, is(Optional.empty()));
    assertThat(result.failureReason.get().getMessage(), containsString(
        "Unable to verify if the key schema for topic: some-topic is compatible with ksqlDB."));
    assertThat(result.failureReason.get().getMessage(), containsString(
        "it went boom"));
    assertThat(result.failureReason.get().getMessage(), containsString(AVRO_SCHEMA));
}
/**
 * Schedules and performs "task movements": for each active task assigned to a
 * client that is not caught up on it (while another client is), moves the
 * active task to a caught-up client, attempting to leave a warm-up replica on
 * the originally desired client so the task can move back once caught up.
 *
 * @param tasksToCaughtUpClients  per task, the set of clients fully caught up on it
 * @param tasksToClientByLag      per task, clients ordered by lag
 * @param clientStates            mutable per-client assignment state (updated in place)
 * @param warmups                 out-param: warm-up tasks recorded per client
 * @param remainingWarmupReplicas remaining budget of warm-up replicas
 * @return the number of movements that were scheduled (and performed)
 */
static int assignActiveTaskMovements(final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients,
                                     final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag,
                                     final Map<ProcessId, ClientState> clientStates,
                                     final Map<ProcessId, Set<TaskId>> warmups,
                                     final AtomicInteger remainingWarmupReplicas) {
    final BiFunction<ProcessId, TaskId, Boolean> caughtUpPredicate =
        (client, task) -> taskIsCaughtUpOnClient(task, client, tasksToCaughtUpClients);

    // Clients ordered by current task load, constrained to those caught up on
    // the task being placed, so the least-loaded caught-up client is picked.
    final ConstrainedPrioritySet caughtUpClientsByTaskLoad = new ConstrainedPrioritySet(
        caughtUpPredicate,
        client -> clientStates.get(client).assignedTaskLoad()
    );

    // Movements with the fewest caught-up candidates go first (ties broken by
    // task id) since they have the least placement flexibility.
    final Queue<TaskMovement> taskMovements = new PriorityQueue<>(
        Comparator.comparing(TaskMovement::numCaughtUpClients).thenComparing(TaskMovement::task)
    );

    for (final Map.Entry<ProcessId, ClientState> clientStateEntry : clientStates.entrySet()) {
        final ProcessId client = clientStateEntry.getKey();
        final ClientState state = clientStateEntry.getValue();
        for (final TaskId task : state.activeTasks()) {
            // if the desired client is not caught up, and there is another client that _is_ more caught up, then
            // we schedule a movement, so we can move the active task to a more caught-up client. We'll try to
            // assign a warm-up to the desired client so that we can move it later on.
            if (taskIsNotCaughtUpOnClientAndOtherMoreCaughtUpClientsExist(task, client, clientStates, tasksToCaughtUpClients, tasksToClientByLag)) {
                taskMovements.add(new TaskMovement(task, client, tasksToCaughtUpClients.get(task)));
            }
        }
        caughtUpClientsByTaskLoad.offer(client);
    }

    final int movementsNeeded = taskMovements.size();

    while (!taskMovements.isEmpty()) {
        final TaskMovement movement = taskMovements.poll();

        // Attempt to find a caught up standby, otherwise find any caught up client, failing that use the most
        // caught up client.
        final boolean moved =
            tryToSwapStandbyAndActiveOnCaughtUpClient(clientStates, caughtUpClientsByTaskLoad, movement)
                || tryToMoveActiveToCaughtUpClientAndTryToWarmUp(clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement)
                || tryToMoveActiveToMostCaughtUpClient(tasksToClientByLag, clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement);

        if (!moved) {
            // Scheduling guaranteed a better-caught-up client existed; reaching this
            // point means an invariant was violated elsewhere.
            throw new IllegalStateException("Tried to move task to more caught-up client as scheduled before but none exist");
        }
    }

    return movementsNeeded;
}
@Test
public void shouldNotCountPreviousStandbyTasksTowardsMaxWarmupReplicas() {
    // Given: zero warm-up budget; client1 holds a caught-up standby of TASK_0_0
    // while client2 holds the (not caught-up) active.
    final int maxWarmupReplicas = 0;
    final Set<TaskId> allTasks = mkSet(TASK_0_0);
    final ClientState client1 = getClientStateWithActiveAssignment(mkSet(), mkSet(TASK_0_0), allTasks);
    client1.assignStandby(TASK_0_0);
    final ClientState client2 = getClientStateWithActiveAssignment(mkSet(TASK_0_0), mkSet(), allTasks);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(client1, client2);

    final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients = mkMap(
        mkEntry(TASK_0_0, mkSortedSet(PID_1))
    );
    final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag = mkMap(
        mkEntry(TASK_0_0, mkOrderedSet(PID_1, PID_2))
    );

    // When/Then: exactly one movement is performed despite the zero budget.
    assertThat(
        "should have assigned movements",
        assignActiveTaskMovements(
            tasksToCaughtUpClients,
            tasksToClientByLag,
            clientStates,
            new TreeMap<>(),
            new AtomicInteger(maxWarmupReplicas)
        ),
        is(1)
    );
    // Even though we have no warmups allowed, we still let client1 take over active processing while
    // client2 "warms up" because client1 was a caught-up standby, so it can "trade" standby status with
    // the not-caught-up active client2.
    // I.e., when you have a caught-up standby and a not-caught-up active, you can just swap their roles
    // and not call it a "warmup".
    assertThat(client1, hasProperty("activeTasks", ClientState::activeTasks, mkSet(TASK_0_0)));
    assertThat(client2, hasProperty("activeTasks", ClientState::activeTasks, mkSet()));
    assertThat(client1, hasProperty("standbyTasks", ClientState::standbyTasks, mkSet()));
    assertThat(client2, hasProperty("standbyTasks", ClientState::standbyTasks, mkSet(TASK_0_0)));
}
/**
 * @return the human-readable description of this discovery client
 */
@Override
public String description() {
    return this.description;
}
@Test
public void testDescription() {
    // The description is a fixed, human-readable identifier for this client.
    assertThat(client.description()).isEqualTo("Spring Cloud Tencent Polaris Discovery Client.");
}
/**
 * Deletes the role with the given id; when the deletion removed something,
 * all users associated with that role are dissociated from it.
 *
 * @param id the role id to delete
 * @return the number of deleted entries (as reported by the parent service)
 */
@Override
public int delete(String id) {
    // Capture the role before deletion so users can still be dissociated afterwards.
    final Optional<AuthzRoleDTO> toDelete = get(id);
    final int deletedCount = super.delete(id);
    if (deletedCount > 0) {
        toDelete.ifPresent(roleDto -> userService.dissociateAllUsersFromRole(roleDto.toLegacyRole()));
    }
    return deletedCount;
}
@Test
void delete() {
    // Given: an existing role (id comes from the seeded fixture data).
    final String roleId = "5d41bb973086a840541a3ed2";
    final Optional<AuthzRoleDTO> role = service.get(roleId);
    assertThat(role).isPresent();

    // When:
    service.delete(roleId);

    // Then: users are dissociated from the deleted role.
    verify(userService).dissociateAllUsersFromRole(role.get().toLegacyRole());
}
/**
 * Serializes the given object to a JSON string via {@code toJson}.
 *
 * Null-safe: a {@code null} JSON value renders as the string "null"
 * (that is the documented behavior of {@code String.valueOf}).
 */
public String toJsonString(Object object) {
    final Object json = toJson(object);
    return String.valueOf(json);
}
@Test
public void testComplexObject() {
    // A deeply nested structure holding a secret at the innermost level.
    Object complex = Pair.of("nested", Pair.of("moreNested", Pair.of("mostNestedSecret", SECRET)));
    String output = _obfuscator.toJsonString(complex);
    // The obfuscator must mask the secret regardless of nesting depth.
    Assert.assertFalse(output.contains(SECRET));
}
/**
 * Fetches the session windows for the given key, consulting each underlying
 * store in turn and returning the first non-empty iterator.
 *
 * @param key the key to fetch; must not be null
 * @return an iterator over the key's session windows, or an empty iterator
 *         when no store holds any
 * @throws InvalidStateStoreException if an underlying store has been migrated
 */
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
    for (final ReadOnlySessionStore<K, V> store : stores) {
        try {
            final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
            if (!result.hasNext()) {
                // Empty iterator: release it and keep looking in the remaining stores.
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException ise) {
            // Rethrow with context; attach the original exception as the cause so
            // the full stack trace is preserved (the message alone loses it).
            throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
                " and may have been migrated to another instance; " +
                "please re-discover its location from the state metadata. " +
                "Original error message: " + ise, ise);
        }
    }
    return KeyValueIterators.emptyIterator();
}
@Test
public void shouldFetchKeyRangeAcrossStoresWithNullKeyFromKeyTo() {
    // Given: two underlying stores, each holding one session window.
    final ReadOnlySessionStoreStub<String, Long> secondUnderlying = new ReadOnlySessionStoreStub<>();
    stubProviderTwo.addStore(storeName, secondUnderlying);
    underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L);
    secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L);

    // When: fetching with null from/to keys, i.e. an unbounded key range.
    final List<KeyValue<Windowed<String>, Long>> results = StreamsTestUtils.toList(sessionStore.fetch(null, null));

    // Then: entries from both stores are returned.
    assertThat(results, equalTo(Arrays.asList(
        KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L),
        KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L))));
}
/**
 * Serializes the given row as a single CSV record (without the trailing
 * record separator) encoded as UTF-8 bytes.
 *
 * @param topic the topic being serialized to (unused)
 * @param data  the row's column values; null yields a null payload
 * @return the UTF-8 encoded CSV record, or null for null input
 * @throws SerializationException if CSV printing fails for any reason
 */
@Override
public byte[] serialize(final String topic, final List<?> data) {
    if (data == null) {
        return null;
    }
    // try-with-resources ensures the printer/writer are always released;
    // closing a StringWriter is a no-op, so behavior is unchanged.
    try (StringWriter stringWriter = new StringWriter();
         CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat)) {
        csvPrinter.printRecord(() -> new FieldIterator(data, schema));
        csvPrinter.flush();
        final String result = stringWriter.toString();
        // printRecord appends the format's record separator (CRLF, 2 chars here);
        // strip it so the payload is a bare record.
        return result.substring(0, result.length() - 2).getBytes(StandardCharsets.UTF_8);
    } catch (final Exception e) {
        throw new SerializationException("Error serializing CSV message", e);
    }
}
@Test
public void shouldSerializeLargeDecimalWithoutThousandSeparator() {
    // Given: a single DECIMAL(4, 2) column holding a large value.
    givenSingleColumnSerializer(SqlTypes.decimal(4, 2));
    final List<?> values = Collections.singletonList(new BigDecimal("1234567890.00"));

    // When:
    final byte[] bytes = serializer.serialize("", values);

    // Then: the decimal is rendered without locale-specific grouping separators.
    assertThat(new String(bytes, StandardCharsets.UTF_8), is("1234567890.00"));
}
/**
 * Injects schemas inferred from the schema registry into CREATE (STREAM|TABLE)
 * and CREATE ... AS SELECT statements; all other statements pass through as-is.
 *
 * KsqlStatementExceptions propagate untouched; any other KsqlException is
 * wrapped into a KsqlStatementException carrying the (masked) statement text.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  if (!(statement.getStatement() instanceof CreateSource)
      && !(statement.getStatement() instanceof CreateAsSelect)) {
    return statement;
  }

  try {
    if (statement.getStatement() instanceof CreateSource) {
      final ConfiguredStatement<CreateSource> createStatement =
          (ConfiguredStatement<CreateSource>) statement;
      // Falls back to the original statement when nothing needed injecting.
      return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
    } else {
      final ConfiguredStatement<CreateAsSelect> createStatement =
          (ConfiguredStatement<CreateAsSelect>) statement;
      return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
          createStatement);
    }
  } catch (final KsqlStatementException e) {
    throw e;
  } catch (final KsqlException e) {
    // buildErrorMessage already folds e's message chain into the text, so only
    // e's cause is attached here.
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
}
@Test
public void shouldInjectForCsStatement() {
    // Given: both key and value schemas can be inferred from the registry.
    givenKeyAndValueInferenceSupported();

    // When:
    final ConfiguredStatement<CreateStream> result = injector.inject(csStatement);

    // Then: the statement gains the inferred key and value columns, and its
    // masked text is rewritten with the fully resolved schema.
    assertThat(result.getStatement().getElements(),
        is(combineElements(INFERRED_KSQL_KEY_SCHEMA_STREAM, INFERRED_KSQL_VALUE_SCHEMA)));
    assertThat(result.getMaskedStatementText(), is(
        "CREATE STREAM `cs` ("
            + "`key` STRING KEY, "
            + "`intField` INTEGER, "
            + "`bigIntField` BIGINT, "
            + "`doubleField` DOUBLE, "
            + "`stringField` STRING, "
            + "`booleanField` BOOLEAN, "
            + "`arrayField` ARRAY<INTEGER>, "
            + "`mapField` MAP<STRING, BIGINT>, "
            + "`structField` STRUCT<`s0` BIGINT>, "
            + "`decimalField` DECIMAL(4, 2)) "
            + "WITH (KAFKA_TOPIC='some-topic', KEY_FORMAT='protobuf', VALUE_FORMAT='avro');"
    ));
}
/**
 * Translates a Connect schema into a KSQL row schema.
 *
 * The result must be a STRUCT with at least one supported column; anything
 * else (or an unsupported root type) raises a KsqlException.
 *
 * @param schema the Connect schema to translate
 * @return the translated STRUCT schema
 * @throws KsqlException if the schema is not structured, has no supported
 *         columns, or contains an unsupported root type
 */
public Schema toKsqlSchema(final Schema schema) {
    try {
        final Schema translated = toKsqlFieldSchema(schema);
        if (translated.type() != Schema.Type.STRUCT) {
            throw new KsqlException("KSQL stream/table schema must be structured");
        }

        if (translated.fields().isEmpty()) {
            throw new KsqlException("Schema does not include any columns with "
                + "types that ksqlDB supports."
                + System.lineSeparator()
                + "schema: " + FORMATTER.format(schema));
        }

        return translated;
    } catch (final UnsupportedTypeException e) {
        throw new KsqlException("Unsupported type at root of schema: " + e.getMessage(), e);
    }
}
@Test
public void shouldTranslateNested() {
    // Given: a struct containing a nested struct field.
    final Schema connectInnerSchema = SchemaBuilder
        .struct()
        .field("intField", Schema.INT32_SCHEMA)
        .build();
    final Schema connectSchema = SchemaBuilder
        .struct()
        .field("structField", connectInnerSchema)
        .build();

    // When:
    final Schema ksqlSchema = translator.toKsqlSchema(connectSchema);

    // Then: the nested struct survives translation, every inner field keeps
    // its (translated) name and type, and all inner fields become optional.
    assertThat(ksqlSchema.field(nameTranslator.apply("structField")), notNullValue());
    final Schema innerSchema = ksqlSchema.field(nameTranslator.apply("structField")).schema();
    assertThat(innerSchema.fields().size(), equalTo(connectInnerSchema.fields().size()));
    for (int i = 0; i < connectInnerSchema.fields().size(); i++) {
        assertThat(
            nameTranslator.apply(innerSchema.fields().get(i).name()),
            equalTo(nameTranslator.apply(connectInnerSchema.fields().get(i).name())));
        assertThat(
            innerSchema.fields().get(i).schema().type(),
            equalTo(connectInnerSchema.fields().get(i).schema().type()));
        assertThat(innerSchema.fields().get(i).schema().isOptional(), is(true));
    }
}
/**
 * Merges {@code paramsToMerge} into {@code params} in place.
 *
 * Literal MAP params merge recursively (with the parent's mode propagated into
 * the nested merge context); literal STRING_MAP params merge by key overwrite;
 * every other param is replaced wholesale by the merged definition. Permission
 * / mode enforcement happens inside buildMergedParamDefinition.
 *
 * @param params        base params, mutated in place with merged results
 * @param paramsToMerge params to fold in; {@code null} is a no-op
 * @param context       merge context (caller identity, upstream/restart flags)
 */
public static void mergeParams(
    Map<String, ParamDefinition> params,
    Map<String, ParamDefinition> paramsToMerge,
    MergeContext context) {
  if (paramsToMerge == null) {
    return;
  }
  // Walk both key sets; names present only in `params` have no incoming
  // definition and are left untouched.
  Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
      .forEach(
          name -> {
            ParamDefinition paramToMerge = paramsToMerge.get(name);
            if (paramToMerge == null) {
              return;
            }
            if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
              // Recursive merge of nested literal maps; the nested context carries
              // the existing param's mode (falling back to the incoming one).
              Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
              Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
              mergeParams(
                  baseMap,
                  toMergeMap,
                  MergeContext.copyWithParentMode(
                      context, params.getOrDefault(name, paramToMerge).getMode()));
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else if (paramToMerge.getType() == ParamType.STRING_MAP
                && paramToMerge.isLiteral()) {
              // Shallow merge: incoming string entries overwrite base entries.
              Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
              Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
              baseMap.putAll(toMergeMap);
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else {
              // Non-map (or non-literal) params: the incoming value wins outright.
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
            }
          });
}
@Test
public void testMergeUpstreamRestartWithMutableOnStart() throws JsonProcessingException {
    // Given: a MUTABLE_ON_START system-default param, and an upstream-restart
    // merge whose workflow definition tries to overwrite it with a less strict
    // MUTABLE mode.
    Map<String, ParamDefinition> allParams = parseParamDefMap(
        "{'workflow_default_param': {'type': 'STRING','value': 'default_value','mode': 'MUTABLE_ON_START', 'meta': {'source': 'SYSTEM_DEFAULT'}}}");
    Map<String, ParamDefinition> paramsToMerge = parseParamDefMap(
        "{'workflow_default_param': {'type': 'STRING','value': 'parent_wf_defined_value', 'mode': 'MUTABLE', 'meta': {'source': 'DEFINITION'}}}");
    // Then: the merge is rejected with a validation error naming the param.
    AssertHelper.assertThrows(
        "throws exception when a non-upstream source tries to merge param with less strict mode",
        MaestroValidationException.class,
        "Cannot modify param with mode [MUTABLE_ON_START] for parameter [workflow_default_param]",
        () -> ParamsMergeHelper.mergeParams(allParams, paramsToMerge, upstreamRestartMergeContext));
}
/**
 * Builds a mapping of master broker address to the addresses of its slaves
 * for the given cluster. Broker groups that currently have no master pool
 * their slaves under the shared {@code NO_MASTER_PLACEHOLDER} key.
 *
 * @param adminExt    admin client used to query cluster topology
 * @param clusterName the cluster to inspect; an unknown name yields an empty map
 * @return master address (or placeholder) -> slave addresses
 */
public static Map<String/*master addr*/, List<String>/*slave addr*/> fetchMasterAndSlaveDistinguish(
    final MQAdminExt adminExt, final String clusterName)
    throws InterruptedException, RemotingConnectException, RemotingTimeoutException,
    RemotingSendRequestException, MQBrokerException {
    Map<String, List<String>> masterAndSlaveMap = new HashMap<>(4);

    ClusterInfo clusterInfoSerializeWrapper = adminExt.examineBrokerClusterInfo();
    Set<String> brokerNameSet = clusterInfoSerializeWrapper.getClusterAddrTable().get(clusterName);
    if (brokerNameSet == null) {
        // Unknown cluster name: report the error and return an empty mapping.
        System.out.printf("[error] %s", ERROR_MESSAGE);
        return masterAndSlaveMap;
    }

    for (String brokerName : brokerNameSet) {
        BrokerData brokerData = clusterInfoSerializeWrapper.getBrokerAddrTable().get(brokerName);
        if (brokerData == null || brokerData.getBrokerAddrs() == null) {
            continue;
        }
        String masterAddr = brokerData.getBrokerAddrs().get(MixAll.MASTER_ID);
        if (masterAddr == null) {
            // Master-less group: slaves are pooled under the shared placeholder key.
            masterAndSlaveMap.putIfAbsent(NO_MASTER_PLACEHOLDER, new ArrayList<>());
        } else {
            masterAndSlaveMap.put(masterAddr, new ArrayList<>());
        }

        for (Entry<Long, String> brokerAddrEntry : brokerData.getBrokerAddrs().entrySet()) {
            // Skip null addresses and the master entry itself.
            if (brokerAddrEntry.getValue() == null || brokerAddrEntry.getKey() == MixAll.MASTER_ID) {
                continue;
            }
            if (masterAddr == null) {
                masterAndSlaveMap.get(NO_MASTER_PLACEHOLDER).add(brokerAddrEntry.getValue());
            } else {
                masterAndSlaveMap.get(masterAddr).add(brokerAddrEntry.getValue());
            }
        }
    }
    return masterAndSlaveMap;
}
@Test
public void testFetchMasterAndSlaveDistinguish() throws InterruptedException, MQBrokerException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException {
    // The mocked cluster contains a broker group with no master, so its slave
    // address must be grouped under the NO_MASTER_PLACEHOLDER key.
    Map<String, List<String>> result = CommandUtil.fetchMasterAndSlaveDistinguish(defaultMQAdminExtImpl, "default-cluster");
    assertThat(result.get(CommandUtil.NO_MASTER_PLACEHOLDER).get(0)).isEqualTo("127.0.0.1:10911");
}
/**
 * Reads up to {@code length} bytes into {@code bytesBuffer} starting at
 * {@code offset}, from this stream's current position {@code mPosition}.
 *
 * NOTE(review): {@code offset} is passed both inside the target buffer and as
 * readInternal's own second argument — presumably both mean the buffer offset,
 * but confirm against readInternal's signature before relying on it.
 *
 * @return the number of bytes read, per the InputStream contract — assumed;
 *         verify against readInternal
 */
@Override
public int read(byte[] bytesBuffer, int offset, int length) throws IOException {
    return readInternal(new ByteArrayTargetBuffer(bytesBuffer, offset), offset, length,
        ReadType.READ_INTO_BYTE_ARRAY, mPosition, false);
}
@Test
public void readPagesMetrics() throws Exception {
    // Given: a 5-page file backed by a byte-array cache manager.
    int fileSize = mPageSize * 5;
    byte[] testData = BufferUtils.getIncreasingByteArray(fileSize);
    ByteArrayCacheManager manager = new ByteArrayCacheManager();
    LocalCacheFileInStream stream = setupWithSingleFile(testData, manager);

    // cache miss
    int readSize = fileSize - 1;
    byte[] cacheMiss = new byte[readSize];
    stream.read(cacheMiss);
    // Nothing served from cache yet; all 5 pages were fetched externally
    // (whole pages are read even though the request stops one byte short).
    Assert.assertEquals(0,
        MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_READ_CACHE.getName()).getCount());
    Assert.assertEquals(readSize, MetricsSystem.meter(
        MetricKey.CLIENT_CACHE_BYTES_REQUESTED_EXTERNAL.getName()).getCount());
    Assert.assertEquals(fileSize,
        MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_READ_EXTERNAL.getName()).getCount());
    Assert.assertEquals(5,
        MetricsSystem.counter(MetricKey.CLIENT_CACHE_EXTERNAL_REQUESTS.getName()).getCount());
    Assert.assertEquals(0,
        MetricsSystem.counter(MetricKey.CLIENT_CACHE_HIT_REQUESTS.getName()).getCount());

    // cache hit
    stream.read();
    // The final byte is now served from cache: external counters stay flat,
    // the hit counter increments once.
    Assert.assertEquals(readSize, MetricsSystem.meter(
        MetricKey.CLIENT_CACHE_BYTES_REQUESTED_EXTERNAL.getName()).getCount());
    Assert.assertEquals(fileSize,
        MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_READ_EXTERNAL.getName()).getCount());
    Assert.assertEquals(5,
        MetricsSystem.counter(MetricKey.CLIENT_CACHE_EXTERNAL_REQUESTS.getName()).getCount());
    Assert.assertEquals(1,
        MetricsSystem.counter(MetricKey.CLIENT_CACHE_HIT_REQUESTS.getName()).getCount());
}
public Optional<UserDto> authenticate(HttpRequest request) { return extractCredentialsFromHeader(request) .flatMap(credentials -> Optional.ofNullable(authenticate(credentials, request))); }
@Test
public void does_not_authenticate_from_user_token_when_token_is_invalid() {
    // Given: the token authenticator rejects the token carried via basic auth.
    when(userTokenAuthentication.authenticate(request)).thenReturn(Optional.empty());
    when(request.getHeader(AUTHORIZATION_HEADER)).thenReturn("Basic " + toBase64("token:"));

    // When / Then: authentication fails with a local SONARQUBE_TOKEN source,
    // no auth event is emitted and no request attribute is set.
    assertThatThrownBy(() -> underTest.authenticate(request))
        .hasMessage("User doesn't exist")
        .isInstanceOf(AuthenticationException.class)
        .hasFieldOrPropertyWithValue("source", Source.local(SONARQUBE_TOKEN));
    verifyNoInteractions(authenticationEvent);
    verify(request, times(0)).setAttribute(anyString(), anyString());
}
/**
 * Queues the given task, wrapping it so that any exception it throws is
 * logged instead of propagating to (and killing) the executor's worker.
 *
 * @param r the task to queue; must not be null
 * @throws NullPointerException if {@code r} is null (checked eagerly,
 *         before the task is queued)
 */
@Override
public void addToQueue(Runnable r) {
    requireNonNull(r);
    // Guard the task so a failure is logged rather than swallowed or fatal.
    Runnable guarded = () -> {
        try {
            r.run();
        } catch (Exception e) {
            LOG.error("Asynchronous task failed", e);
        }
    };
    executorService.addToQueue(guarded);
}
@Test
public void addToQueue_fails_with_NPE_if_Runnable_is_null() {
    // The null check happens eagerly, before the task is ever queued.
    assertThatThrownBy(() -> underTest.addToQueue(null))
        .isInstanceOf(NullPointerException.class);
}
/**
 * Converts every proxy Pentaho user in the given security info to an
 * {@link IUser}, applying the info's role assignments to each.
 *
 * @param info security info carrying the users and role assignments
 * @param rsm  security manager used to instantiate user objects
 * @return converted users in the original order; empty list when there are none
 */
public static List<IUser> convertFromProxyPentahoUsers( UserRoleSecurityInfo info,
    IRoleSupportSecurityManager rsm ) {
  List<ProxyPentahoUser> proxyUsers = info.getUsers();
  if ( proxyUsers == null || proxyUsers.isEmpty() ) {
    return Collections.emptyList();
  }
  List<UserToRoleAssignment> assignments = info.getAssignments();

  List<IUser> converted = new ArrayList<>( proxyUsers.size() );
  for ( ProxyPentahoUser proxyUser : proxyUsers ) {
    converted.add( convertFromProxyPentahoUser( proxyUser, assignments, rsm ) );
  }
  return converted;
}
@Test
public void convertFromProxyPentahoUsers_CopiesEachUser() throws Exception {
    // Given: security info holding two proxy users.
    UserRoleSecurityInfo info = new UserRoleSecurityInfo();
    info.setUsers( Arrays.asList( pentahoUser( "user1" ), pentahoUser( "user2" ) ) );
    IRoleSupportSecurityManager manager = mockSecurityManager( false );

    // When:
    List<IUser> users = convertFromProxyPentahoUsers( info, manager );

    // Then: each proxy user is converted, preserving order and names.
    assertNotNull( users );
    assertEquals( 2, users.size() );
    assertEquals( "user1", users.get( 0 ).getName() );
    assertEquals( "user2", users.get( 1 ).getName() );
}
/**
 * Joins the given elements into a single string with no separator.
 *
 * Delegates to {@code join(elements, EMPTY_STRING)}. The previous unused type
 * parameter {@code <T>} has been dropped — it played no role in the signature
 * and removal is erasure-compatible.
 *
 * @param elements the strings to join
 * @return the concatenation of all elements
 */
public static String join(final String... elements) {
    return join(elements, EMPTY_STRING);
}
@Test
public void testStringJoin() {
    // join never returns null, whether given a single empty string or many elements.
    assertNotEquals(null, StringUtils.join(""));
    assertNotEquals(null, StringUtils.join(STRINGS));
}
/**
 * Validates that this column may be schema-changed into {@code other},
 * throwing a {@link DdlException} describing the first violated rule.
 *
 * @param other the target column definition
 * @throws DdlException when the change is not allowed (type, aggregation,
 *         nullability, default value, string length or JSON length rules)
 */
public void checkSchemaChangeAllowed(Column other) throws DdlException {
    // Generated columns are exempt from these checks.
    if (other.isGeneratedColumn()) {
        return;
    }
    if (Strings.isNullOrEmpty(other.name)) {
        throw new DdlException("Dest column name is empty");
    }

    if (!ColumnType.isSchemaChangeAllowed(type, other.type)) {
        throw new DdlException("Can not change " + getType() + " to " + other.getType());
    }

    if (this.aggregationType != other.aggregationType) {
        throw new DdlException("Can not change aggregation type");
    }

    // Relaxing nullability is allowed; tightening it is not.
    if (this.isAllowNull && !other.isAllowNull) {
        throw new DdlException("Can not change from nullable to non-nullable");
    }

    // Adding a default value to a column without a default value is not supported
    if (!this.isSameDefaultValue(other)) {
        throw new DdlException(CAN_NOT_CHANGE_DEFAULT_VALUE);
    }

    // String-typed targets may only grow in declared length, never shrink.
    if ((getPrimitiveType() == PrimitiveType.VARCHAR && other.getPrimitiveType() == PrimitiveType.VARCHAR)
            || (getPrimitiveType() == PrimitiveType.CHAR && other.getPrimitiveType() == PrimitiveType.VARCHAR)
            || (getPrimitiveType() == PrimitiveType.CHAR && other.getPrimitiveType() == PrimitiveType.CHAR)) {
        if (getStrLen() > other.getStrLen()) {
            throw new DdlException("Cannot shorten string length");
        }
    }

    // JSON converted to a char-family type needs enough room for the value.
    if (getPrimitiveType().isJsonType() && other.getPrimitiveType().isCharFamily()) {
        if (other.getStrLen() <= getPrimitiveType().getTypeSize()) {
            throw new DdlException("JSON needs minimum length of " + getPrimitiveType().getTypeSize());
        }
    }
}
@Test(expected = DdlException.class)
public void testSchemaChangeAllowedNullToNonNull() throws DdlException {
    // Changing a column from nullable to non-nullable must be rejected.
    Column oldColumn = new Column("user", ScalarType.createType(PrimitiveType.INT),
        true, null, true, new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
    Column newColumn = new Column("user", ScalarType.createType(PrimitiveType.INT),
        true, null, false, new ColumnDef.DefaultValueDef(true, new StringLiteral("0")), "");
    oldColumn.checkSchemaChangeAllowed(newColumn);
    Assert.fail("No exception throws.");
}
/**
 * Closes this reader: clears the input-file/block thread-local context,
 * closes the iterator currently in flight, and drains the remaining tasks
 * without opening them so no further per-task resources are created.
 */
@Override
public void close() throws IOException {
    InputFileBlockHolder.unset();

    // close the current iterator
    this.currentIterator.close();

    // exhaust the task iterator
    while (tasks.hasNext()) {
        tasks.next();
    }
}
@Test
public void testClosureWithoutAnyRead() throws IOException {
    // Given: a reader over 10 tasks that has never been iterated.
    Integer totalTasks = 10;
    Integer recordPerTask = 10;
    List<FileScanTask> tasks = createFileScanTasks(totalTasks, recordPerTask);

    ClosureTrackingReader reader = new ClosureTrackingReader(table, tasks);

    // When: the reader is closed without a single read.
    reader.close();

    // Then: no per-task iterator was ever created (laziness is preserved).
    tasks.forEach(
        t -> Assert.assertFalse(
            "Iterator should not be created eagerly for tasks", reader.hasIterator(t)));
}
/**
 * Downloads the agent binaries if necessary, launches the agent JVM as a child
 * process, pipes its stdout/stderr to log appenders, and blocks until the
 * child exits.
 *
 * @return the child process exit code, or EXCEPTION_OCCURRED on any failure
 */
@Override
public int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> env, Map<String, String> context) {
    int exitValue = 0;
    LOG.info("Agent launcher is version: {}", CurrentGoCDVersion.getInstance().fullVersion());
    String[] command = new String[]{};
    try {
        AgentBootstrapperArgs bootstrapperArgs = AgentBootstrapperArgs.fromProperties(context);
        // Fetch the agent jar, the plugin zip and the TFS implementation from the server
        // (each downloader decides for itself whether a download is actually needed).
        ServerBinaryDownloader agentDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        agentDownloader.downloadIfNecessary(DownloadableFile.AGENT);
        ServerBinaryDownloader pluginZipDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        pluginZipDownloader.downloadIfNecessary(DownloadableFile.AGENT_PLUGINS);
        ServerBinaryDownloader tfsImplDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        tfsImplDownloader.downloadIfNecessary(DownloadableFile.TFS_IMPL);
        // Build the java invocation, embedding the md5s so the agent can self-verify.
        command = agentInvocationCommand(agentDownloader.getMd5(), launcherMd5, pluginZipDownloader.getMd5(),
                tfsImplDownloader.getMd5(), env, context, agentDownloader.getExtraProperties());
        LOG.info("Launching Agent with command: {}", join(command, " "));
        Process agent = invoke(command);
        // The next lines prevent the child process from blocking on Windows
        AgentOutputAppender agentOutputAppenderForStdErr = new AgentOutputAppender(GO_AGENT_STDERR_LOG);
        AgentOutputAppender agentOutputAppenderForStdOut = new AgentOutputAppender(GO_AGENT_STDOUT_LOG);
        if (new SystemEnvironment().consoleOutToStdout()) {
            agentOutputAppenderForStdErr.writeTo(AgentOutputAppender.Outstream.STDERR);
            agentOutputAppenderForStdOut.writeTo(AgentOutputAppender.Outstream.STDOUT);
        }
        // We never write to the child's stdin; closing it avoids the child blocking on reads.
        agent.getOutputStream().close();
        // Pump the child's stderr/stdout on dedicated threads so its pipes never fill up.
        AgentConsoleLogThread stdErrThd = new AgentConsoleLogThread(agent.getErrorStream(), agentOutputAppenderForStdErr);
        stdErrThd.start();
        AgentConsoleLogThread stdOutThd = new AgentConsoleLogThread(agent.getInputStream(), agentOutputAppenderForStdOut);
        stdOutThd.start();
        // Ensure the child is killed if this JVM is shut down while waiting.
        Shutdown shutdownHook = new Shutdown(agent);
        Runtime.getRuntime().addShutdownHook(shutdownHook);
        try {
            exitValue = agent.waitFor();
        } catch (InterruptedException ie) {
            LOG.error("Agent was interrupted. Terminating agent and respawning. {}", ie.toString());
            agent.destroy();
        } finally {
            // The hook is only for unexpected shutdowns; remove it on the normal path.
            removeShutdownHook(shutdownHook);
            stdErrThd.stopAndJoin();
            stdOutThd.stopAndJoin();
        }
    } catch (Exception e) {
        LOG.error("Exception while executing command: {} - {}", join(command, " "), e.toString());
        exitValue = EXCEPTION_OCCURRED;
    }
    return exitValue;
}
@Test
public void shouldStartSubprocess_withOverriddenArgs() throws InterruptedException {
    final List<String> cmd = new ArrayList<>();
    AgentProcessParentImpl bootstrapper = createBootstrapper(cmd);
    // AGENT_STARTUP_ARGS overrides the JVM args; %20 is expected to be URL-decoded to a space.
    int returnCode = bootstrapper.run("launcher_version", "bar", getURLGenerator(), Map.of(AgentProcessParentImpl.AGENT_STARTUP_ARGS, "foo bar baz with%20some%20space"), context());
    String expectedAgentMd5 = TEST_AGENT.getMd5();
    String expectedAgentPluginsMd5 = TEST_AGENT_PLUGINS.getMd5();
    String expectedTfsMd5 = TEST_TFS_IMPL.getMd5();
    assertThat(returnCode, is(42));
    // Verify the exact command line, including the decoded startup args and all md5 system properties.
    assertThat(cmd.toArray(new String[]{}), equalTo(new String[]{
            (getProperty("java.home") + FileSystems.getDefault().getSeparator() + "bin" + FileSystems.getDefault().getSeparator() + "java"),
            "foo", "bar", "baz", "with some space",
            "-Dagent.plugins.md5=" + expectedAgentPluginsMd5,
            "-Dagent.binary.md5=" + expectedAgentMd5,
            "-Dagent.launcher.md5=bar",
            "-Dagent.tfs.md5=" + expectedTfsMd5,
            "-Dagent.bootstrapper.version=UNKNOWN",
            "-jar", "agent.jar", "-serverUrl", "http://localhost:" + server.getPort() + "/go/",
            "-sslVerificationMode", "NONE",
            "-rootCertFile", new File("/path/to/cert.pem").getAbsolutePath()
    }));
}
/**
 * Executes the SHOW DATABASES statement by materializing the database names
 * visible to the current session into a local merged result.
 */
@Override
public void execute(final ConnectionSession connectionSession) {
    mergedResult = new LocalDataMergedResult(getDatabaseNames(connectionSession));
}
@Test
void assertExecuteWithPrefixLike() throws SQLException {
    MySQLShowDatabasesStatement statement = new MySQLShowDatabasesStatement();
    ShowFilterSegment filter = new ShowFilterSegment(0, 0);
    filter.setLike(new ShowLikeSegment(0, 0, "database%"));
    statement.setFilter(filter);
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    // Ten databases named database_0..database_9 all match the "database%" prefix pattern.
    when(ProxyContext.getInstance().getAllDatabaseNames())
            .thenReturn(IntStream.range(0, 10).mapToObj(each -> String.format("database_%s", each)).collect(Collectors.toList()));
    ShowDatabasesExecutor executor = new ShowDatabasesExecutor(statement);
    executor.execute(mockConnectionSession());
    assertThat(getActual(executor), is(getExpected()));
}
/**
 * Resolves a scope from the given {@link LifecycleScopeProvider}, delegating to the
 * two-argument overload with its boolean flag set to {@code true}.
 *
 * @param provider the lifecycle provider to resolve a scope from
 * @return a {@link CompletableSource} representing the resolved scope
 * @throws OutsideScopeException if the provider reports being outside of its scope
 */
public static <@NonNull E> CompletableSource resolveScopeFromLifecycle(
    final LifecycleScopeProvider<E> provider) throws OutsideScopeException {
  return resolveScopeFromLifecycle(provider, true);
}
@Test public void resolveScopeFromLifecycle_complete_noFirstElement() { PublishSubject<Integer> lifecycle = PublishSubject.create(); TestObserver<?> o = testSource(resolveScopeFromLifecycle(lifecycle, 3)); // Now we complete lifecycle.onComplete(); o.assertComplete(); }
@Override
public void runIfState(State expectedState, Runnable action) {
    // Skip the action when the scheduler has already left the expected state.
    if (!isState(expectedState)) {
        LOG.debug(
                "Ignoring scheduled action because expected state {} is not the actual state {}.",
                expectedState,
                state);
        return;
    }
    try {
        action.run();
    } catch (Throwable t) {
        // Any failure thrown by the action is treated as fatal.
        fatalErrorHandler.onFatalError(t);
    }
}
@Test
void testRunIfStateWithStateMismatch() throws Exception {
    final AdaptiveScheduler scheduler =
            new AdaptiveSchedulerBuilder(
                            createJobGraph(), mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                    .build();
    AtomicBoolean executed = new AtomicBoolean(false);
    // The scheduler is not in DummyState, so the action must be skipped.
    scheduler.runIfState(new DummyState(), () -> executed.set(true));
    assertThat(executed.get()).isFalse();
}
public static Http2Headers toHttp2Headers(HttpMessage in, boolean validateHeaders) { HttpHeaders inHeaders = in.headers(); final Http2Headers out = new DefaultHttp2Headers(validateHeaders, inHeaders.size()); if (in instanceof HttpRequest) { HttpRequest request = (HttpRequest) in; String host = inHeaders.getAsString(HttpHeaderNames.HOST); if (isOriginForm(request.uri()) || isAsteriskForm(request.uri())) { out.path(new AsciiString(request.uri())); setHttp2Scheme(inHeaders, out); } else { URI requestTargetUri = URI.create(request.uri()); out.path(toHttp2Path(requestTargetUri)); // Take from the request-line if HOST header was empty host = isNullOrEmpty(host) ? requestTargetUri.getAuthority() : host; setHttp2Scheme(inHeaders, requestTargetUri, out); } setHttp2Authority(host, out); out.method(request.method().asciiName()); } else if (in instanceof HttpResponse) { HttpResponse response = (HttpResponse) in; out.status(response.status().codeAsText()); } // Add the HTTP headers which have not been consumed above toHttp2Headers(inHeaders, out); return out; }
@Test
public void stripConnectionNomineesWithCsv() {
    HttpHeaders source = new DefaultHttpHeaders();
    // "foo" and "bar" are nominated for removal via a single CSV Connection value.
    source.add(CONNECTION, "foo, bar");
    source.add("foo", "baz");
    source.add("bar", "qux");
    source.add("hello", "world");
    Http2Headers converted = new DefaultHttp2Headers();
    HttpConversionUtil.toHttp2Headers(source, converted);
    // Only the non-nominated header survives conversion.
    assertEquals(1, converted.size());
    assertSame("world", converted.get("hello"));
}
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    // Once satisfied, this rule never fires again.
    if (satisfied) {
        return false;
    }
    // With no delegate rule, the first evaluation satisfies unconditionally.
    if (rule == null) {
        satisfied = true;
        traceIsSatisfied(index, true);
        return true;
    }
    satisfied = rule.isSatisfied(index, tradingRecord);
    return satisfied;
}
@Test
public void isSatisfiedInReverseOrder() {
    // The first evaluation satisfies the rule...
    assertTrue(rule.isSatisfied(5));
    // ...and every later evaluation returns false, regardless of index order.
    assertFalse(rule.isSatisfied(2));
    assertFalse(rule.isSatisfied(1));
    assertFalse(rule.isSatisfied(0));
}
/**
 * Runs the CLI: builds a REST client from the optional config file, registers any
 * session variables, then executes either a single command, a script file, or an
 * interactive session.
 *
 * @return the exit status of whichever mode was executed
 */
int run() {
    final Map<String, String> configProps = options.getConfigFile()
        .map(Ksql::loadProperties)
        .orElseGet(Collections::emptyMap);
    final Map<String, String> sessionVariables = options.getVariables();
    // Both the client and the CLI are closed automatically, innermost first.
    try (KsqlRestClient restClient = buildClient(configProps)) {
        try (Cli cli = cliBuilder.build(
            options.getStreamedQueryRowLimit(),
            options.getStreamedQueryTimeoutMs(),
            options.getOutputFormat(),
            restClient)
        ) {
            // Add CLI variables If defined by parameters
            cli.addSessionVariables(sessionVariables);
            if (options.getExecute().isPresent()) {
                // Single-command mode (-e / --execute).
                return cli.runCommand(options.getExecute().get());
            } else if (options.getScriptFile().isPresent()) {
                // Script mode: the file must exist and be a regular file.
                final File scriptFile = new File(options.getScriptFile().get());
                if (scriptFile.exists() && scriptFile.isFile()) {
                    return cli.runScript(scriptFile.getPath());
                } else {
                    throw new KsqlException("No such script file: " + scriptFile.getPath());
                }
            } else {
                // Default: interactive REPL.
                return cli.runInteractively();
            }
        }
    }
}
@Test public void shouldAddDefinedVariablesToCliBeforeRunningCommands() { // Given: when(options.getVariables()).thenReturn(ImmutableMap.of("env", "qa")); when(options.getExecute()).thenReturn(Optional.of("this is a command")); // When: ksql.run(); // Then: final InOrder inOrder = Mockito.inOrder(cli); inOrder.verify(cli).addSessionVariables(ImmutableMap.of("env", "qa")); inOrder.verify(cli).runCommand("this is a command"); }
/**
 * Enqueues the message for its originator and triggers processing; a
 * semaphore-backed queue per originator keeps processing ordered per entity.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    locks.computeIfAbsent(msg.getOriginator(), SemaphoreWithTbMsgQueue::new)
            .addToQueueAndTryProcess(msg, ctx, this::processMsgAsync);
}
@Test
public void test_2_plus_2_body() {
    // ADD node reading "a" and "b" from the body and writing "result" back to the body.
    var node = initNode(TbRuleNodeMathFunctionType.ADD,
            new TbMathResult(TbMathArgumentType.MESSAGE_BODY, "result", 2, false, false, null),
            new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "a"),
            new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "b")
    );
    TbMsg inbound = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY,
            JacksonUtil.newObjectNode().put("a", 2).put("b", 2).toString());
    node.onMsg(ctx, inbound);
    ArgumentCaptor<TbMsg> captor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctx, timeout(TIMEOUT)).tellSuccess(captor.capture());
    TbMsg outbound = captor.getValue();
    assertNotNull(outbound);
    assertNotNull(outbound.getData());
    // 2 + 2 must land in the "result" field of the outgoing body.
    var body = JacksonUtil.toJsonNode(outbound.getData());
    assertTrue(body.has("result"));
    assertEquals(4, body.get("result").asInt());
}
/**
 * Verifies the structural integrity of cached image metadata.
 *
 * @param metadata the cached metadata to verify
 * @param metadataCacheDirectory the cache directory, reported in error messages
 * @throws CacheCorruptedException if the metadata is missing required pieces
 */
@VisibleForTesting
static void verifyImageMetadata(ImageMetadataTemplate metadata, Path metadataCacheDirectory)
    throws CacheCorruptedException {
  List<ManifestAndConfigTemplate> entries = metadata.getManifestsAndConfigs();
  if (entries.isEmpty()) {
    throw new CacheCorruptedException(metadataCacheDirectory, "Manifest cache empty");
  }
  if (entries.stream().anyMatch(entry -> entry.getManifest() == null)) {
    throw new CacheCorruptedException(metadataCacheDirectory, "Manifest(s) missing");
  }
  boolean hasManifestList = metadata.getManifestList() != null;
  // Multiple entries are only valid when a manifest list ties them together.
  if (!hasManifestList && entries.size() != 1) {
    throw new CacheCorruptedException(metadataCacheDirectory, "Manifest list missing");
  }
  ManifestTemplate firstManifest = entries.get(0).getManifest();
  if (firstManifest instanceof V21ManifestTemplate) {
    // Schema 1: neither a manifest list nor container configs are expected.
    if (hasManifestList || entries.stream().anyMatch(entry -> entry.getConfig() != null)) {
      throw new CacheCorruptedException(metadataCacheDirectory, "Schema 1 manifests corrupted");
    }
  } else if (firstManifest instanceof BuildableManifestTemplate) {
    // Schema 2: every entry needs a config; a manifest list implies per-entry digests.
    if (entries.stream().anyMatch(entry -> entry.getConfig() == null)) {
      throw new CacheCorruptedException(metadataCacheDirectory, "Schema 2 manifests corrupted");
    }
    if (hasManifestList && entries.stream().anyMatch(entry -> entry.getManifestDigest() == null)) {
      throw new CacheCorruptedException(metadataCacheDirectory, "Schema 2 manifests corrupted");
    }
  } else {
    throw new CacheCorruptedException(
        metadataCacheDirectory, "Unknown manifest type: " + firstManifest);
  }
}
@Test
public void testVerifyImageMetadata_manifestCacheEmpty() {
    // Metadata with no manifest/config entries at all must be rejected.
    ImageMetadataTemplate emptyMetadata = new ImageMetadataTemplate(null, Collections.emptyList());
    try {
        CacheStorageReader.verifyImageMetadata(emptyMetadata, Paths.get("/cache/dir"));
        Assert.fail();
    } catch (CacheCorruptedException ex) {
        MatcherAssert.assertThat(ex.getMessage(), CoreMatchers.startsWith("Manifest cache empty"));
    }
}
/**
 * Reports whether the given service is currently subscribed, delegating
 * directly to the gRPC client proxy.
 *
 * @throws NacosException if the underlying proxy call fails
 */
@Override
public boolean isSubscribed(String serviceName, String groupName, String clusters) throws NacosException {
    return grpcClientProxy.isSubscribed(serviceName, groupName, clusters);
}
@Test
void testIsSubscribed() throws NacosException {
    String service = "service1";
    String group = "group1";
    String clusterList = "cluster1";
    // The delegate forwards straight to the gRPC client proxy, so the answer
    // tracks whatever the mocked proxy reports.
    assertFalse(delegate.isSubscribed(service, group, clusterList));
    when(mockGrpcClient.isSubscribed(service, group, clusterList)).thenReturn(true);
    assertTrue(delegate.isSubscribed(service, group, clusterList));
}
/**
 * Deterministically maps a number onto a LEFT/RIGHT word-pair name, optionally
 * prefixed by the value of the {@code MOBY_NAMING_PREFIX} system property.
 */
public static String getRandomName(int number) {
    // Fold the input into the space of all LEFT x RIGHT combinations.
    int combo = number % (LEFT.length * RIGHT.length);
    int right = combo / LEFT.length;
    int left = combo % LEFT.length;
    String name = String.format(NAME_FORMAT, LEFT[left], RIGHT[right]);
    // An optional system property prefixes every generated name.
    String prefix = System.getProperty(MOBY_NAMING_PREFIX);
    return prefix == null ? name : prefix + "_" + name;
}
@Test
public void getRandomNameWithPrefix() {
    // Set the prefix property for this test only and always restore it afterwards;
    // the original version leaked the property into every subsequently-run test.
    System.setProperty(MobyNames.MOBY_NAMING_PREFIX, "foo");
    try {
        String randomName = MobyNames.getRandomName(0);
        assertFalse(isNullOrEmptyAfterTrim(randomName));
        assertThat(randomName).startsWith("foo_");
    } finally {
        System.clearProperty(MobyNames.MOBY_NAMING_PREFIX);
    }
}
/** Factory for a table UDAF that sums the integers of each input list into a running total. */
@UdafFactory(description = "sum int values in a list into a single int")
public static TableUdaf<List<Integer>, Integer, Integer> sumIntList() {
  return new TableUdaf<List<Integer>, Integer, Integer>() {

    // Sums all elements of one input list; null handling is done by the callers.
    private int sumList(final List<Integer> list) {
      return sum(list, initialize(), Integer::sum);
    }

    @Override
    public Integer initialize() {
      return 0;
    }

    @Override
    public Integer aggregate(final List<Integer> values, final Integer aggregate) {
      // Null input lists leave the aggregate untouched.
      return values == null ? aggregate : aggregate + sumList(values);
    }

    @Override
    public Integer undo(final List<Integer> values, final Integer aggregate) {
      // Retracting a row subtracts the list's sum again.
      return values == null ? aggregate : aggregate - sumList(values);
    }

    @Override
    public Integer merge(final Integer aggOne, final Integer aggTwo) {
      return aggOne + aggTwo;
    }

    @Override
    public Integer map(final Integer agg) {
      return agg;
    }
  };
}
@Test
public void shouldSumIntList() {
    final TableUdaf<List<Integer>, Integer, Integer> udaf = ListSumUdaf.sumIntList();
    final Integer[] values = new Integer[] {1, 1, 1, 1, 1};
    final List<Integer> list = Arrays.asList(values);
    final Integer sum = udaf.aggregate(list, 0);
    // Hamcrest's assertThat takes (actual, matcher); the previous code had the
    // arguments reversed, which produces misleading failure messages.
    assertThat(sum, equalTo(5));
}
/**
 * Changes the root log level and toggles SQL statement logging accordingly.
 */
public void changeLevel(LoggerLevel level) {
    // SQL statement logging is only turned on at the most verbose level.
    database.enableSqlLogging(level == TRACE);
    Level targetLevel = Level.toLevel(level.name());
    helper.changeRoot(serverProcessLogging.getLogLevelConfig(), targetLevel);
    LoggerFactory.getLogger(ServerLogging.class).info("Level of logs changed to {}", level);
}
@Test
public void changeLevel_to_trace_enables_db_logging() {
    LogLevelConfig logLevelConfig = LogLevelConfig.newBuilder(rootLoggerName).build();
    when(serverProcessLogging.getLogLevelConfig()).thenReturn(logLevelConfig);
    // Reset the mock between level changes so each verify sees only the latest call.
    reset(database);
    underTest.changeLevel(INFO);
    verify(database).enableSqlLogging(false);
    reset(database);
    underTest.changeLevel(DEBUG);
    verify(database).enableSqlLogging(false);
    // Only TRACE should switch SQL logging on.
    reset(database);
    underTest.changeLevel(TRACE);
    verify(database).enableSqlLogging(true);
}
/**
 * Returns the id of the single content cluster that has an instance on the given host.
 *
 * @throws IllegalArgumentException if the host belongs to zero or multiple content clusters
 */
public static ClusterId getContentClusterName(ApplicationInstance application, HostName hostName) {
    Set<ClusterId> matches = application.serviceClusters().stream()
            .filter(VespaModelUtil::isContent)
            .filter(cluster -> clusterHasInstanceOnHost(cluster, hostName))
            .map(ServiceCluster::clusterId)
            .collect(Collectors.toSet());
    if (matches.size() == 1) {
        return matches.iterator().next();
    }
    throw new IllegalArgumentException("Expected exactly one content cluster within application " +
            application.applicationInstanceId() + " and host " + hostName + ", but found " +
            matches.size() + ": " + matches + ", application: " + application);
}
@Test
public void testGetContentClusterNameForSecondaryContentCluster() {
    ClusterId contentClusterName = VespaModelUtil.getContentClusterName(application, secondaryDistributor0.hostName());
    // JUnit's assertEquals takes (expected, actual); the previous code had them
    // reversed, which yields misleading failure messages.
    assertEquals(SECONDARY_CONTENT_CLUSTER_ID, contentClusterName);
}
@VisibleForTesting JobMeta filterPrivateDatabases( JobMeta jobMeta ) { Set<String> privateDatabases = jobMeta.getPrivateDatabases(); if ( privateDatabases != null ) { // keep only private transformation databases for ( Iterator<DatabaseMeta> it = jobMeta.getDatabases().iterator(); it.hasNext(); ) { DatabaseMeta databaseMeta = it.next(); String databaseName = databaseMeta.getName(); if ( !privateDatabases.contains( databaseName ) && !jobMeta.isDatabaseConnectionUsed( databaseMeta ) ) { it.remove(); } } } return jobMeta; }
@Test
public void filterPrivateDatabasesWithOneInUseTest() {
    IUnifiedRepository repository = mock( IUnifiedRepository.class );
    JobMeta jobMeta = spy( JobMeta.class );
    jobMeta.setDatabases( getDummyDatabases() );
    jobMeta.setPrivateDatabases( new HashSet<>() );
    // The first dummy connection is reported as "in use" and must therefore survive filtering.
    when( jobMeta.isDatabaseConnectionUsed( getDummyDatabases().get( 0 ) ) ).thenReturn( true );
    StreamToJobNodeConverter converter = new StreamToJobNodeConverter( repository );
    assertEquals( 1, converter.filterPrivateDatabases( jobMeta ).getDatabases().size() );
}
/**
 * Saves a search on behalf of a user, enforcing stream-read permissions and
 * rejecting overwrites of searches the user neither owns nor administers.
 *
 * @throws PermissionException if an existing search may not be overwritten by this user
 */
public Search saveForUser(Search search, SearchUser searchUser) {
    this.executionGuard.check(search, searchUser::canReadStream);
    final Optional<Search> existing = Optional.ofNullable(search.id()).flatMap(dbService::get);
    // Admins may always overwrite; otherwise the search must be new or owned by the user.
    boolean mayOverwrite = searchUser.isAdmin() || existing.map(searchUser::owns).orElse(true);
    if (!mayOverwrite) {
        throw new PermissionException("Unable to update search with id <" + search.id()
                + ">, already exists and user is not permitted to overwrite it.");
    }
    return dbService.save(search.withOwner(searchUser.username()));
}
@Test
public void guardExceptionOnPostLeadsTo403() {
    final Search search = mockSearchWithOwner("someone");
    final SearchUser user = mock(SearchUser.class);
    throwGuardExceptionFor(search);
    // The execution-guard failure must surface as an HTTP 403 (Forbidden).
    assertThatExceptionOfType(ForbiddenException.class)
            .isThrownBy(() -> sut.saveForUser(search, user));
}
/**
 * Translates a Connect value into its KSQL representation; nulls pass through untouched.
 */
@Override
public Object toKsqlRow(final Schema connectSchema, final Object connectData) {
    return connectData == null ? null : toKsqlValue(schema, connectSchema, connectData, "");
}
@Test public void shouldReturnBytesType() { // Given: final ConnectDataTranslator connectToKsqlTranslator = new ConnectDataTranslator(SchemaBuilder.OPTIONAL_BYTES_SCHEMA); // When: final Object row = connectToKsqlTranslator.toKsqlRow(Schema.BYTES_SCHEMA, ByteBuffer.wrap(new byte[] {123})); // Then: assertTrue(row instanceof ByteBuffer); assertThat(row, is(ByteBuffer.wrap(new byte[] {123}))); }
/**
 * Finishes the span that {@code beforeMethod} attached to the target, marking it OK.
 */
@Override
public void afterMethod(final TargetAdviceObject target, final TargetAdviceMethod method,
                        final Object[] args, final Object result, final String pluginType) {
    Span activeSpan = (Span) target.getAttachment();
    activeSpan.setStatus(StatusCode.OK);
    activeSpan.end();
}
@Test
void assertMethod() {
    TargetAdviceObjectFixture fixture = new TargetAdviceObjectFixture();
    OpenTelemetrySQLParserEngineAdvice advice = new OpenTelemetrySQLParserEngineAdvice();
    advice.beforeMethod(fixture, null, new Object[]{SQL, true}, "OpenTelemetry");
    advice.afterMethod(fixture, null, new Object[]{SQL, true}, null, "OpenTelemetry");
    // The span opened in beforeMethod must have been exported with an OK status.
    List<SpanData> finishedSpans = testExporter.getFinishedSpanItems();
    assertCommonData(finishedSpans);
    assertThat(finishedSpans.iterator().next().getStatus().getStatusCode(), is(StatusCode.OK));
}
/**
 * Sets the modification (and access/creation) timestamp of a file via the
 * non-standard FTP {@code SITE UTIME} command. After the first failure, all
 * subsequent calls fail fast with the cached failure.
 *
 * @throws BackgroundException if the SITE command is rejected or the connection fails
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    // Fail fast: once the server rejected SITE UTIME, do not retry per file.
    if(failure != null) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Skip setting timestamp for %s due to previous failure %s", file, failure.getMessage()));
        }
        throw new FTPExceptionMappingService().map("Cannot change timestamp of {0}", failure, file);
    }
    try {
        if(null != status.getModified()) {
            final MDTMSecondsDateFormatter formatter = new MDTMSecondsDateFormatter();
            // The utime() function sets the access and modification times of the named
            // file from the structures in the argument array timep.
            // The access time is set to the value of the first element,
            // and the modification time is set to the value of the second element
            // Accessed date, modified date, created date
            if(!session.getClient().sendSiteCommand(String.format("UTIME %s %s %s %s UTC",
                    file.getAbsolute(),
                    formatter.format(new Date(System.currentTimeMillis()), TimeZone.getTimeZone("UTC")),
                    formatter.format(new Date(status.getModified()), TimeZone.getTimeZone("UTC")),
                    formatter.format(new Date(status.getModified()), TimeZone.getTimeZone("UTC"))))) {
                // Remember the rejection so later calls can skip the round-trip.
                throw failure = new FTPException(session.getClient().getReplyCode(),
                        session.getClient().getReplyString());
            }
        }
    }
    catch(IOException e) {
        throw new FTPExceptionMappingService().map("Cannot change timestamp of {0}", e, file);
    }
}
@Test(expected = BackgroundException.class)
public void testSetTimestamp() throws Exception {
    final FTPWorkdirService workdir = new FTPWorkdirService(session);
    final long timestamp = System.currentTimeMillis();
    final Path file = new Path(workdir.find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new FTPTouchFeature(session).touch(file, new TransferStatus());
    // Setting the timestamp is expected to fail here with a BackgroundException.
    new FTPUTIMETimestampFeature(session).setTimestamp(file, timestamp);
    new FTPDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns the wrapper class for a primitive type; any other type (including
 * {@code void.class}) is returned unchanged.
 */
public static Class<?> getBoxedClass(Class<?> type) {
    if (type == int.class) {
        return Integer.class;
    }
    if (type == long.class) {
        return Long.class;
    }
    if (type == boolean.class) {
        return Boolean.class;
    }
    if (type == double.class) {
        return Double.class;
    }
    if (type == float.class) {
        return Float.class;
    }
    if (type == byte.class) {
        return Byte.class;
    }
    if (type == short.class) {
        return Short.class;
    }
    if (type == char.class) {
        return Character.class;
    }
    // Non-primitive types map to themselves.
    return type;
}
@Test
void testGetBoxedClass() {
    // Every primitive must map to its wrapper class.
    Assertions.assertEquals(Boolean.class, ClassUtils.getBoxedClass(boolean.class));
    Assertions.assertEquals(Character.class, ClassUtils.getBoxedClass(char.class));
    Assertions.assertEquals(Byte.class, ClassUtils.getBoxedClass(byte.class));
    Assertions.assertEquals(Short.class, ClassUtils.getBoxedClass(short.class));
    Assertions.assertEquals(Integer.class, ClassUtils.getBoxedClass(int.class));
    Assertions.assertEquals(Long.class, ClassUtils.getBoxedClass(long.class));
    Assertions.assertEquals(Float.class, ClassUtils.getBoxedClass(float.class));
    Assertions.assertEquals(Double.class, ClassUtils.getBoxedClass(double.class));
    // Non-primitive types are returned unchanged.
    Assertions.assertEquals(ClassUtilsTest.class, ClassUtils.getBoxedClass(ClassUtilsTest.class));
}
/**
 * Creates the Prometheus collector matching the configured metric type.
 *
 * @throws UnsupportedOperationException if the metric type has no Prometheus collector
 */
@Override
public MetricsCollector create(final MetricConfiguration metricConfig) {
    switch (metricConfig.getType()) {
        case GAUGE:
            return new PrometheusMetricsGaugeCollector(metricConfig);
        case COUNTER:
            return new PrometheusMetricsCounterCollector(metricConfig);
        case SUMMARY:
            return new PrometheusMetricsSummaryCollector(metricConfig);
        case HISTOGRAM:
            return new PrometheusMetricsHistogramCollector(metricConfig);
        case GAUGE_METRIC_FAMILY:
            return new PrometheusMetricsGaugeMetricFamilyCollector(metricConfig);
        default:
            throw new UnsupportedOperationException(String.format("Can not support type `%s`.", metricConfig.getType()));
    }
}
@Test
void assertCreateCounterCollector() {
    // A COUNTER configuration must yield the Prometheus counter collector.
    MetricConfiguration counterConfig =
            new MetricConfiguration("test_counter", MetricCollectorType.COUNTER, null, Collections.emptyList(), Collections.emptyMap());
    assertThat(new PrometheusMetricsCollectorFactory().create(counterConfig), instanceOf(PrometheusMetricsCounterCollector.class));
}
/**
 * Sends a heartbeat to all brokers under the heartbeat lock, picking the V1 or V2
 * protocol based on the client configuration.
 *
 * @return true if the heartbeat was sent successfully; false if the lock could not
 *         be acquired or the send failed
 */
public boolean sendHeartbeatToAllBrokerWithLock() {
    // Another heartbeat is in flight; skip this round.
    if (!this.lockHeartbeat.tryLock()) {
        log.warn("lock heartBeat, but failed. [{}]", this.clientId);
        return false;
    }
    try {
        return clientConfig.isUseHeartbeatV2()
                ? this.sendHeartbeatToAllBrokerV2(false)
                : this.sendHeartbeatToAllBroker();
    } catch (final Exception e) {
        log.error("sendHeartbeatToAllBroker exception", e);
        return false;
    } finally {
        this.lockHeartbeat.unlock();
    }
}
@Test
public void testSendHeartbeatToAllBrokerWithLockV1() {
    // Register a broker and a consumer so there is something to heartbeat against.
    brokerAddrTable.put(defaultBroker, createBrokerAddrMap());
    consumerTable.put(group, createMQConsumerInner());
    assertTrue(mqClientInstance.sendHeartbeatToAllBrokerWithLock());
}