focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static SqlDecimal of(final int precision, final int scale) { return new SqlDecimal(precision, scale); }
@Test public void shouldReturnBaseType() { MatcherAssert.assertThat(SqlDecimal.of(10, 2).baseType(), Matchers.is(SqlBaseType.DECIMAL)); }
@Override public List<TenantPackageDO> getTenantPackageListByStatus(Integer status) { return tenantPackageMapper.selectListByStatus(status); }
@Test public void testGetTenantPackageListByStatus() { // mock 数据 TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())); tenantPackageMapper.insert(dbTenantPackage); // 测试 status 不匹配 tenantPackageMapper.insert(cloneIgnoreId(dbTenantPackage, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()))); // 调用 List<TenantPackageDO> list = tenantPackageService.getTenantPackageListByStatus( CommonStatusEnum.ENABLE.getStatus()); assertEquals(1, list.size()); assertPojoEquals(dbTenantPackage, list.get(0)); }
@Override public String execute(CommandContext commandContext, String[] args) { if (commandContext.isHttp()) { Map<String, Object> result = new HashMap<>(); result.put("checkStatus", serializeCheckUtils.getStatus()); result.put("checkSerializable", serializeCheckUtils.isCheckSerializable()); result.put("allowedPrefix", serializeCheckUtils.getAllowedList()); result.put("disAllowedPrefix", serializeCheckUtils.getDisAllowedList()); return JsonUtils.toJson(result); } else { return "CheckStatus: " + serializeCheckUtils.getStatus() + "\n\n" + "CheckSerializable: " + serializeCheckUtils.isCheckSerializable() + "\n\n" + "AllowedPrefix:" + "\n" + serializeCheckUtils.getAllowedList().stream().sorted().collect(Collectors.joining("\n")) + "\n\n" + "DisAllowedPrefix:" + "\n" + serializeCheckUtils.getDisAllowedList().stream().sorted().collect(Collectors.joining("\n")) + "\n\n"; } }
@Test void testNotify() { FrameworkModel frameworkModel = new FrameworkModel(); SerializeSecurityManager ssm = frameworkModel.getBeanFactory().getBean(SerializeSecurityManager.class); SerializeCheckStatus serializeCheckStatus = new SerializeCheckStatus(frameworkModel); CommandContext commandContext1 = Mockito.mock(CommandContext.class); Mockito.when(commandContext1.isHttp()).thenReturn(false); CommandContext commandContext2 = Mockito.mock(CommandContext.class); Mockito.when(commandContext2.isHttp()).thenReturn(true); Assertions.assertFalse( serializeCheckStatus.execute(commandContext1, null).contains("Test1234")); Assertions.assertFalse( serializeCheckStatus.execute(commandContext2, null).contains("Test1234")); ssm.addToAllowed("Test1234"); Assertions.assertTrue( serializeCheckStatus.execute(commandContext1, null).contains("Test1234")); Assertions.assertTrue( serializeCheckStatus.execute(commandContext2, null).contains("Test1234")); Assertions.assertFalse( serializeCheckStatus.execute(commandContext1, null).contains("Test4321")); Assertions.assertFalse( serializeCheckStatus.execute(commandContext2, null).contains("Test4321")); ssm.addToDisAllowed("Test4321"); Assertions.assertTrue( serializeCheckStatus.execute(commandContext1, null).contains("Test4321")); Assertions.assertTrue( serializeCheckStatus.execute(commandContext2, null).contains("Test4321")); Assertions.assertFalse( serializeCheckStatus.execute(commandContext1, null).contains("CheckSerializable: false")); Assertions.assertFalse( serializeCheckStatus.execute(commandContext2, null).contains("\"checkSerializable\":false")); ssm.setCheckSerializable(false); Assertions.assertTrue( serializeCheckStatus.execute(commandContext1, null).contains("CheckSerializable: false")); Assertions.assertTrue( serializeCheckStatus.execute(commandContext2, null).contains("\"checkSerializable\":false")); Assertions.assertFalse( serializeCheckStatus.execute(commandContext1, null).contains("CheckStatus: DISABLE")); 
Assertions.assertFalse( serializeCheckStatus.execute(commandContext2, null).contains("\"checkStatus\":\"DISABLE\"")); ssm.setCheckStatus(org.apache.dubbo.common.utils.SerializeCheckStatus.DISABLE); Assertions.assertTrue( serializeCheckStatus.execute(commandContext1, null).contains("CheckStatus: DISABLE")); Assertions.assertTrue( serializeCheckStatus.execute(commandContext2, null).contains("\"checkStatus\":\"DISABLE\"")); frameworkModel.destroy(); }
@VisibleForTesting static Estimate calculateDistinctValuesCount(List<HiveColumnStatistics> columnStatistics) { return columnStatistics.stream() .map(MetastoreHiveStatisticsProvider::getDistinctValuesCount) .filter(OptionalLong::isPresent) .map(OptionalLong::getAsLong) .peek(distinctValuesCount -> verify(distinctValuesCount >= 0, "distinctValuesCount must be greater than or equal to zero")) .max(Long::compare) .map(Estimate::of) .orElse(Estimate.unknown()); }
@Test public void testCalculateDistinctValuesCount() { assertEquals(calculateDistinctValuesCount(ImmutableList.of()), Estimate.unknown()); assertEquals(calculateDistinctValuesCount(ImmutableList.of(HiveColumnStatistics.empty())), Estimate.unknown()); assertEquals(calculateDistinctValuesCount(ImmutableList.of(HiveColumnStatistics.empty(), HiveColumnStatistics.empty())), Estimate.unknown()); assertEquals(calculateDistinctValuesCount(ImmutableList.of(distinctValuesCount(1))), Estimate.of(1)); assertEquals(calculateDistinctValuesCount(ImmutableList.of(distinctValuesCount(1), distinctValuesCount(2))), Estimate.of(2)); assertEquals(calculateDistinctValuesCount(ImmutableList.of(distinctValuesCount(1), HiveColumnStatistics.empty())), Estimate.of(1)); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.empty(), OptionalLong.empty(), OptionalLong.empty()))), Estimate.unknown()); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.of(1), OptionalLong.of(0), OptionalLong.empty()))), Estimate.of(1)); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.of(10), OptionalLong.empty(), OptionalLong.empty()))), Estimate.unknown()); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.of(10), OptionalLong.of(10), OptionalLong.empty()))), Estimate.of(2)); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.empty(), OptionalLong.of(10), OptionalLong.empty()))), Estimate.unknown()); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.of(0), OptionalLong.of(10), OptionalLong.empty()))), Estimate.of(1)); assertEquals(calculateDistinctValuesCount(ImmutableList.of(createBooleanColumnStatistics(OptionalLong.of(0), OptionalLong.of(0), OptionalLong.empty()))), Estimate.of(0)); assertEquals( 
calculateDistinctValuesCount(ImmutableList.of( createBooleanColumnStatistics(OptionalLong.of(0), OptionalLong.of(10), OptionalLong.empty()), createBooleanColumnStatistics(OptionalLong.of(1), OptionalLong.of(10), OptionalLong.empty()))), Estimate.of(2)); }
@Override public RequestFuture requestFuture(Request request) throws NacosException { Payload grpcRequest = GrpcUtils.convert(request); final ListenableFuture<Payload> requestFuture = grpcFutureServiceStub.request(grpcRequest); return new RequestFuture() { @Override public boolean isDone() { return requestFuture.isDone(); } @Override public Response get() throws Exception { Payload grpcResponse = requestFuture.get(); Response response = (Response) GrpcUtils.parse(grpcResponse); if (response instanceof ErrorResponse) { throw new NacosException(response.getErrorCode(), response.getMessage()); } return response; } @Override public Response get(long timeout) throws Exception { Payload grpcResponse = requestFuture.get(timeout, TimeUnit.MILLISECONDS); Response response = (Response) GrpcUtils.parse(grpcResponse); if (response instanceof ErrorResponse) { throw new NacosException(response.getErrorCode(), response.getMessage()); } return response; } }; }
@Test void testRequestFutureWithTimeoutFailure() throws Exception { assertThrows(NacosException.class, () -> { when(future.get(100L, TimeUnit.MILLISECONDS)).thenReturn(errorResponsePayload); RequestFuture requestFuture = connection.requestFuture(new HealthCheckRequest()); assertTrue(requestFuture.isDone()); requestFuture.get(100L); }); }
@Override public char readChar(@Nonnull String fieldName) throws IOException { FieldDefinition fd = cd.getField(fieldName); if (fd == null) { return 0; } validateTypeCompatibility(fd, CHAR); return super.readChar(fieldName); }
@Test(expected = IncompatibleClassChangeError.class) public void testReadChar_IncompatibleClass() throws Exception { reader.readChar("string"); }
public static boolean isClusterEnabled(AppSettings settings) { return isClusterEnabled(settings.getProps()); }
@Test @UseDataProvider("validIPv4andIPv6Addresses") public void test_isClusterEnabled(String host) { TestAppSettings settings = newSettingsForAppNode(host, of(CLUSTER_ENABLED.getKey(), "true")); assertThat(ClusterSettings.isClusterEnabled(settings)).isTrue(); settings = new TestAppSettings(of(CLUSTER_ENABLED.getKey(), "false")); assertThat(ClusterSettings.isClusterEnabled(settings)).isFalse(); }
public void edit( RunConfiguration runConfiguration ) { final String key = runConfiguration.getName(); RunConfigurationDialog dialog = new RunConfigurationDialog( spoonSupplier.get().getShell(), configurationManager, runConfiguration ); RunConfiguration savedRunConfiguration = dialog.open(); if ( savedRunConfiguration != null ) { configurationManager.delete( key ); configurationManager.save( savedRunConfiguration ); refreshTree(); updateLoadedJobs( key, savedRunConfiguration ); } }
@Test public void testEdit() throws Exception { DefaultRunConfiguration config = new DefaultRunConfiguration(); config.setName( "Test" ); config.setServer( "localhost" ); doNothing().when( delegate ).updateLoadedJobs( "Test", config ); try ( MockedConstruction<RunConfigurationDialog> mockedConfDialog = mockConstruction( RunConfigurationDialog.class, (mock, context) -> when( mock.open() ).thenReturn( config ) ) ) { delegate.edit( config ); verify( delegate, times( 1 ) ).updateLoadedJobs( "Test", config ); verify( service, times( 1 ) ).delete( "Test" ); verify( service, times( 1 ) ).save( config ); verify( spoon, times( 1 ) ).refreshTree( RunConfigurationFolderProvider.STRING_RUN_CONFIGURATIONS ); } }
ObjectFactory loadObjectFactory() { Class<? extends ObjectFactory> objectFactoryClass = options.getObjectFactoryClass(); ClassLoader classLoader = classLoaderSupplier.get(); ServiceLoader<ObjectFactory> loader = ServiceLoader.load(ObjectFactory.class, classLoader); if (objectFactoryClass == null) { return loadSingleObjectFactoryOrDefault(loader); } return loadSelectedObjectFactory(loader, objectFactoryClass); }
@Test void test_case_4_with_services_in_reverse_order() { io.cucumber.core.backend.Options options = () -> null; ObjectFactoryServiceLoader loader = new ObjectFactoryServiceLoader( () -> new ServiceLoaderTestClassLoader(ObjectFactory.class, OtherFactory.class, DefaultObjectFactory.class), options); assertThat(loader.loadObjectFactory(), instanceOf(OtherFactory.class)); }
public static Optional<Object> invokeMethodWithNoneParameter(Object target, String methodName) { return invokeMethod(target, methodName, null, null); }
@Test public void invokeMethodWithNoneParameter() { final TestReflect testReflect = new TestReflect(); String methodName = "noParams"; final Optional<Object> result = ReflectUtils.invokeMethodWithNoneParameter(testReflect, methodName); Assert.assertTrue(result.isPresent() && result.get() instanceof String); Assert.assertEquals(result.get(), testReflect.noParams()); }
@Override public SlotAssignmentResult ensure(long key) { return super.ensure0(key, 0); }
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testPut_whenDisposed() { hsa.dispose(); hsa.ensure(1); }
@Override public CompletableFuture<KubernetesWorkerNode> requestResource( TaskExecutorProcessSpec taskExecutorProcessSpec) { final KubernetesTaskManagerParameters parameters = createKubernetesTaskManagerParameters( taskExecutorProcessSpec, getBlockedNodeRetriever().getAllBlockedNodeIds()); final KubernetesPod taskManagerPod = KubernetesTaskManagerFactory.buildTaskManagerKubernetesPod( taskManagerPodTemplate, parameters); final String podName = taskManagerPod.getName(); final CompletableFuture<KubernetesWorkerNode> requestResourceFuture = new CompletableFuture<>(); requestResourceFutures.put(podName, requestResourceFuture); log.info( "Creating new TaskManager pod with name {} and resource <{},{}>.", podName, parameters.getTaskManagerMemoryMB(), parameters.getTaskManagerCPU()); final CompletableFuture<Void> createPodFuture = flinkKubeClient.createTaskManagerPod(taskManagerPod); FutureUtils.assertNoException( createPodFuture.handleAsync( (ignore, exception) -> { if (exception != null) { log.warn( "Could not create pod {}, exception: {}", podName, exception); CompletableFuture<KubernetesWorkerNode> future = requestResourceFutures.remove(taskManagerPod.getName()); if (future != null) { future.completeExceptionally(exception); } } else { if (requestResourceFuture.isCancelled()) { stopPod(podName); log.info( "pod {} is cancelled before create pod finish, stop it.", podName); } else { log.info("Pod {} is created.", podName); } } return null; }, getMainThreadExecutor())); FutureUtils.assertNoException( requestResourceFuture.handle( (ignore, t) -> { if (t == null) { return null; } // Unwrap CompletionException cause if any if (t instanceof CompletionException && t.getCause() != null) { t = t.getCause(); } if (t instanceof CancellationException) { requestResourceFutures.remove(taskManagerPod.getName()); if (createPodFuture.isDone()) { log.info( "pod {} is cancelled before scheduled, stop it.", podName); stopPod(taskManagerPod.getName()); } } else if (t instanceof 
RetryableException || t instanceof KubernetesClientException) { // ignore transient / retriable errors } else { log.error("Error completing resource request.", t); ExceptionUtils.rethrow(t); } return null; })); return requestResourceFuture; }
@Test void testCancelRequestedResource() throws Exception { new Context() { { final CompletableFuture<KubernetesPod> createPodFuture = new CompletableFuture<>(); final CompletableFuture<Void> createTaskManagerPodFuture = new CompletableFuture<>(); final CompletableFuture<String> stopPodFuture = new CompletableFuture<>(); flinkKubeClientBuilder .setCreateTaskManagerPodFunction( (pod) -> { createPodFuture.complete(pod); return createTaskManagerPodFuture; }) .setStopPodFunction( (podName) -> { stopPodFuture.complete(podName); return FutureUtils.completedVoidFuture(); }); runTest( () -> { // request new pod and then cancel it. runInMainThread( () -> { CompletableFuture<KubernetesWorkerNode> requestFuture = getDriver() .requestResource( TASK_EXECUTOR_PROCESS_SPEC); requestFuture.cancel(true); }); final KubernetesPod pod = new TestingKubernetesPod( createPodFuture .get(TIMEOUT_SEC, TimeUnit.SECONDS) .getName(), false, true); assertThat(stopPodFuture.isDone()).isFalse(); runInMainThread(() -> createTaskManagerPodFuture.complete(null)); // pod should be stopped when create pod rpc finished. final CompletableFuture<Void> validationFuture = stopPodFuture.thenAccept( (podName) -> assertThat(podName).isEqualTo(pod.getName())); validationFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS); }); } }; }
public static List<String> computeNameParts(String loggerName) { List<String> partList = new ArrayList<String>(); int fromIndex = 0; while (true) { int index = getSeparatorIndexOf(loggerName, fromIndex); if (index == -1) { partList.add(loggerName.substring(fromIndex)); break; } partList.add(loggerName.substring(fromIndex, index)); fromIndex = index + 1; } return partList; }
@Test public void smoke1() { List<String> witnessList = new ArrayList<String>(); witnessList.add("com"); witnessList.add("foo"); witnessList.add("Bar"); List<String> partList = LoggerNameUtil.computeNameParts("com.foo.Bar"); assertEquals(witnessList, partList); }
public static void deleteQuietly(File file) { Objects.requireNonNull(file, "file"); FileUtils.deleteQuietly(file); }
@Test void deleteQuietly() throws IOException { File tmpFile = DiskUtils.createTmpFile(UUID.randomUUID().toString(), ".ut"); DiskUtils.deleteQuietly(tmpFile); assertFalse(tmpFile.exists()); }
public static MetadataUpdate fromJson(String json) { return JsonUtil.parse(json, MetadataUpdateParser::fromJson); }
@Test public void testSetSnapshotRefBranchFromJsonDefault_ExplicitNullValues() { String action = MetadataUpdateParser.SET_SNAPSHOT_REF; long snapshotId = 1L; SnapshotRefType type = SnapshotRefType.BRANCH; String refName = "hank"; Integer minSnapshotsToKeep = null; Long maxSnapshotAgeMs = null; Long maxRefAgeMs = null; String json = "{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\",\"snapshot-id\":1,\"type\":\"bRaNch\"," + "\"max-ref-age-ms\":null,\"min-snapshots-to-keep\":null,\"max-snapshot-age-ms\":null}"; MetadataUpdate expected = new MetadataUpdate.SetSnapshotRef( refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs); assertEquals(action, expected, MetadataUpdateParser.fromJson(json)); }
@Override public String getStatementName(StatementContext statementContext) { final ExtensionMethod extensionMethod = statementContext.getExtensionMethod(); if (extensionMethod == null) { return null; } final Class<?> clazz = extensionMethod.getType(); final Timed classTimed = clazz.getAnnotation(Timed.class); final Method method = extensionMethod.getMethod(); final Timed methodTimed = method.getAnnotation(Timed.class); // If the method is timed, figure out the name if (methodTimed != null) { String methodName = methodTimed.name().isEmpty() ? method.getName() : methodTimed.name(); if (methodTimed.absolute()) { return methodName; } else { // We need to check if the class has a custom timer name return classTimed == null || classTimed.name().isEmpty() ? MetricRegistry.name(clazz, methodName) : MetricRegistry.name(classTimed.name(), methodName); } } else if (classTimed != null) { // Maybe the class is timed? return classTimed.name().isEmpty() ? MetricRegistry.name(clazz, method.getName()) : MetricRegistry.name(classTimed.name(), method.getName()); } else { // No timers neither on the method or the class return null; } }
@Test public void testAnnotationOnMethodWithCustomName() throws Exception { when(ctx.getExtensionMethod()).thenReturn(new ExtensionMethod(Foo.class, Foo.class.getMethod("customUpdate"))); assertThat(timedAnnotationNameStrategy.getStatementName(ctx)) .isEqualTo("com.codahale.metrics.jdbi3.strategies.TimedAnnotationNameStrategyTest$Foo.custom-update"); }
@Override public boolean isInstalled(String coreExtensionName) { checkInitialized(); return installedCoreExtensions.stream() .anyMatch(t -> coreExtensionName.equals(t.getName())); }
@Test public void isInstalled_fails_with_ISE_if_called_before_setLoadedCoreExtensions() { assertThatThrownBy(() -> underTest.isInstalled("foo")) .isInstanceOf(IllegalStateException.class) .hasMessage("Repository has not been initialized yet"); }
public static Long validateIssuedAt(String claimName, Long claimValue) throws ValidateException { if (claimValue != null && claimValue < 0) throw new ValidateException(String.format("%s value must be null or non-negative; value given was \"%s\"", claimName, claimValue)); return claimValue; }
@Test public void testValidateIssuedAtAllowsZero() { Long expected = 0L; Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected); assertEquals(expected, actual); }
@VisibleForTesting public static void validateAndResolveService(Service service, SliderFileSystem fs, org.apache.hadoop.conf.Configuration conf) throws IOException { boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, RegistryConstants.DEFAULT_DNS_ENABLED); if (dnsEnabled) { if (RegistryUtils.currentUser().length() > RegistryConstants.MAX_FQDN_LABEL_LENGTH) { throw new IllegalArgumentException( RestApiErrorMessages.ERROR_USER_NAME_INVALID); } userNamePattern.validate(RegistryUtils.currentUser()); } if (StringUtils.isEmpty(service.getName())) { throw new IllegalArgumentException( RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID); } if (StringUtils.isEmpty(service.getVersion())) { throw new IllegalArgumentException(String.format( RestApiErrorMessages.ERROR_APPLICATION_VERSION_INVALID, service.getName())); } validateNameFormat(service.getName(), conf); // If the service has no components, throw error if (!hasComponent(service)) { throw new IllegalArgumentException( "No component specified for " + service.getName()); } if (UserGroupInformation.isSecurityEnabled()) { validateKerberosPrincipal(service.getKerberosPrincipal()); } // Validate the Docker client config. 
try { validateDockerClientConfiguration(service, conf); } catch (IOException e) { throw new IllegalArgumentException(e); } // Validate there are no component name collisions (collisions are not // currently supported) and add any components from external services Configuration globalConf = service.getConfiguration(); Set<String> componentNames = new HashSet<>(); List<Component> componentsToRemove = new ArrayList<>(); List<Component> componentsToAdd = new ArrayList<>(); for (Component comp : service.getComponents()) { int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH; maxCompLength = maxCompLength - Long.toString(Long.MAX_VALUE).length(); if (dnsEnabled && comp.getName().length() > maxCompLength) { throw new IllegalArgumentException(String.format(RestApiErrorMessages .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName())); } if (service.getName().equals(comp.getName())) { throw new IllegalArgumentException(String.format(RestApiErrorMessages .ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME, comp.getName(), service.getName())); } if (componentNames.contains(comp.getName())) { throw new IllegalArgumentException("Component name collision: " + comp.getName()); } // If artifact is of type SERVICE (which cannot be filled from global), // read external service and add its components to this service if (comp.getArtifact() != null && comp.getArtifact().getType() == Artifact.TypeEnum.SERVICE) { if (StringUtils.isEmpty(comp.getArtifact().getId())) { throw new IllegalArgumentException( RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); } LOG.info("Marking {} for removal", comp.getName()); componentsToRemove.add(comp); List<Component> externalComponents = getComponents(fs, comp.getArtifact().getId()); for (Component c : externalComponents) { Component override = service.getComponent(c.getName()); if (override != null && override.getArtifact() == null) { // allow properties from external components to be overridden / // augmented by properties in this component, 
except for artifact // which must be read from external component override.mergeFrom(c); LOG.info("Merging external component {} from external {}", c .getName(), comp.getName()); } else { if (componentNames.contains(c.getName())) { throw new IllegalArgumentException("Component name collision: " + c.getName()); } componentNames.add(c.getName()); componentsToAdd.add(c); LOG.info("Adding component {} from external {}", c.getName(), comp.getName()); } } } else { // otherwise handle as a normal component componentNames.add(comp.getName()); // configuration comp.getConfiguration().mergeFrom(globalConf); } } service.getComponents().removeAll(componentsToRemove); service.getComponents().addAll(componentsToAdd); // Validate components and let global values take effect if component level // values are not provided Artifact globalArtifact = service.getArtifact(); Resource globalResource = service.getResource(); for (Component comp : service.getComponents()) { // fill in global artifact unless it is type SERVICE if (comp.getArtifact() == null && service.getArtifact() != null && service.getArtifact().getType() != Artifact.TypeEnum .SERVICE) { comp.setArtifact(globalArtifact); } // fill in global resource if (comp.getResource() == null) { comp.setResource(globalResource); } // validate dependency existence if (comp.getDependencies() != null) { for (String dependency : comp.getDependencies()) { if (!componentNames.contains(dependency)) { throw new IllegalArgumentException(String.format( RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency, comp.getName())); } } } validateComponent(comp, fs.getFileSystem(), conf); } validatePlacementPolicy(service.getComponents(), componentNames); // validate dependency tree sortByDependencies(service.getComponents()); // Service lifetime if not specified, is set to unlimited lifetime if (service.getLifetime() == null) { service.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME); } }
@Test(timeout = 90000) public void testResourceValidation() throws Exception { assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR .length()); SliderFileSystem sfs = ServiceTestUtils.initMockFs(); Service app = new Service(); // no name try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with no name"); } catch (IllegalArgumentException e) { assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage()); } app.setName("test"); // no version try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + " service with no version"); } catch (IllegalArgumentException e) { assertEquals(String.format(ERROR_APPLICATION_VERSION_INVALID, app.getName()), e.getMessage()); } app.setVersion("v1"); // bad format name String[] badNames = {"4finance", "Finance", "finance@home", LEN_64_STR}; for (String badName : badNames) { app.setName(badName); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with bad name " + badName); } catch (IllegalArgumentException e) { } } // launch command not specified app.setName(LEN_64_STR); Component comp = new Component().name("comp1"); app.addComponent(comp); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS); Assert.fail(EXCEPTION_PREFIX + "service with no launch command"); } catch (IllegalArgumentException e) { assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, e.getMessage()); } // launch command not specified app.setName(LEN_64_STR.substring(0, RegistryConstants .MAX_FQDN_LABEL_LENGTH)); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with no launch command"); } catch (IllegalArgumentException e) { assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, e.getMessage()); } // memory not specified comp.setLaunchCommand("sleep 1"); Resource res = new 
Resource(); app.setResource(res); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with no memory"); } catch (IllegalArgumentException e) { assertEquals(String.format( RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, comp.getName()), e.getMessage()); } // invalid no of cpus res.setMemory("100mb"); res.setCpus(-2); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail( EXCEPTION_PREFIX + "service with invalid no of cpus"); } catch (IllegalArgumentException e) { assertEquals(String.format( RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, comp.getName()), e.getMessage()); } // number of containers not specified res.setCpus(2); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with no container count"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.getMessage() .contains(ERROR_CONTAINERS_COUNT_INVALID)); } // specifying profile along with cpus/memory raises exception res.setProfile("hbase_finance_large"); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with resource profile along with cpus/memory"); } catch (IllegalArgumentException e) { assertEquals(String.format(RestApiErrorMessages .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, comp.getName()), e.getMessage()); } // currently resource profile alone is not supported. // TODO: remove the next test once resource profile alone is supported. 
res.setCpus(null); res.setMemory(null); try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "service with resource profile only"); } catch (IllegalArgumentException e) { assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET, e.getMessage()); } // unset profile here and add cpus/memory back res.setProfile(null); res.setCpus(2); res.setMemory("2gb"); // null number of containers try { ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); Assert.fail(EXCEPTION_PREFIX + "null number of containers"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.getMessage() .startsWith(ERROR_CONTAINERS_COUNT_INVALID)); } }
public static WildcardQueryBuilder wildcardQuery(String name, String query) { return new WildcardQueryBuilder(name, query); }
@Test public void testWildCardQuery() throws IOException { assertEquals("{\"wildcard\":{\"k1\":\"?aa*\"}}", toJson(QueryBuilders.wildcardQuery("k1", "?aa*"))); }
@Override public boolean isAutoUpdate() { return scmConfig.isAutoUpdate(); }
@Test public void shouldDelegateToSCMConfigForAutoUpdate() { SCM scm = mock(SCM.class); when(scm.isAutoUpdate()).thenReturn(false); PluggableSCMMaterialConfig pluggableSCMMaterialConfig = new PluggableSCMMaterialConfig(new CaseInsensitiveString("scm-name"), scm, null, null, false); assertThat(pluggableSCMMaterialConfig.isAutoUpdate(), is(false)); verify(scm).isAutoUpdate(); }
@CanIgnoreReturnValue public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) { List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs); return containsExactlyElementsIn( expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable); }
@Test public void iterableContainsExactlyWithOnlyNull() { Iterable<Object> actual = asList((Object) null); assertThat(actual).containsExactly((Object) null); }
@Nonnull public static <T> AggregateOperation1<T, LinTrendAccumulator, Double> linearTrend( @Nonnull ToLongFunctionEx<T> getXFn, @Nonnull ToLongFunctionEx<T> getYFn ) { checkSerializable(getXFn, "getXFn"); checkSerializable(getYFn, "getYFn"); return AggregateOperation .withCreate(LinTrendAccumulator::new) .andAccumulate((LinTrendAccumulator a, T item) -> a.accumulate(getXFn.applyAsLong(item), getYFn.applyAsLong(item))) .andCombine(LinTrendAccumulator::combine) .andDeduct(LinTrendAccumulator::deduct) .andExportFinish(LinTrendAccumulator::export); }
@Test public void when_linearTrend() { // Given AggregateOperation1<Entry<Long, Long>, LinTrendAccumulator, Double> op = linearTrend(Entry::getKey, Entry::getValue); Supplier<LinTrendAccumulator> createFn = op.createFn(); BiConsumer<? super LinTrendAccumulator, ? super Entry<Long, Long>> accFn = op.accumulateFn(); BiConsumer<? super LinTrendAccumulator, ? super LinTrendAccumulator> combineFn = op.combineFn(); BiConsumer<? super LinTrendAccumulator, ? super LinTrendAccumulator> deductFn = op.deductFn(); Function<? super LinTrendAccumulator, ? extends Double> finishFn = op.finishFn(); assertNotNull(createFn); assertNotNull(accFn); assertNotNull(combineFn); assertNotNull(deductFn); assertNotNull(finishFn); // When LinTrendAccumulator a1 = createFn.get(); accFn.accept(a1, entry(1L, 3L)); accFn.accept(a1, entry(2L, 5L)); assertEquals(2.0, finishFn.apply(a1), Double.MIN_VALUE); LinTrendAccumulator a2 = createFn.get(); accFn.accept(a2, entry(5L, 11L)); accFn.accept(a2, entry(6L, 13L)); assertEquals(2.0, finishFn.apply(a2), Double.MIN_VALUE); combineFn.accept(a1, a2); assertEquals(2.0, finishFn.apply(a1), Double.MIN_VALUE); deductFn.accept(a1, a2); assertEquals(2.0, finishFn.apply(a1), Double.MIN_VALUE); Double result = finishFn.apply(a1); assertEquals(Double.valueOf(2), result); // When LinTrendAccumulator acc = createFn.get(); // Then assertTrue("NaN expected if nothing accumulated", Double.isNaN(finishFn.apply(acc))); // When accFn.accept(acc, entry(2L, 1L)); // Then assertTrue("NaN expected if just single point accumulated", Double.isNaN(finishFn.apply(acc))); // When accFn.accept(acc, entry(2L, 1L)); // Then assertTrue("NaN expected if all data points are equal", Double.isNaN(finishFn.apply(acc))); // When accFn.accept(acc, entry(2L, 2L)); // Then assertTrue("NaN expected if all data points have same x value", Double.isNaN(finishFn.apply(acc))); }
/**
 * Removes and returns the first element of this deque, or returns {@code null}
 * when the deque is empty.
 *
 * @return the former head element, or {@code null} if the deque was empty
 */
@Override
public T pollFirst() {
    // An absent head means the deque is empty; otherwise unlink and return it.
    return _head == null ? null : removeNode(_head);
}
// Verifies that pollFirst removes and returns the head element and that the
// remaining deque matches a control list with its first element removed.
@Test public void testPollFirst() { List<Integer> control = new ArrayList<>(Arrays.asList(1, 2, 3)); LinkedDeque<Integer> q = new LinkedDeque<>(control); Assert.assertEquals(q.pollFirst(), control.remove(0)); Assert.assertEquals(q, control); }
/**
 * Convenience factory: returns a batch source that reads all files in the given
 * directory line by line, using the default settings of {@code filesBuilder}.
 *
 * @param directory path of the directory to read
 */
@Nonnull public static BatchSource<String> files(@Nonnull String directory) { return filesBuilder(directory).build(); }
// Verifies that Sources.files reads every line of every file in the directory;
// each cluster member reads all 4 lines in this local-directory setup, hence
// the expected count is 4 * nodeCount.
@Test public void files() throws Exception { // Given File directory = createTempDirectory(); File file1 = new File(directory, randomName()); appendToFile(file1, "hello", "world"); File file2 = new File(directory, randomName()); appendToFile(file2, "hello2", "world2"); // When BatchSource<String> source = Sources.files(directory.getPath()); // Then p.readFrom(source).writeTo(sink); execute(); int nodeCount = hz().getCluster().getMembers().size(); assertEquals(4 * nodeCount, sinkList.size()); }
/** Returns the fixed textual form of this distribution spec: {@code "ROUND_ROBIN"}. */
@Override public String toString() { return "ROUND_ROBIN"; }
// Verifies the constant string representation of RoundRobinDistributionSpec.
@Test void testToString() { DistributionSpec rr = new RoundRobinDistributionSpec(); assertEquals("ROUND_ROBIN", rr.toString()); }
/**
 * Returns whether any of the given role ids refers to a super-admin role.
 * An empty or null id collection yields {@code false}.
 *
 * @param ids role ids to check; may be empty
 */
@Override public boolean hasAnySuperAdmin(Collection<Long> ids) { if (CollectionUtil.isEmpty(ids)) { return false; } // Go through the Spring proxy (getSelf) so the cache annotations on getRoleFromCache apply. RoleServiceImpl self = getSelf(); return ids.stream().anyMatch(id -> { RoleDO role = self.getRoleFromCache(id); return role != null && RoleCodeEnum.isSuperAdmin(role.getCode()); }); }
// Verifies that a role whose code is not a super-admin code ("tenant_admin")
// makes hasAnySuperAdmin return false; SpringUtil is mocked so the self-proxy
// lookup resolves to the service under test.
@Test public void testHasAnySuperAdmin_false() { try (MockedStatic<SpringUtil> springUtilMockedStatic = mockStatic(SpringUtil.class)) { springUtilMockedStatic.when(() -> SpringUtil.getBean(eq(RoleServiceImpl.class))) .thenReturn(roleService); // mock 数据 RoleDO dbRole = randomPojo(RoleDO.class).setCode("tenant_admin"); roleMapper.insert(dbRole); // 准备参数 Long id = dbRole.getId(); // 调用,并调用 assertFalse(roleService.hasAnySuperAdmin(singletonList(id))); } }
/**
 * Reads the extension-index resource of every loaded plugin and returns a map from
 * plugin id to the set of extension class names declared by that plugin. A plugin
 * whose classpath lacks the resource contributes an empty set; an I/O failure for
 * one plugin is logged and that plugin is skipped (its partial bucket is not added).
 *
 * @return plugin id mapped to the extension class names it declares, in plugin order
 */
@Override public Map<String, Set<String>> readPluginsStorages() { log.debug("Reading extensions storages from plugins"); Map<String, Set<String>> result = new LinkedHashMap<>(); List<PluginWrapper> plugins = pluginManager.getPlugins(); for (PluginWrapper plugin : plugins) { String pluginId = plugin.getDescriptor().getPluginId(); log.debug("Reading extensions storage from plugin '{}'", pluginId); Set<String> bucket = new HashSet<>(); try { log.debug("Read '{}'", EXTENSIONS_RESOURCE); ClassLoader pluginClassLoader = plugin.getPluginClassLoader(); // try-with-resources ensures the jar entry stream is closed promptly (needed on Windows // so the plugin jar file is not left locked). try (InputStream resourceStream = pluginClassLoader.getResourceAsStream(EXTENSIONS_RESOURCE)) { if (resourceStream == null) { log.debug("Cannot find '{}'", EXTENSIONS_RESOURCE); } else { collectExtensions(resourceStream, bucket); } } debugExtensions(bucket); result.put(pluginId, bucket); } catch (IOException e) { log.error(e.getMessage(), e); } } return result; }
// Windows-only regression test: reading a plugin's extensions index must not keep
// the plugin jar locked — after unloading the plugin the jar file must be deletable.
@Test @EnabledOnOs(WINDOWS) public void shouldUnlockFileAfterReadingExtensionsFromPlugin() throws Exception { PluginJar pluginJar = new PluginJar.Builder(pluginsPath.resolve("test-plugin.jar"), "test-plugin") .pluginClass(TestPlugin.class.getName()) .pluginVersion("1.2.3") .extension(TestExtension.class.getName()) .build(); assertTrue(pluginJar.file().exists()); PluginManager pluginManager = new JarPluginManager(pluginsPath); pluginManager.loadPlugins(); assertEquals(1, pluginManager.getPlugins().size()); LegacyExtensionFinder extensionFinder = new LegacyExtensionFinder(pluginManager); Map<String, Set<String>> pluginsStorages = extensionFinder.readPluginsStorages(); assertNotNull(pluginsStorages); pluginManager.unloadPlugin(pluginJar.pluginId()); boolean fileDeleted = pluginJar.file().delete(); Set<String> pluginStorages = pluginsStorages.get(pluginJar.pluginId()); assertNotNull(pluginStorages); assertEquals(1, pluginStorages.size()); assertThat(pluginStorages, contains(TestExtension.class.getName())); assertTrue(fileDeleted); assertFalse(pluginJar.file().exists()); }
/**
 * Fetches the consumer connection list of a consumer group from the given broker.
 *
 * @param addr          broker address (may be rewritten to the VIP channel)
 * @param consumerGroup consumer group to query
 * @param timeoutMillis remoting timeout
 * @return decoded consumer connection information
 * @throws MQBrokerException when the broker answers with a non-success response code
 */
public ConsumerConnection getConsumerConnectionList(final String addr, final String consumerGroup, final long timeoutMillis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException { GetConsumerConnectionListRequestHeader requestHeader = new GetConsumerConnectionListRequestHeader(); requestHeader.setConsumerGroup(consumerGroup); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_CONNECTION_LIST, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); switch (response.getCode()) { case ResponseCode.SUCCESS: { return ConsumerConnection.decode(response.getBody(), ConsumerConnection.class); } default: break; } throw new MQBrokerException(response.getCode(), response.getRemark(), addr); }
// Verifies that a successful broker response body is decoded into a
// ConsumerConnection with the expected consume type, offset strategy and model.
@Test public void assertGetConsumerConnectionList() throws RemotingException, InterruptedException, MQBrokerException { mockInvokeSync(); ConsumerConnection responseBody = new ConsumerConnection(); responseBody.setConsumeType(ConsumeType.CONSUME_ACTIVELY); responseBody.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET); responseBody.setMessageModel(MessageModel.CLUSTERING); setResponseBody(responseBody); ConsumerConnection actual = mqClientAPI.getConsumerConnectionList(defaultBrokerAddr, "", defaultTimeout); assertNotNull(actual); assertEquals(ConsumeType.CONSUME_ACTIVELY, actual.getConsumeType()); assertEquals(ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET, actual.getConsumeFromWhere()); assertEquals(MessageModel.CLUSTERING, actual.getMessageModel()); }
/**
 * Subscribes the given consumer URL to instance changes of the mapped application set.
 * A ServiceInstancesChangedListener is created once per distinct app-set key (under a
 * per-key lock), pre-seeded with current instances, and shared by all service keys that
 * map to the same apps; the notify listener is then attached and notified immediately.
 *
 * @param url          consumer subscription URL
 * @param listener     listener to receive address notifications
 * @param serviceNames application names this service interface maps to
 */
protected void subscribeURLs(URL url, NotifyListener listener, Set<String> serviceNames) { serviceNames = toTreeSet(serviceNames); String serviceNamesKey = toStringKeys(serviceNames); String serviceKey = url.getServiceKey(); logger.info( String.format("Trying to subscribe from apps %s for service key %s, ", serviceNamesKey, serviceKey)); // register ServiceInstancesChangedListener Lock appSubscriptionLock = getAppSubscription(serviceNamesKey); try { appSubscriptionLock.lock(); ServiceInstancesChangedListener serviceInstancesChangedListener = serviceListeners.get(serviceNamesKey); if (serviceInstancesChangedListener == null) { serviceInstancesChangedListener = serviceDiscovery.createListener(serviceNames); for (String serviceName : serviceNames) { List<ServiceInstance> serviceInstances = serviceDiscovery.getInstances(serviceName); if (CollectionUtils.isNotEmpty(serviceInstances)) { serviceInstancesChangedListener.onEvent( new ServiceInstancesChangedEvent(serviceName, serviceInstances)); } } serviceListeners.put(serviceNamesKey, serviceInstancesChangedListener); } if (!serviceInstancesChangedListener.isDestroyed()) { listener.addServiceListener(serviceInstancesChangedListener); serviceInstancesChangedListener.addListenerAndNotify(url, listener); ServiceInstancesChangedListener finalServiceInstancesChangedListener = serviceInstancesChangedListener; String serviceDiscoveryName = url.getParameter(RegistryConstants.REGISTRY_CLUSTER_KEY, url.getProtocol()); MetricsEventBus.post( RegistryEvent.toSsEvent( url.getApplicationModel(), serviceKey, Collections.singletonList(serviceDiscoveryName)), () -> { serviceDiscovery.addServiceInstancesChangedListener(finalServiceInstancesChangedListener); return null; }); } else { logger.info(String.format("Listener of %s has been destroyed by another thread.", serviceNamesKey)); serviceListeners.remove(serviceNamesKey); } } finally { appSubscriptionLock.unlock(); } }
// End-to-end coverage of subscribeURLs: single-app mapping registers one instance
// listener; a multi-app mapping creates a second listener pre-seeded via onEvent;
// a second interface mapping to the same apps reuses the existing listener; finally
// the per-protocol listener registrations are checked.
@Test void testSubscribeURLs() { // interface to single app mapping Set<String> singleApp = new TreeSet<>(); singleApp.add(APP_NAME1); serviceDiscoveryRegistry.subscribeURLs(url, testServiceListener, singleApp); assertEquals(1, serviceDiscoveryRegistry.getServiceListeners().size()); verify(testServiceListener, times(1)).addServiceListener(instanceListener); verify(instanceListener, never()).onEvent(any()); verify(serviceDiscovery, times(1)).addServiceInstancesChangedListener(instanceListener); // interface to multiple apps mapping Set<String> multiApps = new TreeSet<>(); multiApps.add(APP_NAME1); multiApps.add(APP_NAME2); MockServiceInstancesChangedListener multiAppsInstanceListener = spy(new MockServiceInstancesChangedListener(multiApps, serviceDiscovery)); doNothing().when(multiAppsInstanceListener).onEvent(any()); List<URL> urls = new ArrayList<>(); urls.add(URL.valueOf("dubbo://127.0.0.1:20880/TestService")); doReturn(urls).when(multiAppsInstanceListener).getAddresses(any(), any()); when(serviceDiscovery.createListener(multiApps)).thenReturn(multiAppsInstanceListener); when(serviceDiscovery.getInstances(APP_NAME1)).thenReturn(instanceList1); when(serviceDiscovery.getInstances(APP_NAME2)).thenReturn(instanceList2); serviceDiscoveryRegistry.subscribeURLs(url, testServiceListener, multiApps); assertEquals(2, serviceDiscoveryRegistry.getServiceListeners().size()); assertEquals( instanceListener, serviceDiscoveryRegistry.getServiceListeners().get(toStringKeys(singleApp))); assertEquals( multiAppsInstanceListener, serviceDiscoveryRegistry.getServiceListeners().get(toStringKeys(multiApps))); verify(testServiceListener, times(1)).addServiceListener(multiAppsInstanceListener); verify(multiAppsInstanceListener, times(2)).onEvent(any()); verify(multiAppsInstanceListener, times(1)).addListenerAndNotify(any(), eq(testServiceListener)); verify(serviceDiscovery, times(1)).addServiceInstancesChangedListener(multiAppsInstanceListener); ArgumentCaptor<List<URL>> captor = 
ArgumentCaptor.forClass(List.class); verify(testServiceListener).notify(captor.capture()); assertEquals(urls, captor.getValue()); // different interface mapping to the same apps NotifyListener testServiceListener2 = mock(NotifyListener.class); URL url2 = URL.valueOf("tri://127.0.0.1/TestService2?interface=TestService2&check=false&protocol=tri"); when(testServiceListener2.getConsumerUrl()).thenReturn(url2); serviceDiscoveryRegistry.subscribeURLs(url2, testServiceListener2, multiApps); // check instance listeners not changed, methods not called assertEquals(2, serviceDiscoveryRegistry.getServiceListeners().size()); assertEquals( multiAppsInstanceListener, serviceDiscoveryRegistry.getServiceListeners().get(toStringKeys(multiApps))); verify(multiAppsInstanceListener, times(1)).addListenerAndNotify(any(), eq(testServiceListener)); // still called once, not executed this time verify(serviceDiscovery, times(2)).addServiceInstancesChangedListener(multiAppsInstanceListener); // check different protocol Map<String, Set<ServiceInstancesChangedListener.NotifyListenerWithKey>> serviceListeners = multiAppsInstanceListener.getServiceListeners(); assertEquals(2, serviceListeners.size()); assertEquals(1, serviceListeners.get(url.getServiceKey()).size()); assertEquals(1, serviceListeners.get(url2.getServiceKey()).size()); ProtocolServiceKey protocolServiceKey = new ProtocolServiceKey( url2.getServiceInterface(), url2.getVersion(), url2.getGroup(), url2.getParameter(PROTOCOL_KEY, DUBBO)); assertTrue(serviceListeners .get(url2.getServiceKey()) .contains(new ServiceInstancesChangedListener.NotifyListenerWithKey( protocolServiceKey, testServiceListener2))); }
/**
 * Returns the accepted certificate issuers of the currently-installed delegate
 * trust manager, or an empty array when no delegate is present.
 */
@Override
public X509Certificate[] getAcceptedIssuers() {
    // Snapshot the delegate once; it may be swapped concurrently by a reload.
    final X509TrustManager delegate = trustManagerRef.get();
    return delegate != null ? delegate.getAcceptedIssuers() : EMPTY;
}
// Verifies that a reloading trust manager can open a truststore without a
// password and still expose its accepted issuers; the monitoring timer is
// always cancelled to avoid leaking the background thread.
@Test public void testNoPassword() throws Exception { KeyPair kp = generateKeyPair("RSA"); cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA"); String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = new ReloadingX509TrustManager("jks", truststoreLocation, null); try { fileMonitoringTimer.schedule(new FileMonitoringTimerTask( Paths.get(truststoreLocation), tm::loadFrom,null), 10, 10); assertEquals(1, tm.getAcceptedIssuers().length); } finally { fileMonitoringTimer.cancel(); } }
/**
 * Builds the scanner-report metadata (project key, server URL/version, optional
 * branch, dashboard URL, CE task id/URL), publishes the task coordinates to the
 * report-data holder and dumps the metadata to the report file.
 *
 * NOTE(review): HttpUrl.parse returns null for a malformed URL, which would NPE on
 * newBuilder() — presumably the server URL is validated upstream; confirm.
 *
 * @param taskId identifier of the Compute Engine task created for this analysis
 */
void prepareAndDumpMetadata(String taskId) { Map<String, String> metadata = new LinkedHashMap<>(); metadata.put("projectKey", moduleHierarchy.root().key()); metadata.put("serverUrl", server.getPublicRootUrl()); metadata.put("serverVersion", server.getVersion()); properties.branch().ifPresent(branch -> metadata.put("branch", branch)); URL dashboardUrl = buildDashboardUrl(server.getPublicRootUrl(), moduleHierarchy.root().key()); metadata.put("dashboardUrl", dashboardUrl.toExternalForm()); URL taskUrl = HttpUrl.parse(server.getPublicRootUrl()).newBuilder() .addPathSegment("api").addPathSegment("ce").addPathSegment("task") .addQueryParameter(ID, taskId) .build() .url(); metadata.put("ceTaskId", taskId); metadata.put("ceTaskUrl", taskUrl.toExternalForm()); ceTaskReportDataHolder.init(taskId, taskUrl.toExternalForm(), dashboardUrl.toExternalForm()); dumpMetadata(metadata); }
// Verifies the exact metadata file content (key order and URL-encoding included)
// when a public server URL is configured and no branch is set.
@Test public void dump_public_url_if_defined_for_main_branch() throws IOException { when(server.getPublicRootUrl()).thenReturn("https://publicserver/sonarqube"); underTest.prepareAndDumpMetadata("TASK-123"); assertThat(readFileToString(properties.metadataFilePath().toFile(), StandardCharsets.UTF_8)).isEqualTo( "projectKey=org.sonarsource.sonarqube:sonarqube\n" + "serverUrl=https://publicserver/sonarqube\n" + "serverVersion=6.4\n" + "dashboardUrl=https://publicserver/sonarqube/dashboard?id=org.sonarsource.sonarqube%3Asonarqube\n" + "ceTaskId=TASK-123\n" + "ceTaskUrl=https://publicserver/sonarqube/api/ce/task?id=TASK-123\n"); }
/**
 * Caches the received app auth data so signature verification can look it up by app key.
 *
 * @param appAuthData the auth data pushed by the config subscription
 */
@Override public void onSubscribe(final AppAuthData appAuthData) { SignAuthDataCache.getInstance().cacheAuthData(appAuthData); }
// Verifies that subscribed auth data becomes retrievable from the cache by its app key.
@Test void onSubscribe() { AppAuthData appAuthData = new AppAuthData(); appAuthData.setAppKey("D9FD95F496C9495DB5604222A13C3D08"); appAuthData.setAppSecret("02D25048AA1E466F8920E68B08E668DE"); appAuthData.setEnabled(true); signAuthDataSubscriber.onSubscribe(appAuthData); assertEquals(SignAuthDataCache.getInstance().obtainAuthData("D9FD95F496C9495DB5604222A13C3D08"), appAuthData); }
/**
 * Wraps the given (non-null) properties in a new Environment instance.
 *
 * @param props backing properties; used directly, not copied
 */
public static Environment of(@NonNull Properties props) { var environment = new Environment(); environment.props = props; return environment; }
// Smoke test: loading an environment from a "file:" path must not throw.
// NOTE(review): there is no assertion here — consider asserting on the loaded
// properties (or on the thrown exception for a missing file).
@Test public void testNoEnvByFileString() { String path = BladeConst.CLASSPATH + "/application.properties"; Environment.of("file:" + path); }
/** Returns the set of changed line numbers as collected by the underlying tracker. */
Set<Integer> changedLines() { return tracker.changedLines(); }
// Verifies that a diff hunk containing only a deletion contributes no changed lines.
@Test public void do_not_count_deleted_line() throws IOException { String example = "diff --git a/file-b1.xoo b/file-b1.xoo\n" + "index 0000000..c2a9048\n" + "--- a/foo\n" + "+++ b/bar\n" + "@@ -1 +0,0 @@\n" + "-deleted line\n"; printDiff(example); assertThat(underTest.changedLines()).isEmpty(); }
/**
 * Fetches partition metadata for a single topic.
 *
 * @param topic                  topic to look up
 * @param allowAutoTopicCreation whether the broker may auto-create the topic
 * @param timer                  bounds the total time spent on the request
 * @return the topic's partitions, or {@code null} if the response contains no entry for it
 */
public List<PartitionInfo> getTopicMetadata(String topic, boolean allowAutoTopicCreation, Timer timer) { MetadataRequest.Builder request = new MetadataRequest.Builder(Collections.singletonList(topic), allowAutoTopicCreation); Map<String, List<PartitionInfo>> topicMetadata = getTopicMetadata(request, timer); return topicMetadata.get(topic); }
// Verifies that a retriable LEADER_NOT_AVAILABLE error is retried within the timer
// and the subsequent successful response yields non-null topic metadata.
@Test public void testGetTopicMetadataLeaderNotAvailable() { buildFetcher(); assignFromUser(singleton(tp0)); client.prepareResponse(newMetadataResponse(Errors.LEADER_NOT_AVAILABLE)); client.prepareResponse(newMetadataResponse(Errors.NONE)); List<PartitionInfo> topicMetadata = topicMetadataFetcher.getTopicMetadata(topicName, true, time.timer(5000L)); assertNotNull(topicMetadata); }
/**
 * Handles the public category listing endpoint: builds a query from the request,
 * pages through Category resources, maps each to a CategoryVo and renders the
 * paged result as a JSON response.
 */
private Mono<ServerResponse> listCategories(ServerRequest request) { CategoryPublicQuery query = new CategoryPublicQuery(request.exchange()); return client.listBy(Category.class, query.toListOptions(), query.toPageRequest()) .map(listResult -> toAnotherListResult(listResult, CategoryVo::from)) .flatMap(result -> ServerResponse.ok() .contentType(MediaType.APPLICATION_JSON) .bodyValue(result) ); }
// Verifies the categories endpoint returns 200 with a JSON page whose total and
// items reflect the (empty) backing list result.
@Test void listCategories() { ListResult<Category> listResult = new ListResult<>(List.of()); when(client.listBy(eq(Category.class), any(ListOptions.class), any(PageRequest.class))) .thenReturn(Mono.just(listResult)); webTestClient.get() .uri("/categories?page=1&size=10") .exchange() .expectStatus().isOk() .expectHeader().contentType(MediaType.APPLICATION_JSON) .expectBody() .jsonPath("$.total").isEqualTo(listResult.getTotal()) .jsonPath("$.items").isArray(); }
/**
 * Appends the given records to the end of this file segment.
 *
 * @param records records to write fully to the underlying channel
 * @return number of bytes written
 * @throws IllegalArgumentException if the append would push the segment size past
 *         Integer.MAX_VALUE (the guard is phrased to avoid int overflow)
 * @throws IOException on channel write failure
 */
public int append(MemoryRecords records) throws IOException { if (records.sizeInBytes() > Integer.MAX_VALUE - size.get()) throw new IllegalArgumentException("Append of size " + records.sizeInBytes() + " bytes is too large for segment with current file position at " + size.get()); int written = records.writeFullyTo(channel); size.getAndAdd(written); return written; }
// Verifies the overflow guard: appending to a segment already at Integer.MAX_VALUE
// must throw IllegalArgumentException instead of silently overflowing the size.
@Test public void testAppendProtectsFromOverflow() throws Exception { File fileMock = mock(File.class); FileChannel fileChannelMock = mock(FileChannel.class); when(fileChannelMock.size()).thenReturn((long) Integer.MAX_VALUE); FileRecords records = new FileRecords(fileMock, fileChannelMock, 0, Integer.MAX_VALUE, false); assertThrows(IllegalArgumentException.class, () -> append(records, values)); }
/**
 * Converts an external configuration payload into an instance of the given
 * configuration class. Tries the schema-based path first; if the class has no
 * registered schema, falls back to reflective setter-based construction.
 *
 * @param payload            encoded configuration row
 * @param configurationClass target configuration type
 * @throws IllegalArgumentException if the setter-based fallback also fails
 */
@VisibleForTesting public static <ConfigT> ConfigT payloadToConfig( ExternalConfigurationPayload payload, Class<ConfigT> configurationClass) { try { return payloadToConfigSchema(payload, configurationClass); } catch (NoSuchSchemaException schemaException) { LOG.warn( "Configuration class '{}' has no schema registered. Attempting to construct with setter" + " approach.", configurationClass.getName()); try { return payloadToConfigSetters(payload, configurationClass); } catch (ReflectiveOperationException e) { throw new IllegalArgumentException( String.format( "Failed to construct instance of configuration class '%s'", configurationClass.getName()), e); } } }
// Verifies schema-based decoding of a payload row (long, string, string-array)
// into a TestConfigSimpleSchema with matching field values.
@Test public void testExternalConfiguration_simpleSchema() throws Exception { ExternalTransforms.ExternalConfigurationPayload externalConfig = encodeRowIntoExternalConfigurationPayload( Row.withSchema( Schema.of( Field.of("bar", FieldType.STRING), Field.of("foo", FieldType.INT64), Field.of("list", FieldType.array(FieldType.STRING)))) .withFieldValue("foo", 1L) .withFieldValue("bar", "test string") .withFieldValue("list", ImmutableList.of("abc", "123")) .build()); TestConfigSimpleSchema config = ExpansionService.payloadToConfig(externalConfig, TestConfigSimpleSchema.class); assertThat(config.getFoo(), Matchers.is(1L)); assertThat(config.getBar(), Matchers.is("test string")); assertThat(config.getList(), Matchers.is(ImmutableList.of("abc", "123"))); }
/**
 * PostgreSQL dialect provides no "sane" fallback result for failed statements,
 * so this always returns {@link Optional#empty()}.
 */
@Override public Optional<ExecuteResult> getSaneQueryResult(final SQLStatement sqlStatement, final SQLException ex) { return Optional.empty(); }
// Verifies the PostgreSQL engine always yields an empty sane-query result.
@Test void assertGetSaneQueryResult() { assertThat(new PostgreSQLDialectSaneQueryResultEngine().getSaneQueryResult(null, null), is(Optional.empty())); }
/**
 * Flushes all changes registered in the unit-of-work context to the database,
 * processing inserts, then modifications, then deletions. A null or empty
 * context is a no-op (nothing is logged either).
 */
@Override public void commit() { if (context == null || context.isEmpty()) { return; } LOGGER.info("Commit started"); if (context.containsKey(UnitActions.INSERT.getActionValue())) { commitInsert(); } if (context.containsKey(UnitActions.MODIFY.getActionValue())) { commitModify(); } if (context.containsKey(UnitActions.DELETE.getActionValue())) { commitDelete(); } LOGGER.info("Commit finished."); }
// Verifies that commit forwards each registered action (insert/modify/delete)
// exactly once to the weapon database.
@Test void shouldSaveAllLocalChangesToDb() { context.put(UnitActions.INSERT.getActionValue(), List.of(weapon1)); context.put(UnitActions.MODIFY.getActionValue(), List.of(weapon1)); context.put(UnitActions.DELETE.getActionValue(), List.of(weapon1)); armsDealer.commit(); verify(weaponDatabase, times(1)).insert(weapon1); verify(weaponDatabase, times(1)).modify(weapon1); verify(weaponDatabase, times(1)).delete(weapon1); }
/**
 * Proceeds with the join point and decorates the returned RxJava 3 reactive type
 * with the circuit breaker operator.
 *
 * @param proceedingJoinPoint intercepted method invocation
 * @param circuitBreaker      circuit breaker to apply
 * @param methodName          name used for diagnostics in the aspect helper
 * @return the decorated reactive return value
 */
@SuppressWarnings("unchecked") @Override public Object handle(ProceedingJoinPoint proceedingJoinPoint, CircuitBreaker circuitBreaker, String methodName) throws Throwable { CircuitBreakerOperator circuitBreakerOperator = CircuitBreakerOperator.of(circuitBreaker); Object returnValue = proceedingJoinPoint.proceed(); return executeRxJava3Aspect(circuitBreakerOperator, returnValue, methodName); }
// Verifies the aspect decorates both Single and Flowable return values without
// returning null.
@Test public void testRxTypes() throws Throwable { CircuitBreaker circuitBreaker = CircuitBreaker.ofDefaults("test"); when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test")); assertThat(rxJava3CircuitBreakerAspectExt .handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull(); when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test")); assertThat(rxJava3CircuitBreakerAspectExt .handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull(); }
/**
 * Triggers a checkpoint asynchronously by posting the work to the mailbox thread.
 * If all input gates are finished, a regular checkpoint is triggered; otherwise a
 * checkpoint that accounts for unfinished channels is used. A failure both
 * completes the returned future exceptionally and is rethrown so the mailbox's
 * own error handling sees it.
 *
 * @return future completing with the trigger result, or exceptionally on failure
 */
@Override public CompletableFuture<Boolean> triggerCheckpointAsync( CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) { checkForcedFullSnapshotSupport(checkpointOptions); CompletableFuture<Boolean> result = new CompletableFuture<>(); mainMailboxExecutor.execute( () -> { try { boolean noUnfinishedInputGates = Arrays.stream(getEnvironment().getAllInputGates()) .allMatch(InputGate::isFinished); if (noUnfinishedInputGates) { result.complete( triggerCheckpointAsyncInMailbox( checkpointMetaData, checkpointOptions)); } else { result.complete( triggerUnfinishedChannelsCheckpoint( checkpointMetaData, checkpointOptions)); } } catch (Exception ex) { // Report the failure both via the Future result but also to the mailbox result.completeExceptionally(ex); throw ex; } }, "checkpoint %s with %s", checkpointMetaData, checkpointOptions); return result; }
// Verifies that a failure in the asynchronous part of checkpointing surfaces through
// the task thread's uncaught-exception handler with the original cause.
@Test void testUncaughtExceptionInAsynchronousCheckpointingOperation() throws Exception { final RuntimeException failingCause = new RuntimeException("Test exception"); FailingDummyEnvironment failingDummyEnvironment = new FailingDummyEnvironment(failingCause); // mock the returned snapshots OperatorSnapshotFutures operatorSnapshotResult = new OperatorSnapshotFutures( ExceptionallyDoneFuture.of(failingCause), DoneFuture.of(SnapshotResult.empty()), DoneFuture.of(SnapshotResult.empty()), DoneFuture.of(SnapshotResult.empty()), DoneFuture.of(SnapshotResult.empty()), DoneFuture.of(SnapshotResult.empty())); final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler(); RunningTask<MockStreamTask> task = runTask( () -> new MockStreamTask( failingDummyEnvironment, operatorChain( streamOperatorWithSnapshot(operatorSnapshotResult)), uncaughtExceptionHandler)); MockStreamTask streamTask = task.streamTask; waitTaskIsRunning(streamTask, task.invocationFuture); streamTask.triggerCheckpointAsync( new CheckpointMetaData(42L, 1L), CheckpointOptions.forCheckpointWithDefaultLocation()); final Throwable uncaughtException = uncaughtExceptionHandler.waitForUncaughtException(); assertThat(uncaughtException).isSameAs(failingCause); streamTask.finishInput(); task.waitForTaskCompletion(false); }
/**
 * Factory for MySQL command packets: decodes the packet matching the given command
 * type from the payload. COM_STMT_EXECUTE additionally looks up the prepared
 * statement (by the little-endian statement id at the payload's reader index) to
 * learn the parameter count. Unknown types yield an "unsupported" placeholder packet.
 *
 * @param commandPacketType command type byte already read from the payload
 * @param payload           remaining packet payload
 * @param connectionSession session owning the prepared-statement registry
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload, final ConnectionSession connectionSession) { switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitPacket(); case COM_INIT_DB: return new MySQLComInitDbPacket(payload); case COM_FIELD_LIST: return new MySQLComFieldListPacket(payload); case COM_QUERY: return new MySQLComQueryPacket(payload); case COM_STMT_PREPARE: return new MySQLComStmtPreparePacket(payload); case COM_STMT_EXECUTE: MySQLServerPreparedStatement serverPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex())); return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount()); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataPacket(payload); case COM_STMT_RESET: return new MySQLComStmtResetPacket(payload); case COM_STMT_CLOSE: return new MySQLComStmtClosePacket(payload); case COM_SET_OPTION: return new MySQLComSetOptionPacket(payload); case COM_PING: return new MySQLComPingPacket(); case COM_RESET_CONNECTION: return new MySQLComResetConnectionPacket(); default: return new MySQLUnsupportedCommandPacket(commandPacketType); } }
// Verifies the factory maps COM_STMT_RESET to a MySQLComStmtResetPacket.
@Test void assertNewInstanceWithComStmtResetPacket() { assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_STMT_RESET, payload, connectionSession), instanceOf(MySQLComStmtResetPacket.class)); }
/**
 * Attempts to construct an exception of the given class with the supplied message
 * and cause, trying each known constructor strategy in turn. Strategy-specific
 * reflective failures are swallowed and the next strategy is tried; any other
 * failure aborts with a RuntimeException.
 *
 * @return the created exception, or {@code null} when no strategy succeeded
 *         (callers must handle the null — this method fails silently by design)
 */
@SuppressWarnings("checkstyle:npathcomplexity") public static <T extends Throwable> T tryCreateExceptionWithMessageAndCause(Class<T> exceptionClass, String message, @Nullable Throwable cause) { for (ConstructorMethod method : ConstructorMethod.values()) { try { return method.cloneException(exceptionClass, message, cause); } catch (ClassCastException | WrongMethodTypeException | IllegalAccessException | SecurityException | NoSuchMethodException | ClassNotFoundException ignored) { } catch (Throwable t) { throw new RuntimeException("Exception creation failed ", t); } } return null; }
// Smoke test: creating an exception whose no-arg constructor implicitly sets a
// cause must not blow up in the strategy loop.
@Test public void testCanCreateExceptionsWithMessageAndCauseWhenExceptionHasCauseSetImplicitlyByNoArgumentConstructor() { ExceptionUtil.tryCreateExceptionWithMessageAndCause( ExceptionThatHasCauseImplicitlyByNoArgumentConstructor.class, "", new RuntimeException() ); }
/**
 * Varargs convenience overload: delegates to the List-based parse.
 *
 * @param args command-line arguments
 */
public RuntimeOptionsBuilder parse(String... args) { return parse(Arrays.asList(args)); }
// Verifies that the --threads argument is parsed into the thread-count option.
@Test void ensure_threads_param_is_used() { RuntimeOptions options = parser .parse("--threads", "10") .build(); assertThat(options.getThreads(), is(10)); }
/**
 * Convenience overload: resolves a logger named after the class's fully-qualified name.
 *
 * @param clazz class whose name becomes the logger name
 */
public final Logger getLogger(final Class<?> clazz) { return getLogger(clazz.getName()); }
// Verifies level inheritance across a multi-level logger hierarchy: setting INFO
// on "w.x" propagates as the effective level of "w.x.y" and "w.x.y.z".
@Test public void testMultiLevel() { Logger wxyz = lc.getLogger("w.x.y.z"); LoggerTestHelper.assertNameEquals(wxyz, "w.x.y.z"); LoggerTestHelper.assertLevels(null, wxyz, Level.DEBUG); Logger wx = lc.getLogger("w.x"); wx.setLevel(Level.INFO); LoggerTestHelper.assertNameEquals(wx, "w.x"); LoggerTestHelper.assertLevels(Level.INFO, wx, Level.INFO); LoggerTestHelper.assertLevels(null, lc.getLogger("w.x.y"), Level.INFO); LoggerTestHelper.assertLevels(null, wxyz, Level.INFO); }
/**
 * Writes the four bytes of {@code value} to the stream in little-endian order
 * (least-significant byte first).
 *
 * @param outputStream destination stream
 * @param value        32-bit value to encode
 * @throws IOException if the underlying stream write fails
 */
static void writeIntegerLittleEndian(OutputStream outputStream, int value) throws IOException {
    for (int shift = 0; shift < Integer.SIZE; shift += Byte.SIZE) {
        outputStream.write((value >>> shift) & 0xFF);
    }
}
// Verifies little-endian encoding for zero, a small positive value, a value near
// Integer.MAX_VALUE and a negative value (two's complement).
@Test public void testWriteIntegerLittleEndian() throws Exception { testWriteIntegerLittleEndian(0, bytes(0, 0, 0, 0)); testWriteIntegerLittleEndian(42, bytes(42, 0, 0, 0)); testWriteIntegerLittleEndian(Integer.MAX_VALUE - 5, bytes(0xFA, 0xFF, 0xFF, 0x7F)); testWriteIntegerLittleEndian(-7, bytes(0xF9, 0xFF, 0xFF, 0xFF)); }
/**
 * Determines whether a message stanza should be copied to the user's other connected
 * resources per XEP-0280 (Message Carbons). MUC-related rules are evaluated first
 * (invitations are copied, private messages from remote MUC participants are not),
 * then the general eligibility rules (type, body, IM payload elements).
 *
 * NOTE(review): inside the outer MUC branch, the <x xmlns="...muc#user"> null-check
 * is repeated in the mediated-invitation condition — redundant but harmless.
 *
 * @param stanza the message to classify
 * @return {@code true} when the stanza is eligible for carbons delivery
 */
public static boolean isEligibleForCarbonsDelivery(final Message stanza) { // To properly handle messages exchanged with a MUC (or similar service), the server must be able to identify MUC-related messages. // This can be accomplished by tracking the clients' presence in MUCs, or by checking for the <x xmlns="http://jabber.org/protocol/muc#user"> // element in messages. The following rules apply to MUC-related messages: if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null) { // A <message/> containing a Direct MUC Invitations (XEP-0249) SHOULD be carbon-copied. if (containsChildElement(stanza, Set.of("x"), "jabber:x:conference")) { return true; } // A <message/> containing a Mediated Invitation SHOULD be carbon-copied. if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null && stanza.getChildElement("x", "http://jabber.org/protocol/muc#user").element("invite") != null) { return true; } // A private <message/> from a local user to a MUC participant (sent to a full JID) SHOULD be carbon-copied // The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC, and MAY // inject the <x/> element into such carbon copies. Clients can not respond to carbon-copies of MUC-PMs // related to a MUC they are not joined to. Therefore, they SHOULD either ignore such carbon copies, or // provide a way for the user to join the MUC before answering. if (stanza.getTo() != null && stanza.getTo().getResource() != null && stanza.getFrom() != null && stanza.getFrom().getNode() != null && XMPPServer.getInstance().isLocal(stanza.getFrom())) { return true; // TODO The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC (OF-2780). } // A private <message/> from a MUC participant (received from a full JID) to a local user SHOULD NOT be // carbon-copied (these messages are already replicated by the MUC service to all joined client instances). 
if (stanza.getFrom() != null && stanza.getFrom().getResource() != null && stanza.getTo() != null && stanza.getTo().getNode() != null && XMPPServer.getInstance().isLocal(stanza.getTo())) { return false; } } // A <message/> of type "groupchat" SHOULD NOT be carbon-copied. if (stanza.getType() == Message.Type.groupchat) { return false; } // A <message/> is eligible for carbons delivery if it does not contain a <private/> child element... if (containsChildElement(stanza, Set.of("private", "received"), "urn:xmpp:carbons")) { return false; } // and if at least one of the following is true: // ... it is of type "chat". if (stanza.getType() == Message.Type.chat) { return true; } // ... it is of type "normal" and contains a <body> element. if ((stanza.getType() == null || stanza.getType() == Message.Type.normal) && stanza.getBody() != null) { return true; } // ... it contains payload elements typically used in IM if (containsChildElement(stanza, Set.of("request", "received"), "urn:xmpp:receipts") // Message Delivery Receipts (XEP-0184) || containsChildElement(stanza, Set.of("active", "inactive", "gone", "composing", "paused"), "http://jabber.org/protocol/chatstates") // Chat State Notifications (XEP-0085) || (containsChildElement(stanza, Set.of("markable", "received", "displayed", "acknowledged"), "urn:xmpp:chat-markers")) // Chat Markers (XEP-0333)). ) { return true; } // ... it is of type "error" and it was sent in response to a <message/> that was eligible for carbons delivery. // TODO implement me (OF-2779) return false; }
// Verifies that a private MUC message sent by a local user to a full JID
// (occupant) is eligible for carbons delivery.
@Test public void testMucPrivateMessageSent() throws Exception { // Setup test fixture. final Message input = new Message(); input.setTo("room@domain/nick"); input.setFrom(new JID("user", Fixtures.XMPP_DOMAIN, "resource")); input.setType(Message.Type.chat); input.setBody("test"); input.getElement().addElement("x", "http://jabber.org/protocol/muc#user"); // Execute system under test. final boolean result = Forwarded.isEligibleForCarbonsDelivery(input); // Verify results. assertTrue(result); }
/**
 * Validates that the string parses as a long within [min, max] (inclusive).
 *
 * @param value decimal string to validate
 * @throws NumberFormatException if the value is not a valid long
 * @throws RuntimeException if the parsed value is outside the configured range
 */
@Override public void verify(String value) { long l = Long.parseLong(value); if (l < min || l > max) { throw new RuntimeException(format("value is not in range(%d, %d)", min, max)); } }
// Verifies that a value above the configured maximum triggers a RuntimeException
// with the expected range message.
@Test public void verify_ValueGreaterThanMax_ThrowsRuntimeException() { RuntimeException exception = assertThrows(RuntimeException.class, () -> longRangeAttribute.verify("101")); assertEquals("value is not in range(0, 100)", exception.getMessage()); }
@Override public int compare(String indexName1, String indexName2) { int separatorPosition = indexName1.lastIndexOf(separator); int index1Number; final String indexPrefix1 = separatorPosition != -1 ? indexName1.substring(0, separatorPosition) : indexName1; try { index1Number = Integer.parseInt(indexName1.substring(separatorPosition + 1)); } catch (Exception e) { index1Number = Integer.MIN_VALUE; //wrongly formatted index names go last } separatorPosition = indexName2.lastIndexOf(separator); int index2Number; final String indexPrefix2 = separatorPosition != -1 ? indexName2.substring(0, separatorPosition) : indexName2; try { index2Number = Integer.parseInt(indexName2.substring(separatorPosition + 1)); } catch (NumberFormatException e) { index2Number = Integer.MIN_VALUE; //wrongly formatted index names go last } final int prefixComparisonResult = indexPrefix1.compareTo(indexPrefix2); if (prefixComparisonResult == 0) { return -Integer.compare(index1Number, index2Number); } else { return prefixComparisonResult; } }
// A malformed numeric suffix ("1!1") maps to Integer.MIN_VALUE in the comparator,
// so it must sort after a well-formed name with the same prefix — in both
// argument orders.
@Test
void isImmuneToWrongNumbersWhichGoLast() {
    assertTrue(comparator.compare("lalala_1!1", "lalala_3") > 0);
    assertTrue(comparator.compare("lalala_3", "lalala_1!1") < 0);
}
// Convenience overload: trims the record's underlying DataMap against the
// projection mask by delegating to the (DataMap, schema) variant.
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch)
{
    trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch);
}
// A positive projection mask using wildcard map keys must keep all masked fields
// ("location" and the raw "bar" entry) of every value in the nested record map.
@Test
public void testOverrideMaskNestedWithMap() throws CloneNotSupportedException {
    TyperefTest test = new TyperefTest();
    RecordBar bar = new RecordBar();
    bar.setLocation("foo");
    // Extra field written directly to the underlying data map, covered by the mask.
    bar.data().put("bar", "keep me");
    RecordBar expected = bar.clone();
    test.setBarRefMap(new RecordBarMap());
    test.getBarRefMap().put("foo", bar);

    MaskTree maskTree = new MaskTree();
    maskTree.addOperation(new PathSpec("barRefMap", PathSpec.WILDCARD, "location"), MaskOperation.POSITIVE_MASK_OP);
    maskTree.addOperation(new PathSpec("barRefMap", PathSpec.WILDCARD, "bar"), MaskOperation.POSITIVE_MASK_OP);

    RestUtils.trimRecordTemplate(test, maskTree, false);
    Assert.assertEquals(test.getBarRefMap().get("foo"), expected);
}
// Returns the number of keys in the given cluster node's database by issuing
// the DBSIZE command against that node.
@Override
public Long dbSize(RedisClusterNode node) {
    return execute(node, RedisCommands.DBSIZE);
}
// A freshly started master node must report an empty database.
@Test
public void testDbSize() {
    final RedisClusterNode master = getFirstMaster();
    final Long size = connection.dbSize(master);
    assertThat(size).isZero();
}
// Executes each parsed statement in order, collecting the entities they produce.
public KsqlEntityList execute(
    final KsqlSecurityContext securityContext,
    final List<ParsedStatement> statements,
    final SessionProperties sessionProperties
) {
    final KsqlEntityList entities = new KsqlEntityList();
    for (final ParsedStatement parsed : statements) {
        // Session variables are only substituted when the feature is enabled
        // for this session; otherwise an empty map disables substitution.
        final PreparedStatement<?> prepared = ksqlEngine.prepare(
            parsed,
            (isVariableSubstitutionEnabled(sessionProperties)
                ? sessionProperties.getSessionVariables()
                : Collections.emptyMap())
        );
        // The accumulated list is also passed in — presumably so the executor
        // can wait on previously distributed statements; TODO confirm.
        // Not every statement yields an entity, hence the Optional.
        executeStatement(
            securityContext,
            prepared,
            sessionProperties,
            entities
        ).ifPresent(entities::add);
    }
    return entities;
}
// Each distributed statement must wait on the entities produced so far; since
// the same (mutated) list instance is passed every time, only the call count
// and the final contents can be verified.
@Test
public void shouldWaitForDistributedStatements() {
    // Given
    final KsqlEntity entity1 = mock(KsqlEntity.class);
    final KsqlEntity entity2 = mock(KsqlEntity.class);
    final KsqlEntity entity3 = mock(KsqlEntity.class);

    final StatementExecutor<CreateStream> customExecutor = givenReturningExecutor(
        CreateStream.class,
        entity1,
        entity2,
        entity3);
    givenRequestHandler(
        ImmutableMap.of(CreateStream.class, customExecutor)
    );

    final List<ParsedStatement> statements =
        KSQL_PARSER.parse(
            "CREATE STREAM x WITH (value_format='json', kafka_topic='x');"
                + "CREATE STREAM y WITH (value_format='json', kafka_topic='y');"
                + "CREATE STREAM z WITH (value_format='json', kafka_topic='z');"
        );

    // When
    handler.execute(securityContext, statements, sessionProperties);

    // Then
    verify(sync).waitFor(argThat(hasItems(entity1, entity2)), any());
    // since the entities passed into sync#waitFor are always the same object, mockito
    // cannot verify the original two arguments
    verify(sync, times(3)).waitFor(any(), any());
}
// Parses one "match" step of the Karate DSL into its parts: name (the LHS
// variable/expression), an optional trailing path (json-path/xpath), the
// expected RHS text, and the match type derived from the
// each/not/contains/only/any/deep flags.
public MatchStep(String raw) {
    boolean each = false;
    raw = raw.trim();
    // A leading "each" applies the match to every element of the LHS collection.
    if (raw.startsWith("each")) {
        each = true;
        raw = raw.substring(4).trim();
    }
    boolean contains = false;
    boolean not = false;
    boolean only = false;
    boolean any = false;
    boolean deep = false;
    int spacePos = raw.indexOf(' ');
    int leftParenPos = raw.indexOf('(');
    int rightParenPos = raw.indexOf(')');
    // A " contains" / " !contains" operator marks the end of the LHS.
    int lhsEndPos = raw.indexOf(" contains");
    if (lhsEndPos == -1) {
        lhsEndPos = raw.indexOf(" !contains");
    }
    int searchPos = 0;
    int eqPos = raw.indexOf(" == ");
    if (eqPos == -1) {
        eqPos = raw.indexOf(" != ");
    }
    // Only treat "contains" as the operator when no ==/!= appears before it
    // (e.g. "response.foo == 'a contains b'" must stay an equality match).
    if (lhsEndPos != -1 && (eqPos == -1 || eqPos > lhsEndPos)) {
        contains = true;
        not = raw.charAt(lhsEndPos + 1) == '!';
        searchPos = lhsEndPos + (not ? 10 : 9);
        // Check for the optional qualifiers that refine a contains match.
        String anyOrOnlyOrDeep = raw.substring(searchPos).trim();
        if (anyOrOnlyOrDeep.startsWith("only deep")) {
            int onlyPos = raw.indexOf(" only deep", searchPos);
            only = true;
            deep = true;
            searchPos = onlyPos + 10;
        } else if (anyOrOnlyOrDeep.startsWith("only")) {
            int onlyPos = raw.indexOf(" only", searchPos);
            only = true;
            searchPos = onlyPos + 5;
        } else if (anyOrOnlyOrDeep.startsWith("any")) {
            int anyPos = raw.indexOf(" any", searchPos);
            any = true;
            searchPos = anyPos + 4;
        } else if (anyOrOnlyOrDeep.startsWith("deep")) {
            int deepPos = raw.indexOf(" deep", searchPos);
            deep = true;
            searchPos = deepPos + 5;
        }
    } else {
        int equalPos = raw.indexOf(" ==", searchPos);
        int notEqualPos = raw.indexOf(" !=", searchPos);
        if (equalPos == -1 && notEqualPos == -1) {
            throw new RuntimeException("syntax error, expected '==' for match");
        }
        lhsEndPos = min(equalPos, notEqualPos);
        // If the first ==/!= sits inside a parenthesised LHS expression
        // (e.g. "driver.eval('..==..')"), re-scan after the closing paren.
        if (lhsEndPos > spacePos && rightParenPos != -1 && rightParenPos > lhsEndPos && rightParenPos < leftParenPos) {
            equalPos = raw.indexOf(" ==", rightParenPos);
            notEqualPos = raw.indexOf(" !=", rightParenPos);
            if (equalPos == -1 && notEqualPos == -1) {
                throw new RuntimeException("syntax error, expected '==' for match");
            }
            lhsEndPos = min(equalPos, notEqualPos);
        }
        not = lhsEndPos == notEqualPos;
        searchPos = lhsEndPos + 3;
    }
    String lhs = raw.substring(0, lhsEndPos).trim();
    if (leftParenPos == -1) {
        leftParenPos = lhs.indexOf('['); // used later to test for json-path
        char first = lhs.charAt(0);
        if (first == '[' || first == '{') { // json array or object
            spacePos = -1; // just use lhs
            lhs = "(" + lhs + ")";
        }
    }
    // Split the LHS into a name and an optional trailing path expression.
    if (spacePos != -1 && (leftParenPos > spacePos || leftParenPos == -1)) {
        name = lhs.substring(0, spacePos);
        path = StringUtils.trimToNull(lhs.substring(spacePos));
    } else {
        name = lhs;
        path = null;
    }
    expected = StringUtils.trimToNull(raw.substring(searchPos));
    type = getType(each, not, contains, only, any, deep);
}
// Table-driven coverage of MatchStep parsing: operators (==, !=, contains and
// its any/only/deep variants), "each" prefix, inline JSON LHS, parenthesised
// LHS expressions, json-path/xpath paths, and RHS strings that themselves
// contain operator-like words.
@Test
void testMatchStep() {
    test("aXml //active == '#regex (false|true)'", EQUALS, "aXml", "//active", "'#regex (false|true)'");
    test("hello ==", EQUALS, "hello", null, null);
    test("hello world == foo", EQUALS, "hello", "world", "foo");
    test("hello world contains only deep foo", CONTAINS_ONLY_DEEP, "hello", "world", "foo");
    test("each hello world == foo", EACH_EQUALS, "hello", "world", "foo");
    test("each hello world contains deep foo", EACH_CONTAINS_DEEP, "hello", "world", "foo");
    test("{\"a\":1,\"b\":2} == '#object'", EQUALS, "({\"a\":1,\"b\":2})", null, "'#object'");
    test("hello.foo(bar) != blah", NOT_EQUALS, "hello.foo(bar)", null, "blah");
    test("foo count(/records//record) contains any blah", CONTAINS_ANY, "foo", "count(/records//record)", "blah");
    test("__arg == karate.get('foos[' + __loop + ']')", EQUALS, "__arg", null, "karate.get('foos[' + __loop + ']')");
    test("response $[?(@.b=='ab')] == '#[1]'", EQUALS, "response", "$[?(@.b=='ab')]", "'#[1]'");
    test("test != '#? _.length == 2'", NOT_EQUALS, "test", null, "'#? _.length == 2'");
    test("actual[0] !contains badSchema", NOT_CONTAINS, "actual[0]", null, "badSchema");
    test("actual[0] contains badSchema", CONTAINS, "actual[0]", null, "badSchema");
    test("driver.eval('{ foo: \"bar\" }') == { hello: 'world' }", EQUALS, "driver.eval('{ foo: \"bar\" }')", null, "{ hello: 'world' }");
    test("response.integration.serviceData['Usage Data'][0].Stage ==", EQUALS, "response.integration.serviceData['Usage Data'][0].Stage", null, null);
    test("response contains { foo: 'a any b' }", CONTAINS, "response", null, "{ foo: 'a any b' }");
    test("response.foo == 'a contains b'", EQUALS, "response.foo", null, "'a contains b'");
    test("$.addOns[?(@.entitlementStateChangeReason=='RESUBSCRIBE')].addOnOfferID contains only toAddOnOfferIDs", CONTAINS_ONLY, "$.addOns[?(@.entitlementStateChangeReason=='RESUBSCRIBE')].addOnOfferID", null, "toAddOnOfferIDs");
}
// Creates a namespace as a directory tree under the warehouse location.
// Non-empty metadata is rejected because the filesystem backing cannot store it.
@Override
public void createNamespace(Namespace namespace, Map<String, String> meta) {
    Preconditions.checkArgument(
        !namespace.isEmpty(), "Cannot create namespace with invalid name: %s", namespace);
    if (!meta.isEmpty()) {
        throw new UnsupportedOperationException(
            "Cannot create namespace " + namespace + ": metadata is not supported");
    }

    // Namespace levels map directly to nested path segments.
    Path nsPath = new Path(warehouseLocation, SLASH.join(namespace.levels()));
    if (isNamespace(nsPath)) {
        throw new AlreadyExistsException("Namespace already exists: %s", namespace);
    }

    try {
        fs.mkdirs(nsPath);
    } catch (IOException e) {
        throw new RuntimeIOException(e, "Create namespace failed: %s", namespace);
    }
}
// Creating namespaces must materialize the corresponding directory trees under
// the warehouse, and re-creating an existing namespace must fail with
// AlreadyExistsException.
@Test
public void testCreateNamespace() throws Exception {
    String warehouseLocation = tableDir.getAbsolutePath();
    HadoopCatalog catalog = new HadoopCatalog();
    catalog.setConf(new Configuration());
    catalog.initialize(
        "hadoop", ImmutableMap.of(CatalogProperties.WAREHOUSE_LOCATION, warehouseLocation));
    TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
    TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");

    // Only the namespace portion of each identifier is created here.
    Lists.newArrayList(tbl1, tbl2).forEach(t -> catalog.createNamespace(t.namespace(), META));

    String metaLocation1 = warehouseLocation + "/" + "db/ns1/ns2";
    FileSystem fs1 = Util.getFs(new Path(metaLocation1), catalog.getConf());
    assertThat(fs1.isDirectory(new Path(metaLocation1))).isTrue();

    String metaLocation2 = warehouseLocation + "/" + "db/ns2/ns3";
    FileSystem fs2 = Util.getFs(new Path(metaLocation2), catalog.getConf());
    assertThat(fs2.isDirectory(new Path(metaLocation2))).isTrue();

    assertThatThrownBy(() -> catalog.createNamespace(tbl1.namespace()))
        .isInstanceOf(AlreadyExistsException.class)
        .hasMessage("Namespace already exists: " + tbl1.namespace());
}
/**
 * Builds a PaymentACK protobuf that acknowledges the given payment message.
 *
 * @param paymentMessage the payment being acknowledged
 * @param memo optional human-readable note; omitted from the ACK when null
 * @return the assembled PaymentACK
 */
public static Protos.PaymentACK createPaymentAck(Protos.Payment paymentMessage, @Nullable String memo) {
    final Protos.PaymentACK.Builder ack = Protos.PaymentACK.newBuilder()
            .setPayment(paymentMessage);
    if (memo != null) {
        ack.setMemo(memo);
    }
    return ack.build();
}
// Round-trips a PaymentACK through its wire format and checks both the embedded
// payment and the memo survive serialization.
@Test
public void testPaymentAck() throws Exception {
    // Create
    Payment paymentMessage = Protos.Payment.newBuilder().build();
    PaymentACK paymentAck = PaymentProtocol.createPaymentAck(paymentMessage, MEMO);
    byte[] paymentAckBytes = paymentAck.toByteArray();
    // Parse
    PaymentACK parsedPaymentAck = PaymentACK.parseFrom(paymentAckBytes);
    assertEquals(paymentMessage, parsedPaymentAck.getPayment());
    assertEquals(MEMO, parsedPaymentAck.getMemo());
}
// Two DnsRecordType instances are equal exactly when their numeric values match;
// any non-DnsRecordType argument (including null) is unequal.
@Override
public boolean equals(Object o) {
    if (!(o instanceof DnsRecordType)) {
        return false;
    }
    final DnsRecordType that = (DnsRecordType) o;
    return that.intValue == intValue;
}
// Every pair of distinct DnsRecordType constants must compare unequal.
// NOTE(review): "throws Exception" is unnecessary here, and reflexive equality
// (t1.equals(t1)) is not asserted — consider adding it for full contract coverage.
@Test
public void testEquals() throws Exception {
    for (DnsRecordType t1 : allTypes()) {
        for (DnsRecordType t2 : allTypes()) {
            if (t1 != t2) {
                assertNotEquals(t1, t2);
            }
        }
    }
}
// Compares two collections of Comparable values "safely": when every element
// can be coerced to a common numeric type, equality is decided on the coerced
// values (so e.g. 10.01F equals 10.01D); otherwise plain equals() is used.
public static boolean safeCollectionEquals(final Collection<Comparable<?>> sources, final Collection<Comparable<?>> targets) {
    List<Comparable<?>> all = new ArrayList<>(sources);
    all.addAll(targets);
    // Determine the target numeric type across BOTH collections, if any.
    Optional<Class<?>> clazz = getTargetNumericType(all);
    if (!clazz.isPresent()) {
        return sources.equals(targets);
    }
    // Re-parse every element via its string form into the common type, then
    // compare element-wise (order-sensitive, like List.equals).
    List<Comparable<?>> sourceClasses = sources.stream().map(each -> parseNumberByClazz(each.toString(), clazz.get())).collect(Collectors.toList());
    List<Comparable<?>> targetClasses = targets.stream().map(each -> parseNumberByClazz(each.toString(), clazz.get())).collect(Collectors.toList());
    return sourceClasses.equals(targetClasses);
}
// Numerically equal values must compare equal across Float/Double
// representations once coerced to the common numeric type.
@Test
void assertSafeCollectionEqualsForDouble() {
    List<Comparable<?>> sources = Arrays.asList(10.01, 12.01);
    List<Comparable<?>> targets = Arrays.asList(10.01F, 12.01);
    assertTrue(SafeNumberOperationUtils.safeCollectionEquals(sources, targets));
}
// Writes a record too large for a single buffer by chunking it across multiple
// network buffers for the given subpartition. Events must not take this path.
private void writeLargeRecord(ByteBuffer record, int subpartitionId, Buffer.DataType dataType) {
    checkState(dataType != Buffer.DataType.EVENT_BUFFER);

    while (record.hasRemaining()) {
        int toCopy = Math.min(record.remaining(), bufferSizeBytes);
        MemorySegment writeBuffer = requestBuffer().getMemorySegment();
        writeBuffer.put(0, record, toCopy);

        // remaining() is measured AFTER the copy above consumed toCopy bytes,
        // so this counts the chunks still to come after the current one.
        int numRemainingConsecutiveBuffers =
            (int) Math.ceil(((double) record.remaining()) / bufferSizeBytes);

        if (numRemainingConsecutiveBuffers == 0) {
            // Last chunk of the record — tag it so consumers can detect the
            // record boundary (per the enum name; semantics defined elsewhere).
            dataType = Buffer.DataType.DATA_BUFFER_WITH_CLEAR_END;
        }
        flushBuffer(
            new BufferWithSubpartition(
                new NetworkBuffer(
                    writeBuffer, checkNotNull(bufferRecycler), dataType, toCopy),
                subpartitionId),
            numRemainingConsecutiveBuffers);
    }
    releaseFreeBuffers();
}
// Delegates to the boolean-parameterized overload; the flag's meaning is
// defined at that overload's declaration (not visible here).
@Test
void testWriteLargeRecord() throws IOException {
    testWriteLargeRecord(true);
}
// Exposes the thread-level producer owned by the active task creator.
// Package-private, per the test usage presumably for test access — confirm.
StreamsProducer threadProducer() {
    return activeTaskCreator.threadProducer();
}
// Under EOS v2, offsets from multiple tasks must be committed through the shared
// thread producer as ONE transaction containing the union of all task offsets.
@Test
public void shouldCommitViaProducerIfEosV2Enabled() {
    final StreamsProducer producer = mock(StreamsProducer.class);
    when(activeTaskCreator.threadProducer()).thenReturn(producer);

    final Map<TopicPartition, OffsetAndMetadata> offsetsT01 = singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    final Map<TopicPartition, OffsetAndMetadata> offsetsT02 = singletonMap(t1p2, new OffsetAndMetadata(1L, null));
    final Map<TopicPartition, OffsetAndMetadata> allOffsets = new HashMap<>();
    allOffsets.putAll(offsetsT01);
    allOffsets.putAll(offsetsT02);

    shouldCommitViaProducerIfEosEnabled(ProcessingMode.EXACTLY_ONCE_V2, offsetsT01, offsetsT02);

    // Exactly one combined commit; nothing else may touch the producer.
    verify(producer).commitTransaction(allOffsets, new ConsumerGroupMetadata("appId"));
    verifyNoMoreInteractions(producer);
}
// Estimates the fraction of NULLs for a column by aggregating nulls/rows over
// all partitions that report BOTH a row count and a nulls count; returns
// unknown when no such partition exists, and zero when all rows counts are 0.
@VisibleForTesting
static Estimate calculateNullsFraction(String columnName, Collection<PartitionStatistics> partitionStatistics)
{
    // Keep only partitions where both rowCount and nullsCount are known.
    List<PartitionStatistics> statisticsWithKnownRowCountAndNullsCount = partitionStatistics.stream()
            .filter(statistics -> {
                if (!statistics.getBasicStatistics().getRowCount().isPresent()) {
                    return false;
                }
                HiveColumnStatistics columnStatistics = statistics.getColumnStatistics().get(columnName);
                if (columnStatistics == null) {
                    return false;
                }
                return columnStatistics.getNullsCount().isPresent();
            })
            .collect(toImmutableList());

    if (statisticsWithKnownRowCountAndNullsCount.isEmpty()) {
        return Estimate.unknown();
    }

    long totalNullsCount = 0;
    long totalRowCount = 0;
    for (PartitionStatistics statistics : statisticsWithKnownRowCountAndNullsCount) {
        // Presence was established by the filter above; orElseThrow guards invariants.
        long rowCount = statistics.getBasicStatistics().getRowCount().orElseThrow(() -> new VerifyException("rowCount is not present"));
        verify(rowCount >= 0, "rowCount must be greater than or equal to zero");
        HiveColumnStatistics columnStatistics = statistics.getColumnStatistics().get(columnName);
        verify(columnStatistics != null, "columnStatistics is null");
        long nullsCount = columnStatistics.getNullsCount().orElseThrow(() -> new VerifyException("nullsCount is not present"));
        verify(nullsCount >= 0, "nullsCount must be greater than or equal to zero");
        verify(nullsCount <= rowCount, "nullsCount must be less than or equal to rowCount. nullsCount: %s. rowCount: %s.", nullsCount, rowCount);
        totalNullsCount += nullsCount;
        totalRowCount += rowCount;
    }

    // All contributing partitions are empty — no NULLs by definition.
    if (totalRowCount == 0) {
        return Estimate.zero();
    }

    verify(
            totalNullsCount <= totalRowCount,
            "totalNullsCount must be less than or equal to totalRowCount. totalNullsCount: %s. totalRowCount: %s.",
            totalNullsCount,
            totalRowCount);
    return Estimate.of(((double) totalNullsCount) / totalRowCount);
}
// Covers: no stats, empty stats, partial stats (only rows or only nulls),
// mixed partitions where only complete ones count, multi-partition weighted
// aggregation, and all-empty partitions yielding zero.
@Test
public void testCalculateNullsFraction()
{
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of()), Estimate.unknown());
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of(PartitionStatistics.empty())), Estimate.unknown());
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of(rowsCount(1000))), Estimate.unknown());
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of(rowsCount(1000), nullsCount(500))), Estimate.unknown());
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of(rowsCount(1000), nullsCount(500), rowsCountAndNullsCount(1000, 500))), Estimate.of(0.5));
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of(rowsCountAndNullsCount(2000, 200), rowsCountAndNullsCount(1000, 100))), Estimate.of(0.1));
    assertEquals(calculateNullsFraction(COLUMN, ImmutableList.of(rowsCountAndNullsCount(0, 0), rowsCountAndNullsCount(0, 0))), Estimate.of(0));
}
// Returns the conditions evaluated so far.
// NOTE(review): this exposes the internal collection directly; callers could
// mutate it — consider returning an unmodifiable view.
public Collection<EvaluatedCondition> getEvaluatedConditions() {
    return evaluatedConditions;
}
// A null condition value must be accepted and round-trip through the builder.
@Test
public void addCondition_accepts_null_value() {
    builder.addEvaluatedCondition(CONDITION_1, EvaluatedCondition.EvaluationStatus.NO_VALUE, null);

    final EvaluatedCondition expected =
            new EvaluatedCondition(CONDITION_1, EvaluatedCondition.EvaluationStatus.NO_VALUE, null);
    assertThat(builder.getEvaluatedConditions()).containsOnly(expected);
}
// Serializes a logging event to one JSON object (UTF-8 bytes, terminated by the
// JSON line separator). Each with* flag toggles the corresponding member; the
// emission ORDER below is part of the output format and must not change.
@Override
public byte[] encode(ILoggingEvent event) {
    // Pre-size the buffer larger when a throwable must be rendered.
    final int initialCapacity = event.getThrowableProxy() == null ? DEFAULT_SIZE : DEFAULT_SIZE_WITH_THROWABLE;

    StringBuilder sb = new StringBuilder(initialCapacity);
    sb.append(OPEN_OBJ);

    if (withSequenceNumber) {
        appenderMemberWithLongValue(sb, SEQUENCE_NUMBER_ATTR_NAME, event.getSequenceNumber());
        sb.append(VALUE_SEPARATOR);
    }

    if (withTimestamp) {
        appenderMemberWithLongValue(sb, TIMESTAMP_ATTR_NAME, event.getTimeStamp());
        sb.append(VALUE_SEPARATOR);
    }

    if (withNanoseconds) {
        appenderMemberWithLongValue(sb, NANOSECONDS_ATTR_NAME, event.getNanoseconds());
        sb.append(VALUE_SEPARATOR);
    }

    if (withLevel) {
        // Level may be null on synthetic events; emit the NULL placeholder then.
        String levelStr = event.getLevel() != null ? event.getLevel().levelStr : NULL_STR;
        appenderMember(sb, LEVEL_ATTR_NAME, levelStr);
        sb.append(VALUE_SEPARATOR);
    }

    if (withThreadName) {
        appenderMember(sb, THREAD_NAME_ATTR_NAME, jsonEscape(event.getThreadName()));
        sb.append(VALUE_SEPARATOR);
    }

    if (withLoggerName) {
        appenderMember(sb, LOGGER_ATTR_NAME, event.getLoggerName());
        sb.append(VALUE_SEPARATOR);
    }

    if (withContext) {
        appendLoggerContext(sb, event.getLoggerContextVO());
        sb.append(VALUE_SEPARATOR);
    }

    // These helpers append their own separators (no explicit VALUE_SEPARATOR here).
    if (withMarkers)
        appendMarkers(sb, event);

    if (withMDC)
        appendMDC(sb, event);

    if (withKVPList)
        appendKeyValuePairs(sb, event);

    if (withMessage) {
        appenderMember(sb, MESSAGE_ATTR_NAME, jsonEscape(event.getMessage()));
        sb.append(VALUE_SEPARATOR);
    }

    if (withFormattedMessage) {
        appenderMember(sb, FORMATTED_MESSAGE_ATTR_NAME, jsonEscape(event.getFormattedMessage()));
        sb.append(VALUE_SEPARATOR);
    }

    if (withArguments)
        appendArgumentArray(sb, event);

    // The throwable is the last member, so no trailing separator is needed.
    if (withThrowable)
        appendThrowableProxy(sb, THROWABLE_ATTR_NAME, event.getThrowableProxy());

    sb.append(CLOSE_OBJ);
    sb.append(CoreConstants.JSON_LINE_SEPARATOR);
    return sb.toString().getBytes(UTF_8_CHARSET);
}
// Events carrying markers must round-trip through the JSON encoder; the decoded
// event is compared field-by-field against the original.
@Test
void withMarkers() throws JsonProcessingException {
    LoggingEvent event = new LoggingEvent("x", logger, Level.WARN, "hello", null, null);
    event.addMarker(markerA);
    event.addMarker(markerB);

    byte[] resultBytes = jsonEncoder.encode(event);
    String resultString = new String(resultBytes, StandardCharsets.UTF_8);
    JsonLoggingEvent resultEvent = stringToLoggingEventMapper.mapStringToLoggingEvent(resultString);
    compareEvents(event, resultEvent);
}
// Copies the weight and status attribute values onto the given subchannel.
// NOTE(review): STATSU_KEY looks like a typo of STATUS_KEY — the constant is
// declared elsewhere; confirm and fix at its declaration site.
public static void updateAttributes(final LoadBalancer.Subchannel subchannel, final Attributes attributes) {
    setAttributeValue(subchannel, WEIGHT_KEY, attributes);
    setAttributeValue(subchannel, STATSU_KEY, attributes);
}
// Smoke test: updateAttributes must accept a mocked subchannel and attributes
// without throwing. No stubbing or verification — it only guards against NPEs.
@Test
public void testUpdateAttributes() {
    final LoadBalancer.Subchannel subchannel = mock(LoadBalancer.Subchannel.class);
    SubChannels.updateAttributes(subchannel, mock(Attributes.class));
}
// Resolves the requested config from the model, after validating the requested
// definition, and wraps it in a response tagged with the current generation.
public ConfigResponse resolveConfig(GetConfigRequest request) {
    ConfigKey<?> configKey = request.getConfigKey();
    // Throws (e.g. UnknownConfigDefinitionException) when the definition is unknown.
    validateConfigDefinition(request.getConfigKey(), request.getDefContent());
    return responseFactory.createResponse(model.getConfig(configKey).toUtf8Array(true),
                                          generation, false, request.configPayloadChecksums());
}
// Resolving a request whose definition content is empty (i.e. unknown to the
// server) must raise UnknownConfigDefinitionException (expected via annotation).
@Test(expected = UnknownConfigDefinitionException.class)
public void test_unknown_config_definition() {
    PayloadChecksums payloadChecksums = PayloadChecksums.empty();
    Request request = createWithParams(new ConfigKey<>("foo", "id", "bar", null),
                                       DefContent.fromList(List.of()),
                                       "fromHost", payloadChecksums, 1, 1, Trace.createDummy(),
                                       CompressionType.UNCOMPRESSED,
                                       Optional.empty())
            .getRequest();
    JRTServerConfigRequestV3 v3Request = JRTServerConfigRequestV3.createFromRequest(request);
    handler.resolveConfig(v3Request);
}
// Returns the cluster state version currently tracked (package-private accessor).
int getCurrentVersion() {
    return this.currentVersion;
}
// A freshly constructed tracker must start at version 1.
@Test
void version_is_1_upon_construction() {
    assertEquals(1, createWithMockedMetrics().getCurrentVersion());
}
/**
 * Builds the WHERE clause for this split: the partition predicate (when
 * partitioned) AND-combined with the split condition (when present). Returns an
 * empty string when neither applies.
 */
public String getWhereClause() {
    final boolean unpartitioned = partitionId.equals(CassandraPartition.UNPARTITIONED_ID);
    if (unpartitioned) {
        return splitCondition == null ? "" : " WHERE " + splitCondition;
    }
    return splitCondition == null
            ? " WHERE " + partitionId
            : " WHERE " + partitionId + " AND " + splitCondition;
}
// Covers both clause shapes: (a) an unpartitioned split contributes only its
// split condition; (b) a partitioned split without a condition contributes only
// the partition predicate.
@Test
public void testWhereClause() {
    CassandraSplit split;
    split = new CassandraSplit(
            "connectorId",
            "schema1",
            "table1",
            CassandraPartition.UNPARTITIONED_ID,
            "token(k) >= 0 AND token(k) <= 2",
            addresses);
    assertEquals(split.getWhereClause(), " WHERE token(k) >= 0 AND token(k) <= 2");
    split = new CassandraSplit(
            "connectorId",
            "schema1",
            "table1",
            "key = 123",
            null,
            addresses);
    assertEquals(split.getWhereClause(), " WHERE key = 123");
}
// Returns the REST endpoint port configured via RestOptions.PORT.
public int getRestPort() {
    return flinkConfig.get(RestOptions.PORT);
}
// getRestPort must reflect the value set for RestOptions.PORT.
@Test
void testGetRestPort() {
    final int expectedPort = 12345;
    flinkConfig.set(RestOptions.PORT, expectedPort);
    assertThat(kubernetesJobManagerParameters.getRestPort()).isEqualTo(expectedPort);
}
// Builds the "System" section of the system-info protobuf: server identity,
// edition, lines-of-code statistic, runtime environment flags, and the
// external authentication/provisioning configuration.
@Override
public ProtobufSystemInfo.Section toProtobuf() {
    ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
    protobuf.setName("System");

    setAttribute(protobuf, "Server ID", server.getId());
    setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel());
    setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode());
    setAttribute(protobuf, "Container", containerSupport.isRunningInContainer());
    // Hardcoded true — presumably this section variant only renders in HA mode; confirm.
    setAttribute(protobuf, "High Availability", true);
    setAttribute(protobuf, "External Users and Groups Provisioning",
        commonSystemInformation.getManagedInstanceProviderName());
    setAttribute(protobuf, "External User Authentication",
        commonSystemInformation.getExternalUserAuthentication());
    // These two are only added when the underlying lists are non-empty.
    addIfNotEmpty(protobuf, "Accepted external identity providers",
        commonSystemInformation.getEnabledIdentityProviders());
    addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up",
        commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders());
    setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication());
    return protobuf.build();
}
// The generated protobuf section must be named exactly "System".
@Test
public void name_is_not_empty() {
    assertThat(underTest.toProtobuf().getName()).isEqualTo("System");
}
/**
 * Converts the data table to a list of {@code itemType} elements.
 * An empty table yields an empty list; a table that no registered converter can
 * handle raises a CucumberDataTableException describing the missing transformers.
 */
@Override
public <T> List<T> toList(DataTable dataTable, Type itemType) {
    requireNonNull(dataTable, "dataTable may not be null");
    requireNonNull(itemType, "itemType may not be null");

    if (dataTable.isEmpty()) {
        return emptyList();
    }

    final ListOrProblems<T> outcome = toListOrProblems(dataTable, itemType);
    if (!outcome.hasList()) {
        throw listNoConverterDefined(itemType, outcome.getProblems());
    }
    return unmodifiableList(outcome.getList());
}
// When no transformer is registered for the item type, toList must fail with a
// message enumerating every registration option; the message text is asserted
// verbatim, so it must stay in sync with the production error builder.
@Test
void to_list__single_column__throws_exception__register_transformer() {
    DataTable table = parse("",
        "| ♘ |",
        "| ♝ |");

    CucumberDataTableException exception = assertThrows(
        CucumberDataTableException.class,
        () -> converter.toList(table, Piece.class));
    assertThat(exception.getMessage(), is("" +
            "Can't convert DataTable to List<io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Piece>.\n" +
            "Please review these problems:\n" +
            "\n" +
            " - There was no table entry or table row transformer registered for io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Piece.\n" +
            "   Please consider registering a table entry or row transformer.\n" +
            "\n" +
            " - There was no table cell transformer registered for io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Piece.\n" +
            "   Please consider registering a table cell transformer.\n" +
            "\n" +
            " - There was no default table entry transformer registered to transform io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Piece.\n" +
            "   Please consider registering a default table entry transformer.\n" +
            "\n" +
            " - There was no default table cell transformer registered to transform io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Piece.\n" +
            "   Please consider registering a default table cell transformer.\n" +
            "\n" +
            "Note: Usually solving one is enough"));
}
// Parses a dynamic voter specification of the form
//   <node-id>@<host>:<port>:<directory-id>
// where <host> may be a bracketed IPv6 literal. Each malformed component fails
// with an IllegalArgumentException carrying a component-specific message.
public static DynamicVoter parse(String input) {
    input = input.trim();
    int atIndex = input.indexOf("@");
    if (atIndex < 0) {
        throw new IllegalArgumentException("No @ found in dynamic voter string.");
    }
    if (atIndex == 0) {
        throw new IllegalArgumentException("Invalid @ at beginning of dynamic voter string.");
    }
    String idString = input.substring(0, atIndex);
    int nodeId;
    try {
        nodeId = Integer.parseInt(idString);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Failed to parse node id in dynamic voter string.", e);
    }
    if (nodeId < 0) {
        throw new IllegalArgumentException("Invalid negative node id " + nodeId +
            " in dynamic voter string.");
    }
    // Consume "<node-id>@"; what remains is host:port:directory-id.
    input = input.substring(atIndex + 1);
    if (input.isEmpty()) {
        throw new IllegalArgumentException("No hostname found after node id.");
    }
    String host;
    if (input.startsWith("[")) {
        // Bracketed hostname (IPv6 literal): host is everything inside the brackets.
        int endBracketIndex = input.indexOf("]");
        if (endBracketIndex < 0) {
            throw new IllegalArgumentException("Hostname began with left bracket, but no right " +
                "bracket was found.");
        }
        host = input.substring(1, endBracketIndex);
        input = input.substring(endBracketIndex + 1);
    } else {
        // Unbracketed hostname ends at the first colon; the colon stays in input
        // so the common ":port:dir" handling below applies to both branches.
        int endColonIndex = input.indexOf(":");
        if (endColonIndex < 0) {
            throw new IllegalArgumentException("No colon following hostname could be found.");
        }
        host = input.substring(0, endColonIndex);
        input = input.substring(endColonIndex);
    }
    if (!input.startsWith(":")) {
        throw new IllegalArgumentException("Port section must start with a colon.");
    }
    input = input.substring(1);
    int endColonIndex = input.indexOf(":");
    if (endColonIndex < 0) {
        throw new IllegalArgumentException("No colon following port could be found.");
    }
    String portString = input.substring(0, endColonIndex);
    int port;
    try {
        port = Integer.parseInt(portString);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Failed to parse port in dynamic voter string.", e);
    }
    if (port < 0 || port > 65535) {
        throw new IllegalArgumentException("Invalid port " + port + " in dynamic voter string.");
    }
    String directoryIdString = input.substring(endColonIndex + 1);
    Uuid directoryId;
    try {
        directoryId = Uuid.fromString(directoryIdString);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Failed to parse directory ID in dynamic voter string.", e);
    }
    return new DynamicVoter(directoryId, nodeId, host, port);
}
// A non-numeric node id must be rejected with the dedicated parse-error message.
@Test
public void testFailedToParseNodeId() {
    final IllegalArgumentException thrown = assertThrows(
            IllegalArgumentException.class,
            () -> DynamicVoter.parse("blah@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ"));
    assertEquals("Failed to parse node id in dynamic voter string.", thrown.getMessage());
}
// Jackson factory: builds an AuditEventType from its three serialized parts.
@JsonCreator
public static AuditEventType create(@JsonProperty(FIELD_NAMESPACE) String namespace,
                                    @JsonProperty(FIELD_OBJECT) String object,
                                    @JsonProperty(FIELD_ACTION) String action) {
    return new AutoValue_AuditEventType(namespace, object, action);
}
// The single-argument factory (defined elsewhere) must reject a string that
// cannot be split into namespace/object/action.
// NOTE(review): the ExpectedException rule is deprecated in JUnit 4.13 —
// consider assertThrows instead.
@Test
public void testInvalid1() throws Exception {
    expectedException.expect(IllegalArgumentException.class);
    AuditEventType.create("foo");
}
// Attempts to split the remaining (growable) offset range at fractionOfRemainder
// of the estimated remaining work; returns the (primary, residual) pair, or null
// when nothing is left to split.
@Override
public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) {
    // If current tracking range is no longer growable, split it as a normal range.
    // NOTE(review): the second clause can only hold when from == to == Long.MAX_VALUE.
    if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
        return super.trySplit(fractionOfRemainder);
    }
    // If current range has been done, there is no more space to split.
    if (lastAttemptedOffset != null && lastAttemptedOffset == Long.MAX_VALUE) {
        return null;
    }
    // "cur" is the last claimed position, or from-1 when nothing was claimed yet.
    BigDecimal cur =
        (lastAttemptedOffset == null)
            ? BigDecimal.valueOf(range.getFrom()).subtract(BigDecimal.ONE, MathContext.DECIMAL128)
            : BigDecimal.valueOf(lastAttemptedOffset);
    // Fetch the estimated end offset. If the estimated end is smaller than the next offset, use
    // the next offset as end.
    BigDecimal estimateRangeEnd =
        BigDecimal.valueOf(rangeEndEstimator.estimate())
            .max(cur.add(BigDecimal.ONE, MathContext.DECIMAL128));
    // Convert to BigDecimal in computation to prevent overflow, which may result in loss of
    // precision.
    // split = cur + max(1, (estimateRangeEnd - cur) * fractionOfRemainder)
    BigDecimal splitPos =
        cur.add(
            estimateRangeEnd
                .subtract(cur, MathContext.DECIMAL128)
                .multiply(BigDecimal.valueOf(fractionOfRemainder), MathContext.DECIMAL128)
                .max(BigDecimal.ONE),
            MathContext.DECIMAL128);
    long split = splitPos.longValue();
    if (split > estimateRangeEnd.longValue()) {
        return null;
    }
    // Primary keeps [from, split); residual takes [split, MAX_VALUE).
    OffsetRange res = new OffsetRange(split, range.getTo());
    this.range = new OffsetRange(range.getFrom(), split);
    return SplitResult.of(range, res);
}
// Once a claim of Long.MAX_VALUE fails, the range is exhausted: checkDone must
// pass and no further split may be produced.
@Test
public void testCheckpointAfterAllProcessed() throws Exception {
    SimpleEstimator simpleEstimator = new SimpleEstimator();
    GrowableOffsetRangeTracker tracker = new GrowableOffsetRangeTracker(0L, simpleEstimator);
    assertFalse(tracker.tryClaim(Long.MAX_VALUE));
    tracker.checkDone();
    assertNull(tracker.trySplit(0));
}
// Deletes the permission row matching the given (role, resource, action) triple:
// the parameterized DELETE is queued on the embedded-storage SQL context and
// then applied via a blocking update.
@Override
public void deletePermission(String role, String resource, String action) {
    final String deleteSql = "DELETE FROM permissions WHERE role=? AND resource=? AND action=?";
    EmbeddedStorageContextHolder.addSqlContext(deleteSql, role, resource, action);
    databaseOperate.blockUpdate();
}
// deletePermission must trigger a blocking database update.
// NOTE(review): currentSqlContext is fetched but never asserted on — consider
// verifying the queued DELETE statement and its arguments.
@Test
void testDeletePermission() {
    embeddedPermissionPersistService.deletePermission("role", "resource", "action");
    List<ModifyRequest> currentSqlContext = EmbeddedStorageContextHolder.getCurrentSqlContext();
    Mockito.verify(databaseOperate).blockUpdate();
}
// Validates that the given internal topics exist on the brokers with the
// expected partition counts and cleanup policies, retrying until retryTimeoutMs
// elapses. Returns a ValidationResult accumulating any discrepancies.
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());

    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;

    final ValidationResult validationResult = new ValidationResult();
    // Two independent work lists: topic DESCRIPTIONS (partition counts) and
    // topic CONFIGS (cleanup policy) are fetched and validated separately.
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult =
                adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet())
            );
            // Key the futures by plain topic name rather than ConfigResource.
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }

        // Drain the outstanding futures; entries whose futures are not yet done
        // stay in the maps and are polled again after a short sleep.
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
                );
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
                );
            }
            // Give up once the deadline passes while work remains.
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
            );
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }

        maybeSleep(
            Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
            deadline,
            "validated"
        );
    }

    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
// When the topic-description future keeps failing with TimeoutException, the
// retry loop must eventually hit its deadline (MockTime auto-advances a third
// of the poll interval per call) and surface a TimeoutException — even though
// the config lookup itself succeeds.
@Test
public void shouldThrowTimeoutExceptionWhenTimeoutIsExceededDuringValidation() {
    final AdminClient admin = mock(AdminClient.class);
    final MockTime time = new MockTime(
        (Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 3
    );
    final InternalTopicManager topicManager = new InternalTopicManager(
        time,
        admin,
        new StreamsConfig(config)
    );
    final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>();
    topicDescriptionFailFuture.completeExceptionally(new TimeoutException());
    when(admin.describeTopics(Collections.singleton(topic1)))
        .thenAnswer(answer -> new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionFailFuture))));
    final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>();
    topicConfigSuccessfulFuture.complete(
        new Config(repartitionTopicConfig().entrySet().stream()
            .map(entry -> new ConfigEntry(entry.getKey(), entry.getValue())).collect(Collectors.toSet()))
    );
    final ConfigResource topicResource = new ConfigResource(Type.TOPIC, topic1);
    when(admin.describeConfigs(Collections.singleton(topicResource)))
        .thenAnswer(answer -> new MockDescribeConfigsResult(mkMap(mkEntry(topicResource, topicConfigSuccessfulFuture))));
    final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);

    assertThrows(
        TimeoutException.class,
        () -> topicManager.validate(Collections.singletonMap(topic1, internalTopicConfig))
    );
}
/**
 * Merges two KLL histogram estimators for the same column.
 * When both are present and compatible, the new sketch is merged into the old one in place
 * and the old estimator is returned; if only the new one is present it is returned as-is;
 * otherwise the (possibly null) old estimator is returned unchanged.
 */
protected KllHistogramEstimator mergeHistogramEstimator(
    String columnName, KllHistogramEstimator oldEst, KllHistogramEstimator newEst) {
  if (oldEst != null && newEst != null) {
    if (oldEst.canMerge(newEst)) {
      LOG.trace("Merging old sketch {} with new sketch {}...",
          oldEst.getSketch(), newEst.getSketch());
      oldEst.mergeEstimators(newEst);
      LOG.trace("Resulting sketch is {}", oldEst.getSketch());
      return oldEst;
    }
    // NOTE(review): this debug line only executes when the sketches could NOT be merged
    // above — confirm whether it was meant to live inside the canMerge branch.
    LOG.debug("Merging histograms of column {}", columnName);
  } else if (newEst != null) {
    LOG.trace("Old sketch is empty, the new sketch is used {}", newEst.getSketch());
    return newEst;
  }
  // Nothing mergeable: fall through with oldEst (which may be null).
  return oldEst;
}
// When the new estimator is null, merging must hand back the existing estimator untouched.
@Test
public void testMergeHistogramEstimatorsSecondNull() {
    KllHistogramEstimator existing =
        KllHistogramEstimatorFactory.getKllHistogramEstimator(KLL_1.toByteArray());

    KllHistogramEstimator merged = MERGER.mergeHistogramEstimator("", existing, null);

    // The sketches must render identically, i.e. the original survives the merge.
    assertEquals(existing.getSketch().toString(), merged.getSketch().toString());
}
/**
 * Dispatches a frame: complete messages go straight to the delegate handler,
 * while fragments are buffered for reassembly.
 */
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header)
{
    final byte flags = header.flags();
    // A frame with both BEGIN and END flags set carries a whole, unfragmented message.
    final boolean isWholeMessage = (flags & UNFRAGMENTED) == UNFRAGMENTED;

    if (!isWholeMessage)
    {
        handleFragment(buffer, offset, length, header, flags);
    }
    else
    {
        delegate.onFragment(buffer, offset, length, header);
    }
}
// Feeds a BEGIN fragment followed by an END fragment and checks the delegate receives a
// single reassembled message with the combined length and the UNFRAGMENTED flag.
@Test
void shouldAssembleTwoPartMessage()
{
    final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[1024 + (2 * HEADER_LENGTH)]);
    final int length = 512;
    int offset = HEADER_LENGTH;

    // First fragment: marked as the beginning of a fragmented message.
    headerFlyweight.flags(FrameDescriptor.BEGIN_FRAG_FLAG);
    assembler.onFragment(srcBuffer, offset, length, header);

    // Second fragment starts at the next frame-aligned position.
    offset = BitUtil.align(offset + length + HEADER_LENGTH, FRAME_ALIGNMENT);
    headerFlyweight.flags(FrameDescriptor.END_FRAG_FLAG);
    assembler.onFragment(srcBuffer, offset, length, header);

    final ArgumentCaptor<Header> headerArg = ArgumentCaptor.forClass(Header.class);
    // Exactly one delivery: offset 0, doubled length, reassembled header captured below.
    verify(delegateFragmentHandler, times(1)).onFragment(
        any(), eq(0), eq(length * 2), headerArg.capture());

    final Header capturedHeader = headerArg.getValue();
    assertEquals(SESSION_ID, capturedHeader.sessionId());
    // The synthesized header must present the message as unfragmented.
    assertEquals(FrameDescriptor.UNFRAGMENTED, capturedHeader.flags());
}
/**
 * Streams log lines within [from, to] to {@code out}, up to {@code maxLines} lines,
 * optionally filtered by hostname. Files covering the same interval are merge-sorted
 * while reading, and a bounded heap re-orders lines that are only approximately
 * sorted within each file.
 */
void writeLogs(OutputStream out, Instant from, Instant to, long maxLines, Optional<String> hostname) {
    double fromSeconds = from.getEpochSecond() + from.getNano() / 1e9;
    double toSeconds = to.getEpochSecond() + to.getNano() / 1e9;
    long linesWritten = 0;
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
    for (List<Path> logs : getMatchingFiles(from, to)) {
        List<LogLineIterator> logLineIterators = new ArrayList<>();
        try {
            // Logs in each sub-list contain entries covering the same time interval, so do a merge sort while reading
            for (Path log : logs)
                logLineIterators.add(new LogLineIterator(log, fromSeconds, toSeconds, hostname));

            Iterator<LineWithTimestamp> lines = Iterators.mergeSorted(logLineIterators,
                                                                      Comparator.comparingDouble(LineWithTimestamp::timestamp));
            // Min-heap window of up to 1000 lines smooths out-of-order timestamps
            // before lines are written.
            PriorityQueue<LineWithTimestamp> heap = new PriorityQueue<>(Comparator.comparingDouble(LineWithTimestamp::timestamp));
            while (lines.hasNext()) {
                heap.offer(lines.next());
                if (heap.size() > 1000) {
                    // Early return on the line cap; the finally block still flushes.
                    if (linesWritten++ >= maxLines)
                        return;
                    writer.write(heap.poll().line);
                    writer.newLine();
                }
            }
            // Drain whatever remains buffered in the heap.
            while ( ! heap.isEmpty()) {
                if (linesWritten++ >= maxLines)
                    return;
                writer.write(heap.poll().line);
                writer.newLine();
            }
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        finally {
            // Close every iterator (best effort) and flush whatever was written.
            for (LogLineIterator ll : logLineIterators) {
                try { ll.close(); } catch (IOException ignored) { }
            }
            Exceptions.uncheck(writer::flush);
        }
    }
}
// Requires the external zstdcat binary; streams all logs (including compressed ones)
// over a two-day window and compares against the expected concatenation order.
@EnabledIf("hasZstdcat")
@Test
void testZippedStreaming() {
    ByteArrayOutputStream zippedBaos = new ByteArrayOutputStream();
    LogReader logReader = new LogReader(logDirectory, Pattern.compile(".*"));
    logReader.writeLogs(zippedBaos, Instant.EPOCH, Instant.EPOCH.plus(Duration.ofDays(2)), 100, Optional.empty());

    // Expected output is the fixture logs concatenated in timestamp order.
    assertEquals(log101 + log100b + log100a + logv11 + log110 + log200 + logv, zippedBaos.toString(UTF_8));
}
/**
 * Renders a byte count as a human-readable size, e.g. {@code 1536 -> "1.5 KB"}.
 * Values below 1024 are shown as an exact byte count.
 *
 * @param size size in bytes
 * @return formatted size with one decimal place and a K/M/G/T/P/E/Z/Y unit
 */
public static String byteCountToDisplaySize(long size) {
    if (size < 1024L) {
        // Bug fix: only exactly 1 is singular — 0 (and negatives) read "bytes", not "byte".
        return size + (size == 1 ? " byte" : " bytes");
    }
    // exp selects the unit by powers of 1024: 1 -> K, 2 -> M, ...
    int exp = (int) (Math.log(size) / Math.log(1024));
    double value = size / Math.pow(1024, exp);
    char unit = "KMGTPEZY".charAt(exp - 1);
    // Locale.ROOT keeps '.' as the decimal separator regardless of the default locale,
    // matching the callers' expected output (e.g. "1.5 MB").
    return String.format(java.util.Locale.ROOT, "%.1f %s%s", value, unit, "B");
}
// 1.5 MiB must render with a single decimal place and the "MB" unit.
@Test
public void shouldConvertBytesToMegaForFloat() {
    final long oneAndAHalfMebibytes = (1024L + 512L) * 1024L;
    assertThat(FileSizeUtils.byteCountToDisplaySize(oneAndAHalfMebibytes), is("1.5 MB"));
}
/**
 * Runs sqlline with the given arguments, wiring the provided streams (UTF-8) when present.
 *
 * @return the sqlline exit status
 * @throws IOException if sqlline fails while reading input
 */
@SuppressWarnings("argument")
static Status runSqlLine(
    String[] args,
    @Nullable InputStream inputStream,
    @Nullable OutputStream outputStream,
    @Nullable OutputStream errorStream)
    throws IOException {
  // Ensure connection-related arguments are present before handing control to sqlline.
  final String[] effectiveArgs = checkConnectionArgs(args);
  final SqlLine sqlLine = new SqlLine();
  final String utf8 = StandardCharsets.UTF_8.name();

  if (outputStream != null) {
    sqlLine.setOutputStream(new PrintStream(outputStream, false, utf8));
  }
  if (errorStream != null) {
    sqlLine.setErrorStream(new PrintStream(errorStream, false, utf8));
  }

  // Third argument enables saving command history — per the sqlline API (confirm).
  return sqlLine.begin(effectiveArgs, inputStream, true);
}
// End-to-end sqlline run: four timestamped rows grouped by a HOP (sliding) window of
// size 2s sliding by 1s must produce the expected (window_end, count) pairs.
@Test
public void testSqlLine_slidingWindow() throws Exception {
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();

    String[] args = buildArgs(
        "CREATE EXTERNAL TABLE table_test (col_a VARCHAR, col_b TIMESTAMP) TYPE 'test';",
        "INSERT INTO table_test SELECT '3', TIMESTAMP '2018-07-01 21:26:06';",
        "INSERT INTO table_test SELECT '4', TIMESTAMP '2018-07-01 21:26:07';",
        "INSERT INTO table_test SELECT '6', TIMESTAMP '2018-07-01 21:26:08';",
        "INSERT INTO table_test SELECT '7', TIMESTAMP '2018-07-01 21:26:09';",
        "SELECT HOP_END(col_b, INTERVAL '1' SECOND, INTERVAL '2' SECOND), count(*) FROM "
            + "table_test GROUP BY HOP(col_b, INTERVAL '1' SECOND, INTERVAL '2' SECOND);");

    BeamSqlLine.runSqlLine(args, null, byteArrayOutputStream, null);

    List<List<String>> lines = toLines(byteArrayOutputStream);
    // Each expected window row must appear somewhere in the captured output
    // (ordering of result rows is not asserted).
    assertThat(
        Arrays.asList(
            Arrays.asList("2018-07-01 21:26:07", "1"),
            Arrays.asList("2018-07-01 21:26:08", "2"),
            Arrays.asList("2018-07-01 21:26:09", "2"),
            Arrays.asList("2018-07-01 21:26:10", "2"),
            Arrays.asList("2018-07-01 21:26:11", "1")),
        everyItem(is(oneOf(lines.toArray()))));
}
public static PredicateTreeAnnotations createPredicateTreeAnnotations(Predicate predicate) { PredicateTreeAnalyzerResult analyzerResult = PredicateTreeAnalyzer.analyzePredicateTree(predicate); // The tree size is used as the interval range. int intervalEnd = analyzerResult.treeSize; AnnotatorContext context = new AnnotatorContext(intervalEnd, analyzerResult.sizeMap); assignIntervalLabels(predicate, Interval.INTERVAL_BEGIN, intervalEnd, false, context); return new PredicateTreeAnnotations( analyzerResult.minFeature, intervalEnd, context.intervals, context.intervalsWithBounds, context.featureConjunctions); }
// Tree under test: AND(key=value, NOT(key=value), key=value, NOT(key=value)).
// NOT nodes contribute zero-star compressed intervals rather than plain feature intervals.
@Test
void require_that_nots_get_correct_intervals() {
    Predicate p = and(
            feature("key").inSet("value"),
            not(feature("key").inSet("value")),
            feature("key").inSet("value"),
            not(feature("key").inSet("value")));
    PredicateTreeAnnotations r = PredicateTreeAnnotator.createPredicateTreeAnnotations(p);
    assertEquals(2, r.minFeature);
    assertEquals(6, r.intervalEnd);
    assertEquals(2, r.intervalMap.size());
    // Interval constants appear packed as 0xBBBBEEEE (begin in the high 16 bits,
    // end in the low 16) — confirm against the Interval encoding.
    assertIntervalContains(r, "key=value", 0x00010001, 0x00020002, 0x00040004, 0x00050005);
    assertIntervalContains(r, Feature.Z_STAR_COMPRESSED_ATTRIBUTE_NAME, 0x00020001, 0x00050004);
}
/**
 * Validates a requested set of row numbers for a block, in order:
 * size check, then per-row range check, then duplicate check.
 * Each helper presumably throws on violation — confirm against their implementations.
 */
public void checkValidRow(int blockMaxRow, List<Integer> rowNumbers) {
    checkRowSize(blockMaxRow, rowNumbers);
    checkValidRowNumber(blockMaxRow, rowNumbers);
    checkDuplicateRowNumber(rowNumbers);
}
@Test
public void 열_생성_요청에_중복이_없고_블록_최대_열_번호를_만족하면_에러를_반환하지_않는다() {
    // given: distinct row numbers, all within the block's maximum row.
    final int maxRow = 5;
    final List<Integer> rows = List.of(1, 2, 3, 4, 5);

    // when / then: validation completes without throwing.
    assertDoesNotThrow(() -> createBlockService.checkValidRow(maxRow, rows));
}
/**
 * Returns whether the given schema exists in Snowflake. Probes by listing at most one
 * table in the schema; a schema-not-found error code means "no", any other SQL error
 * is rethrown unchecked.
 */
@Override
public boolean schemaExists(SnowflakeIdentifier schema) {
    Preconditions.checkArgument(
        schema.type() == SnowflakeIdentifier.Type.SCHEMA,
        "schemaExists requires a SCHEMA identifier, got '%s'",
        schema);

    // Short-circuit: a schema cannot exist if its database does not.
    if (!databaseExists(SnowflakeIdentifier.ofDatabase(schema.databaseName()))) {
        return false;
    }

    // LIMIT 1 keeps the probe cheap; the result rows themselves are irrelevant.
    final String finalQuery = "SHOW TABLES IN SCHEMA IDENTIFIER(?) LIMIT 1";

    try {
        connectionPool.run(
            conn ->
                queryHarness.query(
                    conn, finalQuery, TABLE_RESULT_SET_HANDLER, schema.toIdentifierString()));
    } catch (SQLException e) {
        // A known "schema not found" error code answers the question; anything else is a failure.
        if (SCHEMA_NOT_FOUND_ERROR_CODES.contains(e.getErrorCode())) {
            return false;
        }
        throw new UncheckedSQLException(e, "Failed to check if schema '%s' exists", schema);
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(
            e, "Interrupted while checking if schema '%s' exists", schema);
    }
    return true;
}
// A non-"schema not found" SQLException (error code 2) during the existence probe must be
// wrapped in UncheckedSQLException with the original exception as the cause.
@Test
public void testSchemaFailureWithOtherException() throws SQLException {
    Exception injectedException = new SQLException("Some other exception", "2000", 2, null);
    when(mockResultSet.next())
        // The Database exists check should pass, followed by Error code 2 for Schema exists
        .thenReturn(true)
        .thenReturn(false)
        .thenThrow(injectedException);
    when(mockResultSet.getString("name")).thenReturn("DB1").thenReturn("SCHEMA1");
    when(mockResultSet.getString("database_name")).thenReturn("DB1");

    assertThatExceptionOfType(UncheckedSQLException.class)
        .isThrownBy(
            () -> snowflakeClient.schemaExists(SnowflakeIdentifier.ofSchema("DB_1", "SCHEMA_2")))
        .withMessageContaining("Failed to check if schema 'SCHEMA: 'DB_1.SCHEMA_2'' exists")
        .withCause(injectedException);
}
/**
 * Advances the skeleton one step in its current patrol direction, reversing direction
 * at the patrol bounds, and logs the new position.
 */
@Override
public void update() {
    if (patrollingLeft) {
        position -= 1;
        // Bug fix: use <= instead of == so the skeleton still turns around even if its
        // position was set beyond the bound (== would never match and it would walk forever).
        if (position <= PATROLLING_LEFT_BOUNDING) {
            patrollingLeft = false;
        }
    } else {
        position += 1;
        if (position >= PATROLLING_RIGHT_BOUNDING) {
            patrollingLeft = true;
        }
    }
    logger.info("Skeleton {} is on position {}.", id, position);
}
// A skeleton patrolling right must advance exactly one position per update.
@Test
void testUpdateForPatrollingRight() {
    skeleton.patrollingLeft = false;
    skeleton.setPosition(50);

    skeleton.update();

    assertEquals(51, skeleton.getPosition());
}
/**
 * Persists the state of a compute node under its registry node path.
 * The node is ephemeral: it disappears when this instance's registry session ends.
 */
public void updateComputeNodeState(final String instanceId, final InstanceState instanceState) {
    String statePath = ComputeNode.getComputeNodeStateNodePath(instanceId);
    repository.persistEphemeral(statePath, instanceState.name());
}
@Test
void assertUpdateComputeNodeState() {
    final String instanceId = "foo_instance_id";
    new ComputeNodePersistService(repository).updateComputeNodeState(instanceId, InstanceState.OK);
    // The state name must be written ephemerally under the instance's state node path.
    verify(repository).persistEphemeral(ComputeNode.getComputeNodeStateNodePath(instanceId), InstanceState.OK.name());
}
/**
 * Reads a (possibly multi-valued) property and flattens it into a single
 * comma-separated string, or null when the property is absent.
 */
private static String getProperty(String name, Configuration configuration) {
    // getStringArray returns an empty (never null) array for missing properties,
    // so the Optional wrapping in the previous version was unnecessary.
    String[] values = configuration.getStringArray(relaxPropertyName(name));
    return values.length > 0 ? String.join(",", values) : null;
}
// Loads a properties fixture from the classpath and checks that both single-valued and
// multi-valued (comma-joined) properties round-trip through PinotConfiguration.
@Test
public void assertPropertiesFromBaseConfiguration() throws ConfigurationException {
    PropertiesConfiguration propertiesConfiguration = CommonsConfigurationUtils.fromPath(
        PropertiesConfiguration.class.getClassLoader().getResource("pinot-configuration-1.properties").getFile(),
        true, PropertyIOFactoryKind.ConfigFileIOFactory);
    PinotConfiguration config = new PinotConfiguration(propertiesConfiguration);

    Assert.assertEquals(config.getProperty("pinot.server.storage.factory.class.s3"),
        "org.apache.pinot.plugin.filesystem.S3PinotFS");
    // Multi-valued property must be flattened back to a comma-separated string.
    Assert.assertEquals(config.getProperty("pinot.server.segment.fetcher.protocols"), "file,http,s3");
}
/**
 * Folds a new sample into the running maximum.
 * Note: the boxed argument is unboxed, so a null value would throw NPE (unchanged behavior).
 */
@Override
public void add(Double value) {
    final double sample = value;
    this.max = Math.max(sample, this.max);
}
@Test
void testAdd() {
    final DoubleMaximum aggregator = new DoubleMaximum();
    // Mixed positive and negative samples; only the largest must survive.
    for (double sample : new double[] {1234.5768, 9876.5432, -987.6543, -123.4567}) {
        aggregator.add(sample);
    }
    assertThat(aggregator.getLocalValue()).isCloseTo(9876.5432, within(0.0));
}
/**
 * Returns all index set configurations, sorted ascending by title.
 * NOTE(review): the cursor is cast to Iterator&lt;? extends IndexSetConfig&gt; — this relies on
 * the collection being a typed (e.g. Mongojack) collection; confirm before refactoring.
 */
@Override
public List<IndexSetConfig> findAll() {
    return ImmutableList.copyOf((Iterator<? extends IndexSetConfig>) collection.find().sort(DBSort.asc("title")));
}
// Loads three index set fixtures from MongoDB and checks findAll() returns them fully
// populated and sorted by title ("Test 1", "Test 2", "Test 3").
@Test
@MongoDBFixtures("MongoIndexSetServiceTest.json")
public void findAll() throws Exception {
    final List<IndexSetConfig> configs = indexSetService.findAll();

    assertThat(configs)
        .isNotEmpty()
        .hasSize(3)
        .containsExactly(
            // Fixture 1: writable + default, 4 shards / 1 replica, description present.
            IndexSetConfig.create(
                "57f3d721a43c2d59cb750001",
                "Test 1",
                "This is the index set configuration for Test 1",
                true, true,
                "test_1",
                4, 1,
                MessageCountRotationStrategy.class.getCanonicalName(),
                MessageCountRotationStrategyConfig.create(1000),
                NoopRetentionStrategy.class.getCanonicalName(),
                NoopRetentionStrategyConfig.create(10),
                ZonedDateTime.of(2016, 10, 4, 17, 0, 0, 0, ZoneOffset.UTC),
                "standard", "test_1",
                null, 1, false
            ),
            // Fixture 2: null description, not default, 1 shard / 0 replicas.
            IndexSetConfig.create(
                "57f3d721a43c2d59cb750002",
                "Test 2",
                null,
                true, false,
                "test_2",
                1, 0,
                MessageCountRotationStrategy.class.getCanonicalName(),
                MessageCountRotationStrategyConfig.create(2500),
                NoopRetentionStrategy.class.getCanonicalName(),
                NoopRetentionStrategyConfig.create(25),
                ZonedDateTime.of(2016, 10, 4, 18, 0, 0, 0, ZoneOffset.UTC),
                "standard", "test_2",
                null, 1, false
            ),
            // Fixture 3: null "default" flag and a non-null index template type.
            IndexSetConfig.create(
                "57f3d721a43c2d59cb750003",
                "Test 3",
                "This is the index set configuration for Test 3 - with an index set index template",
                true, null,
                "test_3",
                1, 0,
                MessageCountRotationStrategy.class.getCanonicalName(),
                MessageCountRotationStrategyConfig.create(2500),
                NoopRetentionStrategy.class.getCanonicalName(),
                NoopRetentionStrategyConfig.create(25),
                ZonedDateTime.of(2016, 10, 4, 18, 0, 0, 0, ZoneOffset.UTC),
                "standard", "test_3",
                EVENT_TEMPLATE_TYPE, 1, false
            )
        );
}
/**
 * Routes a log4j event to the macOS unified logging system, mapping log4j levels to
 * os_log types: FATAL/ERROR -> error, TRACE -> debug, DEBUG/INFO -> info, else default.
 * Events with a null message are dropped.
 */
@Override
public void append(final LogEvent event) {
    if(null == event.getMessage()) {
        return;
    }
    // Category name: thread name plus logger name.
    final String logger = String.format("%s %s", event.getThreadName(), event.getLoggerName());
    Level level = event.getLevel();
    if(Level.FATAL.equals(level) || Level.ERROR.equals(level)) {
        this.log(OS_LOG_TYPE_ERROR, logger, event.getMessage().toString());
    }
    else if(Level.TRACE.equals(level)) {
        this.log(OS_LOG_TYPE_DEBUG, logger, event.getMessage().toString());
    }
    else if(Level.DEBUG.equals(level) || Level.INFO.equals(level)) {
        this.log(OS_LOG_TYPE_INFO, logger, event.getMessage().toString());
    }
    else {
        this.log(OS_LOG_TYPE_DEFAULT, logger, event.getMessage().toString());
    }
    // NOTE(review): ignoreExceptions() is used here to decide whether this appender renders
    // the throwable itself — confirm this matches the intended log4j2 ignoreExceptions
    // contract (which governs swallowing append-time exceptions).
    if(ignoreExceptions()) {
        // Appender responsible for rendering
        final Throwable thrown = event.getThrown();
        if(thrown != null) {
            // One os_log line per stack frame.
            final String[] trace = ExceptionUtils.getStackFrames(thrown);
            for(final String t : trace) {
                this.log(OS_LOG_TYPE_DEFAULT, logger, t);
            }
        }
    }
}
// Smoke test: DEBUG and ERROR events carrying a throwable must both be appendable
// without throwing.
@Test
public void testAppend() {
    final UnifiedSystemLogAppender appender = new UnifiedSystemLogAppender();
    for (Level level : new Level[] {Level.DEBUG, Level.ERROR}) {
        appender.append(new Log4jLogEvent.Builder()
            .setLoggerName(UnifiedSystemLogAppender.class.getCanonicalName())
            .setLevel(level)
            .setThrown(new RuntimeException())
            .setMessage(new SimpleMessage("Test"))
            .build());
    }
}
/**
 * Generates the predicate-evaluation statements and inserts them, in order, at the
 * front of the target block (positions 0..n-1), ahead of any existing statements.
 */
static void populateEvaluateNodeWithPredicate(final BlockStmt toPopulate, final Predicate predicate, final List<Field<?>> fields) {
    // set predicate
    BlockStmt toAdd = getKiePMMLPredicate(PREDICATE, predicate, fields);
    final NodeList<Statement> predicateStatements = toAdd.getStatements();
    // Index-based loop over the source NodeList; addStatement may re-parent nodes,
    // so iterating by position is presumably safer than using an iterator — confirm
    // against JavaParser's NodeList ownership semantics before restructuring.
    for (int i = 0; i < predicateStatements.size(); i ++) {
        toPopulate.addStatement(i, predicateStatements.get(i));
    }
}
// Generates predicate-evaluation code for a compound predicate and compares it,
// AST-to-AST (formatting-insensitive), against the checked-in expected source file.
@Test
void populateEvaluateNodeWithPredicateFunction() throws IOException {
    BlockStmt toPopulate = new BlockStmt();
    KiePMMLNodeFactory.populateEvaluateNodeWithPredicate(toPopulate,
                                                         compoundPredicateNode.getPredicate(),
                                                         getFieldsFromDataDictionaryAndDerivedFields(dataDictionary2, derivedFields2));
    String text = getFileContent(TEST_01_SOURCE);
    Statement expected = JavaParserUtils.parseBlock(text);
    assertThat(JavaParserUtils.equalsNode(expected, toPopulate)).isTrue();
}
/**
 * Renders a metadata record for logging, redacting sensitive payloads:
 * sensitive config values and all SCRAM credential material become "(redacted)";
 * every other record type is printed via its own toString().
 */
public String toLoggableString(ApiMessage message) {
    MetadataRecordType type = MetadataRecordType.fromId(message.apiKey());
    switch (type) {
        case CONFIG_RECORD: {
            // Non-sensitive configs are safe to log verbatim.
            if (!configSchema.isSensitive((ConfigRecord) message)) {
                return message.toString();
            }
            // Redact on a duplicate so the original record is left untouched.
            ConfigRecord duplicate = ((ConfigRecord) message).duplicate();
            duplicate.setValue("(redacted)");
            return duplicate.toString();
        }
        case USER_SCRAM_CREDENTIAL_RECORD: {
            // Hand-built string: salt and keys are always redacted for SCRAM credentials.
            UserScramCredentialRecord record = (UserScramCredentialRecord) message;
            return "UserScramCredentialRecord("
                + "name=" + ((record.name() == null) ? "null" : "'" + record.name() + "'")
                + ", mechanism=" + record.mechanism()
                + ", salt=(redacted)"
                + ", storedKey=(redacted)"
                + ", serverKey=(redacted)"
                + ", iterations=" + record.iterations()
                + ")";
        }
        default:
            return message.toString();
    }
}
// A non-sensitive config value must be printed verbatim — no "(redacted)" substitution.
@Test
public void testNonSensitiveConfigRecordToString() {
    ConfigRecord record = new ConfigRecord()
        .setResourceType(BROKER.id())
        .setResourceName("0")
        .setName("foobar")
        .setValue("item1,item2");
    String expected = "ConfigRecord(resourceType=4, resourceName='0', name='foobar', "
        + "value='item1,item2')";
    assertEquals(expected, REDACTOR.toLoggableString(record));
}
/**
 * Builds a SQL parser rule configuration from its YAML form, substituting the
 * built-in defaults for any cache section missing from the YAML.
 */
@Override
public SQLParserRuleConfiguration swapToObject(final YamlSQLParserRuleConfiguration yamlConfig) {
    CacheOption parseTreeCache = DefaultSQLParserRuleConfigurationBuilder.PARSE_TREE_CACHE_OPTION;
    if (null != yamlConfig.getParseTreeCache()) {
        parseTreeCache = cacheOptionSwapper.swapToObject(yamlConfig.getParseTreeCache());
    }
    CacheOption sqlStatementCache = DefaultSQLParserRuleConfigurationBuilder.SQL_STATEMENT_CACHE_OPTION;
    if (null != yamlConfig.getSqlStatementCache()) {
        sqlStatementCache = cacheOptionSwapper.swapToObject(yamlConfig.getSqlStatementCache());
    }
    return new SQLParserRuleConfiguration(parseTreeCache, sqlStatementCache);
}
@Test
void assertSwapToObject() {
    // Build the YAML form with distinct values for each cache section.
    YamlSQLParserCacheOptionRuleConfiguration parseTreeCacheYaml = new YamlSQLParserCacheOptionRuleConfiguration();
    parseTreeCacheYaml.setInitialCapacity(2);
    parseTreeCacheYaml.setMaximumSize(5L);
    YamlSQLParserCacheOptionRuleConfiguration sqlStatementCacheYaml = new YamlSQLParserCacheOptionRuleConfiguration();
    sqlStatementCacheYaml.setInitialCapacity(4);
    sqlStatementCacheYaml.setMaximumSize(7L);
    YamlSQLParserRuleConfiguration yamlConfig = new YamlSQLParserRuleConfiguration();
    yamlConfig.setParseTreeCache(parseTreeCacheYaml);
    yamlConfig.setSqlStatementCache(sqlStatementCacheYaml);

    SQLParserRuleConfiguration actual = new YamlSQLParserRuleConfigurationSwapper().swapToObject(yamlConfig);

    // Both cache sections must carry their YAML values through unchanged.
    assertThat(actual.getParseTreeCache().getInitialCapacity(), is(2));
    assertThat(actual.getParseTreeCache().getMaximumSize(), is(5L));
    assertThat(actual.getSqlStatementCache().getInitialCapacity(), is(4));
    assertThat(actual.getSqlStatementCache().getMaximumSize(), is(7L));
}
/**
 * Aggregates fine-grained bars into coarser bars of {@code timePeriod}.
 * The target period must be an exact multiple of the input bars' period.
 * Open comes from the first bar of each window, close from the last, high/low/volume/
 * amount/trades are folded across the window.
 */
@Override
public List<Bar> aggregate(List<Bar> bars) {
    final List<Bar> aggregated = new ArrayList<>();
    if (bars.isEmpty()) {
        return aggregated;
    }
    final Bar firstBar = bars.get(0);
    // get the actual time period
    final Duration actualDur = firstBar.getTimePeriod();
    // check if new timePeriod is a multiplication of actual time period
    final boolean isMultiplication = timePeriod.getSeconds() % actualDur.getSeconds() == 0;
    if (!isMultiplication) {
        throw new IllegalArgumentException(
                "Cannot aggregate bars: the new timePeriod must be a multiplication of the actual timePeriod.");
    }

    int i = 0;
    final Num zero = firstBar.getOpenPrice().zero();
    while (i < bars.size()) {
        Bar bar = bars.get(i);
        final ZonedDateTime beginTime = bar.getBeginTime();
        final Num open = bar.getOpenPrice();
        Num high = bar.getHighPrice();
        Num low = bar.getLowPrice();
        // close stays null if no bar is folded in below — TODO confirm this cannot happen
        // for a well-formed window.
        Num close = null;
        Num volume = zero;
        Num amount = zero;
        long trades = 0;
        Duration sumDur = Duration.ZERO;

        // Fold input bars into the current window. Note: when i runs past the end of the
        // list, sumDur and i still advance, so i can exceed bars.size() for a partial
        // trailing window — that is what the onlyFinalBars check below relies on.
        while (isInDuration(sumDur)) {
            if (i < bars.size()) {
                // Windows are also bounded by begin-time proximity, not just duration.
                if (!beginTimesInDuration(beginTime, bars.get(i).getBeginTime())) {
                    break;
                }
                bar = bars.get(i);
                if (high == null || bar.getHighPrice().isGreaterThan(high)) {
                    high = bar.getHighPrice();
                }
                if (low == null || bar.getLowPrice().isLessThan(low)) {
                    low = bar.getLowPrice();
                }
                close = bar.getClosePrice();
                if (bar.getVolume() != null) {
                    volume = volume.plus(bar.getVolume());
                }
                if (bar.getAmount() != null) {
                    amount = amount.plus(bar.getAmount());
                }
                if (bar.getTrades() != 0) {
                    trades = trades + bar.getTrades();
                }
            }
            sumDur = sumDur.plus(actualDur);
            i++;
        }

        // With onlyFinalBars set, a trailing window that ran out of input (i > size)
        // is dropped instead of emitted as a partial bar.
        if (!onlyFinalBars || i <= bars.size()) {
            final Bar aggregatedBar = new BaseBar(timePeriod, beginTime.plus(timePeriod), open, high, low, close,
                    volume, amount, trades);
            aggregated.add(aggregatedBar);
        }
    }
    return aggregated;
}
@Test public void upscaledTo10DayBars() { final DurationBarAggregator barAggregator = new DurationBarAggregator(Duration.ofDays(10), true); final List<Bar> bars = barAggregator.aggregate(getOneDayBars()); // must be 1 bars assertEquals(1, bars.size()); // bar 1 must have ohlcv (1, 91, 4, 10, 293) final Bar bar1 = bars.get(0); final Num num1 = bar1.getOpenPrice(); assertNumEquals(num1.numOf(1), bar1.getOpenPrice()); assertNumEquals(num1.numOf(91), bar1.getHighPrice()); assertNumEquals(num1.numOf(4), bar1.getLowPrice()); assertNumEquals(num1.numOf(10), bar1.getClosePrice()); assertNumEquals(num1.numOf(293), bar1.getVolume()); }
/**
 * Returns the status of a path, served from the local metadata cache when possible.
 * Incomplete files are always re-fetched; negative lookups are cached via the
 * NOT_FOUND_STATUS sentinel; cache hits asynchronously refresh the master access time.
 */
@Override
public URIStatus getStatus(AlluxioURI path, GetStatusPOptions options)
    throws FileDoesNotExistException, IOException, AlluxioException {
    URIStatus status = mMetadataCache.get(path);
    if (status == null || !status.isCompleted()) {
        try {
            status = mDelegatedFileSystem.getStatus(path, options);
            mMetadataCache.put(path, status);
        } catch (FileDoesNotExistException e) {
            // Cache the negative result so repeated lookups skip the RPC.
            mMetadataCache.put(path, NOT_FOUND_STATUS);
            throw e;
        }
    } else if (status == NOT_FOUND_STATUS) {
        // Reference comparison is intentional: NOT_FOUND_STATUS is a unique sentinel.
        throw new FileDoesNotExistException("Path \"" + path.getPath() + "\" does not exist.");
    } else if (options.getUpdateTimestamps()) {
        // Asynchronously send an RPC to master to update the access time.
        // Otherwise, if we need to synchronously send RPC to master to do this,
        // caching the status does not bring any benefit.
        asyncUpdateFileAccessTime(path);
    }
    return status;
}
// First getStatus misses the metadata cache and costs one RPC; the second is served
// from the cache so the RPC count must stay at one.
@Test
public void getStatus() throws Exception {
    mFs.getStatus(FILE);
    assertEquals(1, mRpcCountingFs.getStatusRpcCount(FILE));
    // The following getStatus gets from cache, so no RPC will be made.
    mFs.getStatus(FILE);
    assertEquals(1, mRpcCountingFs.getStatusRpcCount(FILE));
}
/**
 * Returns the topics whose queue data carries the unit-subscription system flag.
 * Failures are logged and an empty (or partial) list is returned.
 */
public TopicList getHasUnitSubTopicList() {
    TopicList topicList = new TopicList();
    try {
        // Bug fix: acquire the lock OUTSIDE the inner try. Previously, if
        // lockInterruptibly() threw before the lock was held, the finally block called
        // unlock() on an un-held lock, raising IllegalMonitorStateException.
        this.lock.readLock().lockInterruptibly();
        try {
            for (Entry<String, Map<String, QueueData>> topicEntry : this.topicQueueTable.entrySet()) {
                String topic = topicEntry.getKey();
                Map<String, QueueData> queueDatas = topicEntry.getValue();
                if (queueDatas != null && queueDatas.size() > 0
                    && TopicSysFlag.hasUnitSubFlag(queueDatas.values().iterator().next().getTopicSysFlag())) {
                    topicList.getTopicList().add(topic);
                }
            }
        } finally {
            this.lock.readLock().unlock();
        }
    } catch (InterruptedException e) {
        // Preserve the interrupt status for callers higher up the stack.
        Thread.currentThread().interrupt();
        log.error("getHasUnitSubTopicList Exception", e);
    } catch (Exception e) {
        log.error("getHasUnitSubTopicList Exception", e);
    }
    return topicList;
}
// Encoding the unit-sub topic list must always yield a non-null payload,
// even when no topics carry the flag.
@Test
public void testGetHasUnitSubTopicList() {
    final byte[] encoded = routeInfoManager.getHasUnitSubTopicList().encode();
    assertThat(encoded).isNotNull();
}
/**
 * Returns the dictionary type with the given primary key, or null when absent.
 */
@Override
public DictTypeDO getDictType(Long id) {
    return dictTypeMapper.selectById(id);
}
// NOTE(review): this exercises the String-typed getDictType(type) overload (lookup by
// type code), not the Long-id variant shown above — confirm both overloads exist.
@Test
public void testGetDictType_type() {
    // mock 数据 (seed a row to look up)
    DictTypeDO dbDictType = randomDictTypeDO();
    dictTypeMapper.insert(dbDictType);
    // 准备参数 (use the seeded row's type code)
    String type = dbDictType.getType();
    // 调用
    DictTypeDO dictType = dictTypeService.getDictType(type);
    // 断言 (the looked-up row must match the seeded one field-for-field)
    assertNotNull(dictType);
    assertPojoEquals(dbDictType, dictType);
}
/**
 * Extracts the target table name from a SQL Server DELETE statement.
 * Join table sources are rejected; any other unexpected source type also fails fast.
 */
@Override
public String getTableName() {
    StringBuilder sb = new StringBuilder();
    // Visitor prints only the table expression; joins are explicitly unsupported.
    SQLServerOutputVisitor visitor = new SQLServerOutputVisitor(sb) {

        @Override
        public boolean visit(SQLExprTableSource x) {
            printTableSourceExpr(x.getExpr());
            return false;
        }

        @Override
        public boolean visit(SQLJoinTableSource x) {
            throw new NotSupportYetException("not support the syntax of delete with join table");
        }
    };
    SQLTableSource tableSource;
    // DELETE may carry the table either in FROM or directly as the table source.
    if (ast.getFrom() == null) {
        tableSource = ast.getTableSource();
    } else {
        tableSource = ast.getFrom();
    }

    if (tableSource instanceof SQLExprTableSource) {
        visitor.visit((SQLExprTableSource) tableSource);
    } else if (tableSource instanceof SQLJoinTableSource) {
        visitor.visit((SQLJoinTableSource) tableSource);
    } else {
        // Bug fix: corrected the typo "unknow" -> "unknown" in the error message.
        throw new NotSupportYetException("not support the syntax of delete with unknown");
    }
    return sb.toString();
}
// DELETE with an IN-subquery: the table name must resolve, while the WHERE clause
// is rejected as unsupported.
@Test
public void deleteRecognizerTest_5() {
    final String sql = "DELETE FROM t1 WHERE id in (SELECT id FROM t1)";
    final SQLStatement statement = getSQLStatement(sql);

    final SqlServerDeleteRecognizer recognizer = new SqlServerDeleteRecognizer(sql, statement);

    Assertions.assertEquals(sql, recognizer.getOriginalSQL());
    Assertions.assertEquals("t1", recognizer.getTableName());
    Assertions.assertThrows(IllegalArgumentException.class, recognizer::getWhereCondition);
}
/**
 * Accepts only the literals "true" and "false", case-insensitively.
 * A null value fails the check (equalsIgnoreCase(null) is false).
 */
@Override
public void validate(String value, @Nullable List<String> options) {
    boolean isBooleanLiteral = "true".equalsIgnoreCase(value) || "false".equalsIgnoreCase(value);
    checkRequest(isBooleanLiteral, "Value '%s' must be one of \"true\" or \"false\".", value);
}
// Every case variant of true/false must be accepted without throwing.
@Test
public void not_fail_on_valid_boolean() {
    for (String value : new String[] {"true", "True", "false", "FALSE"}) {
        underTest.validate(value, null);
    }
}
/**
 * Scores each known ecosystem against the CVE's English descriptions and returns
 * the winning ecosystem as computed by getResult.
 */
public String getEcosystem(DefCveItem cve) {
    // One score slot per known ecosystem.
    final int[] ecosystemMap = new int[ECOSYSTEMS.length];
    // Only English descriptions contribute; search() accumulates hits into the map.
    // forEachOrdered preserves description order — presumably relevant to scoring; confirm.
    cve.getCve().getDescriptions().stream()
        .filter((langString) -> (langString.getLang().equals("en")))
        .forEachOrdered((langString) -> search(langString.getValue(), ecosystemMap));
    return getResult(ecosystemMap);
}
// Two ".java" hits outweigh a single ".cpp" hit, so the java ecosystem must win.
@Test
public void testScoring() throws IOException {
    final DescriptionEcosystemMapper mapper = new DescriptionEcosystemMapper();
    final String description = "a.cpp b.java c.java";
    assertEquals(JarAnalyzer.DEPENDENCY_ECOSYSTEM, mapper.getEcosystem(asCve(description)));
}