focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Converts one or more CSV files into a single Parquet file.
// Steps: (1) build CSVProperties from command-line flags (a --header flag implies the
// file itself has no header row); (2) obtain the Avro schema either from --schema or by
// inferring it from the first target's name and contents, verifying every target infers
// the SAME schema (parsing-form comparison) so mixed inputs fail fast; (3) stream every
// record from every target through a single AvroParquetWriter.
// Returns 0 on success; wraps per-record failures with the record index and file name.
@Override @SuppressWarnings("unchecked") public int run() throws IOException { Preconditions.checkArgument(targets != null && !targets.isEmpty(), "CSV path is required."); if (header != null) { // if a header is given on the command line, don't assume one is in the file noHeader = true; } CSVProperties props = new CSVProperties.Builder() .delimiter(delimiter) .escape(escape) .quote(quote) .header(header) .hasHeader(!noHeader) .linesToSkip(linesToSkip) .charset(charsetName) .build(); Schema csvSchema = null; if (avroSchemaFile != null) { csvSchema = Schemas.fromAvsc(open(avroSchemaFile)); } else { Set<String> required = ImmutableSet.of(); if (requiredFields != null) { required = ImmutableSet.copyOf(requiredFields); } String filename = new File(targets.get(0)).getName(); String recordName; if (filename.contains(".")) { recordName = filename.substring(0, filename.indexOf(".")); } else { recordName = filename; } // If the schema is not explicitly provided, // ensure that all input files share the same one. for (String target : targets) { Schema schema = AvroCSV.inferNullableSchema(recordName, open(target), props, required); if (csvSchema == null) { csvSchema = schema; } else if (!SchemaNormalization.toParsingForm(csvSchema) .equals(SchemaNormalization.toParsingForm(schema))) { throw new IllegalArgumentException(target + " seems to have a different schema from others. " + "Please specify the correct schema explicitly with the `--schema` option."); } } } try (ParquetWriter<Record> writer = AvroParquetWriter.<Record>builder(qualifiedPath(outputPath)) .withWriterVersion(v2 ? PARQUET_2_0 : PARQUET_1_0) .withWriteMode(overwrite ? 
ParquetFileWriter.Mode.OVERWRITE : ParquetFileWriter.Mode.CREATE) .withCompressionCodec(Codecs.parquetCodec(compressionCodecName)) .withDictionaryEncoding(true) .withDictionaryPageSize(dictionaryPageSize) .withPageSize(pageSize) .withRowGroupSize(rowGroupSize) .withDataModel(GenericData.get()) .withConf(getConf()) .withSchema(csvSchema) .build()) { for (String target : targets) { long count = 0; try (AvroCSVReader<Record> reader = new AvroCSVReader<>(open(target), props, csvSchema, Record.class, true)) { for (Record record : reader) { writer.write(record); count++; } } catch (RuntimeException e) { throw new RuntimeException("Failed on record " + count + " in file " + target, e); } } } return 0; }
// Verifies that the convert command accepts the same CSV file listed twice as input
// (schemas trivially match) and produces a Parquet output file with exit code 0.
@Test public void testConvertCSVCommandWithMultipleInput() throws IOException { File file = csvFile(); ConvertCSVCommand command = new ConvertCSVCommand(createLogger()); command.targets = Arrays.asList(file.getAbsolutePath(), file.getAbsolutePath()); File output = new File(getTempFolder(), getClass().getSimpleName() + ".parquet"); command.outputPath = output.getAbsolutePath(); command.setConf(new Configuration()); Assert.assertEquals(0, command.run()); Assert.assertTrue(output.exists()); }
// Builds base Bigtable data settings from the connection config, then layers the
// write-specific options (with optional fallbacks derived from legacy BigtableOptions)
// on top. Pure translation; throws IOException if settings construction fails.
static BigtableDataSettings translateWriteToVeneerSettings( @NonNull BigtableConfig config, @NonNull BigtableWriteOptions options, @Nullable BigtableWriteOptions optionsFromBigtableOptions, @NonNull PipelineOptions pipelineOptions) throws IOException { BigtableDataSettings.Builder settings = buildBigtableDataSettings(config, pipelineOptions); return configureWriteSettings(settings, options, optionsFromBigtableOptions); }
// End-to-end check that every write option (timeouts, batch element/byte thresholds,
// outstanding-element/byte flow control) is faithfully propagated into the veneer
// client's bulkMutateRowsSettings, alongside project/instance/app-profile ids.
@Test public void testVeneerWriteSettings() throws Exception { BigtableConfig config = BigtableConfig.builder() .setProjectId(ValueProvider.StaticValueProvider.of("project")) .setInstanceId(ValueProvider.StaticValueProvider.of("instance")) .setAppProfileId(ValueProvider.StaticValueProvider.of("app")) .setValidate(true) .build(); BigtableWriteOptions writeOptions = BigtableWriteOptions.builder() .setTableId(ValueProvider.StaticValueProvider.of("table")) .setAttemptTimeout(org.joda.time.Duration.millis(101)) .setOperationTimeout(org.joda.time.Duration.millis(1001)) .setMaxElementsPerBatch(105) .setMaxBytesPerBatch(102) .setMaxOutstandingElements(10001) .setMaxOutstandingBytes(100001) .build(); PipelineOptions pipelineOptions = PipelineOptionsFactory.as(GcpOptions.class); BigtableDataSettings settings = BigtableConfigTranslator.translateWriteToVeneerSettings( config, writeOptions, null, pipelineOptions); EnhancedBigtableStubSettings stubSettings = settings.getStubSettings(); assertEquals(config.getProjectId().get(), stubSettings.getProjectId()); assertEquals(config.getInstanceId().get(), stubSettings.getInstanceId()); assertEquals(config.getAppProfileId().get(), stubSettings.getAppProfileId()); assertEquals( Duration.ofMillis(101), stubSettings.bulkMutateRowsSettings().getRetrySettings().getInitialRpcTimeout()); assertEquals( Duration.ofMillis(1001), stubSettings.bulkMutateRowsSettings().getRetrySettings().getTotalTimeout()); assertEquals( 105, (long) stubSettings.bulkMutateRowsSettings().getBatchingSettings().getElementCountThreshold()); assertEquals( 102, (long) stubSettings.bulkMutateRowsSettings().getBatchingSettings().getRequestByteThreshold()); assertEquals( 10001, (long) stubSettings .bulkMutateRowsSettings() .getBatchingSettings() .getFlowControlSettings() .getMaxOutstandingElementCount()); assertEquals( 100001, (long) stubSettings .bulkMutateRowsSettings() .getBatchingSettings() .getFlowControlSettings() .getMaxOutstandingRequestBytes()); }
/**
 * Maps a PMML {@code ParameterField} onto its KiePMML counterpart.
 * Data type and op type are translated by enum name when present; a missing
 * source value maps to {@code null} on the target.
 */
static KiePMMLParameterField getKiePMMLParameterField(final ParameterField parameterField) {
    DATA_TYPE mappedDataType = null;
    if (parameterField.getDataType() != null) {
        mappedDataType = DATA_TYPE.byName(parameterField.getDataType().value());
    }
    OP_TYPE mappedOpType = null;
    if (parameterField.getOpType() != null) {
        mappedOpType = OP_TYPE.byName(parameterField.getOpType().value());
    }
    return KiePMMLParameterField.builder(parameterField.getName(), Collections.emptyList())
            .withDataType(mappedDataType)
            .withOpType(mappedOpType)
            .withDisplayName(parameterField.getDisplayName())
            .build();
}
// Round-trip check: convert a fixture ParameterField and verify every mapped
// attribute via the shared commonVerifyKiePMMLParameterField helper.
@Test void getKiePMMLParameterField() { final String fieldName = "fieldName"; final ParameterField toConvert = getParameterField(fieldName); KiePMMLParameterField retrieved = KiePMMLParameterFieldInstanceFactory.getKiePMMLParameterField(toConvert); commonVerifyKiePMMLParameterField(retrieved, toConvert); }
// Routes a global (non-task-attributable) failure into the scheduler state machine,
// first asking the configured failure enrichers (on the main-thread executor) to
// asynchronously produce labels describing the failure.
@Override public void handleGlobalFailure(Throwable cause) { final FailureEnricher.Context ctx = DefaultFailureEnricherContext.forGlobalFailure( jobInfo, jobManagerJobMetricGroup, ioExecutor, userCodeClassLoader); final CompletableFuture<Map<String, String>> failureLabels = FailureEnricherUtils.labelFailure( cause, ctx, getMainThreadExecutor(), failureEnrichers); state.handleGlobalFailure(cause, failureLabels); }
// Two global failures raised back-to-back must collapse into a single exception-history
// root entry: the first failure is the root, the second appears only as a concurrent
// exception on that entry.
@Test void testExceptionHistoryWithTaskConcurrentGlobalFailure() throws Exception { final Exception expectedException1 = new Exception("Expected Global Exception 1"); final Exception expectedException2 = new Exception("Expected Global Exception 2"); final BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic = (scheduler, attemptIds) -> { scheduler.handleGlobalFailure(expectedException1); scheduler.handleGlobalFailure(expectedException2); }; final Iterable<RootExceptionHistoryEntry> entries = new ExceptionHistoryTester(singleThreadMainThreadExecutor) .withTestLogic(testLogic) .run(); assertThat(entries).hasSize(1); final RootExceptionHistoryEntry failure = entries.iterator().next(); assertThat(failure.getException().deserializeError(classLoader)) .isEqualTo(expectedException1); final Iterable<ExceptionHistoryEntry> concurrentExceptions = failure.getConcurrentExceptions(); final List<Throwable> foundExceptions = IterableUtils.toStream(concurrentExceptions) .map(ExceptionHistoryEntry::getException) .map(exception -> exception.deserializeError(classLoader)) .collect(Collectors.toList()); assertThat(foundExceptions).containsExactly(expectedException2); }
// Queues a data file for the pending append and returns this for fluent chaining.
@Override public MergeAppend appendFile(DataFile file) { add(file); return this; }
// Appending files written under two different partition specs to an empty table must
// produce one snapshot with one manifest per spec, each manifest carrying the expected
// sequence numbers, snapshot id, file and ADDED status (sequence numbers differ by
// table format version, hence the V1/V2 assertions).
@TestTemplate public void testEmptyTableAppendFilesWithDifferentSpecs() { assertThat(listManifestFiles()).as("Table should start empty").isEmpty(); TableMetadata base = readMetadata(); assertThat(base.currentSnapshot()).as("Should not have a current snapshot").isNull(); assertThat(base.lastSequenceNumber()).as("Last sequence number should be 0").isEqualTo(0); table.updateSpec().addField("id").commit(); PartitionSpec newSpec = table.spec(); assertThat(table.specs()).as("Table should have 2 specs").hasSize(2); DataFile fileNewSpec = DataFiles.builder(newSpec) .withPath("/path/to/data-b.parquet") .withPartitionPath("data_bucket=0/id=0") .withFileSizeInBytes(10) .withRecordCount(1) .build(); Snapshot committedSnapshot = commit(table, table.newAppend().appendFile(FILE_A).appendFile(fileNewSpec), branch); assertThat(committedSnapshot).as("Should create a snapshot").isNotNull(); V1Assert.assertEquals( "Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber()); V2Assert.assertEquals( "Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber()); assertThat(committedSnapshot.allManifests(table.io())) .as("Should create 2 manifests for initial write, 1 manifest per spec") .hasSize(2); long snapshotId = committedSnapshot.snapshotId(); ImmutableMap<Integer, DataFile> expectedFileBySpec = ImmutableMap.of(SPEC.specId(), FILE_A, newSpec.specId(), fileNewSpec); expectedFileBySpec.forEach( (specId, expectedDataFile) -> { ManifestFile manifestFileForSpecId = committedSnapshot.allManifests(table.io()).stream() .filter(m -> Objects.equals(m.partitionSpecId(), specId)) .findAny() .get(); validateManifest( manifestFileForSpecId, dataSeqs(1L), fileSeqs(1L), ids(snapshotId), files(expectedDataFile), statuses(Status.ADDED)); }); }
// Fetches session credentials from the configured metadata URL over HTTP.
// Builds a one-shot HTTP client (closed via try-with-resources), issues a GET against
// the parsed host, and parses the response entity on HTTP 200. Any non-200 status or
// missing entity becomes an exception, and every IOException is translated into a
// LoginFailureException for the caller.
@Override public Credentials get() throws BackgroundException { if(log.isDebugEnabled()) { log.debug(String.format("Configure credentials from %s", url)); } final Host address = new HostParser(ProtocolFactory.get()).get(url); final HttpConnectionPoolBuilder builder = new HttpConnectionPoolBuilder(address, new ThreadLocalHostnameDelegatingTrustManager(trust, address.getHostname()), key, ProxyFactory.get()); final HttpClientBuilder configuration = builder.build(ProxyFactory.get(), new DisabledTranscriptListener(), new DisabledLoginCallback()); try (CloseableHttpClient client = configuration.build()) { final HttpRequestBase resource = new HttpGet(new HostUrlProvider().withUsername(false).withPath(true).get(address)); return client.execute(resource, new ResponseHandler<Credentials>() { @Override public Credentials handleResponse(final HttpResponse response) throws IOException { switch(response.getStatusLine().getStatusCode()) { case HttpStatus.SC_OK: final HttpEntity entity = response.getEntity(); if(entity == null) { log.warn(String.format("Missing response entity in %s", response)); throw new ClientProtocolException("Empty response"); } else { return parse(entity.getContent()); } } throw new HttpResponseException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()); } }); } catch(IOException e) { log.warn(String.format("Failure %s to retrieve session credentials", e)); throw new LoginFailureException(e.getMessage(), e); } }
// Hits the real EC2 instance-metadata endpoint; expected to time out off-EC2.
// Ignored because it depends on the network environment.
@Test(expected = ConnectionTimeoutException.class) @Ignore public void testGet() throws Exception { new AWSSessionCredentialsRetriever(new DisabledX509TrustManager(), new DefaultX509KeyManager(), "http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access") .get(); }
// Inner-join overload without a name or materialized store: delegates to doJoin with
// empty naming, no store builder, and both outer flags disabled.
@Override public <V1, R> KTable<K, R> join(final KTable<K, V1> other, final ValueJoiner<? super V, ? super V1, ? extends R> joiner) { return doJoin(other, joiner, NamedInternal.empty(), null, false, false); }
// The store-less join overload must not throw: a null internal store is permitted.
@Test public void shouldAllowNullStoreInJoin() { table.join(table, MockValueJoiner.TOSTRING_JOINER); }
// Public convenience overload: wraps the raw StoreBuilder and registers it as a
// non-global store connected to the given processors.
public final void addStateStore(final StoreBuilder<?> storeBuilder, final String... processorNames) { addStateStore(new StoreBuilderWrapper(storeBuilder), false, processorNames); }
// Connecting a store to an unknown processor name must fail with a TopologyException.
@Test public void testAddStateStoreWithNonExistingProcessor() { assertThrows(TopologyException.class, () -> builder.addStateStore(storeBuilder, "no-such-processor")); }
// Failover for the hedging provider: remember the proxy that was last used (so the
// next round skips/ignores it) and drop the cached handler so a fresh one is built.
// NOTE(review): relies on currentProxy being a Proxy backed by a
// RequestHedgingInvocationHandler — the unchecked cast would fail otherwise.
@Override public synchronized void performFailover(T currentProxy) { toIgnore = ((RequestHedgingInvocationHandler) Proxy.getInvocationHandler( currentUsedHandler.proxy)).currentUsedProxy.proxyInfo; this.currentUsedHandler = null; }
// Exercises the hedging proxy lifecycle: initially both mocks are raced (counter == 2),
// afterwards only the previously-successful proxy is invoked; flipping the "good" side
// and calling performFailover must switch which single proxy is used, with the shared
// counter tracking exactly one invocation per subsequent call.
@Test public void testPerformFailover() throws Exception { final AtomicInteger counter = new AtomicInteger(0); final int[] isGood = {1}; final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class); Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() { @Override public long[] answer(InvocationOnMock invocation) throws Throwable { counter.incrementAndGet(); if (isGood[0] == 1) { Thread.sleep(1000); return new long[]{1}; } throw new IOException("Was Good mock !!"); } }); final ClientProtocol badMock = Mockito.mock(ClientProtocol.class); Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() { @Override public long[] answer(InvocationOnMock invocation) throws Throwable { counter.incrementAndGet(); if (isGood[0] == 2) { Thread.sleep(1000); return new long[]{2}; } throw new IOException("Bad mock !!"); } }); RequestHedgingProxyProvider<ClientProtocol> provider = new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class, createFactory(goodMock, badMock)); long[] stats = provider.getProxy().proxy.getStats(); Assert.assertTrue(stats.length == 1); Assert.assertEquals(1, stats[0]); Assert.assertEquals(2, counter.get()); Mockito.verify(badMock).getStats(); Mockito.verify(goodMock).getStats(); stats = provider.getProxy().proxy.getStats(); Assert.assertTrue(stats.length == 1); Assert.assertEquals(1, stats[0]); // Ensure only the previous successful one is invoked Mockito.verifyNoMoreInteractions(badMock); Assert.assertEquals(3, counter.get()); // Flip to standby.. 
so now this should fail isGood[0] = 2; try { provider.getProxy().proxy.getStats(); Assert.fail("Should fail since previously successful proxy now fails "); } catch (Exception ex) { Assert.assertTrue(ex instanceof IOException); } Assert.assertEquals(4, counter.get()); provider.performFailover(provider.getProxy().proxy); stats = provider.getProxy().proxy.getStats(); Assert.assertTrue(stats.length == 1); Assert.assertEquals(2, stats[0]); // Counter should update only once Assert.assertEquals(5, counter.get()); stats = provider.getProxy().proxy.getStats(); Assert.assertTrue(stats.length == 1); Assert.assertEquals(2, stats[0]); // Counter updates only once now Assert.assertEquals(6, counter.get()); // Flip back to old active.. so now this should fail isGood[0] = 1; try { provider.getProxy().proxy.getStats(); Assert.fail("Should fail since previously successful proxy now fails "); } catch (Exception ex) { Assert.assertTrue(ex instanceof IOException); } Assert.assertEquals(7, counter.get()); provider.performFailover(provider.getProxy().proxy); stats = provider.getProxy().proxy.getStats(); Assert.assertTrue(stats.length == 1); // Ensure correct proxy was called Assert.assertEquals(1, stats[0]); }
// Static factory for an empty Read transform; all parameters are supplied later via
// the generated AutoValue builder's with* methods.
public static <K, V> Read<K, V> read() { return new AutoValue_CdapIO_Read.Builder<K, V>().build(); }
// withValueClass(null) must be rejected eagerly with IllegalArgumentException.
@Test public void testReadObjectCreationFailsIfValueClassIsNull() { assertThrows( IllegalArgumentException.class, () -> CdapIO.<String, String>read().withValueClass(null)); }
// Converts this path to a java.io.File; only valid for paths on the local filesystem,
// otherwise throws IllegalArgumentException naming the offending path.
public File toFile() { if (!(fs instanceof LocalFileSystem)) { throw new IllegalArgumentException("Not a local path: " + path); } return ((LocalFileSystem)fs).pathToFile(path); }
// toFile() must resolve relative ("."), relative-nested, and absolute PathData
// instances to the expected local File locations.
@Test (timeout = 30000) public void testToFile() throws Exception { PathData item = new PathData(".", conf); assertEquals(new File(testDir.toString()), item.toFile()); item = new PathData("d1/f1", conf); assertEquals(new File(testDir + "/d1/f1"), item.toFile()); item = new PathData(testDir + "/d1/f1", conf); assertEquals(new File(testDir + "/d1/f1"), item.toFile()); }
/**
 * Resolves the external id for every tenant secret store by looking it up in the
 * secret store. A missing or absent secret is surfaced as an
 * {@link InvalidApplicationException} so the failure is reported as an application
 * package error rather than an internal one.
 */
public static List<TenantSecretStore> populateExternalId(SecretStore secretStore, TenantName tenant, SystemName system, List<TenantSecretStore> tenantSecretStores) {
    return tenantSecretStores.stream()
            .map(store -> withResolvedExternalId(secretStore, tenant, system, store))
            .toList();
}

/** Looks up the external id for a single secret store, failing when it cannot be found. */
private static TenantSecretStore withResolvedExternalId(SecretStore secretStore, TenantName tenant, SystemName system, TenantSecretStore store) {
    var secretName = secretName(tenant, system, store.getName());
    try {
        var secret = secretStore.getSecret(secretName);
        if (secret == null)
            throw new InvalidApplicationException("No secret found in secret store for " + secretName);
        return store.withExternalId(secret);
    } catch (SecretNotFoundException e) {
        throw new InvalidApplicationException("Could not find externalId for secret store: %s".formatted(store.getName()));
    }
}
// A SecretNotFoundException from the secret store must surface as an
// InvalidApplicationException naming the tenant secret store.
@Test public void reports_application_package_error_when_external_id_not_found() { InvalidApplicationException exception = assertThrows(InvalidApplicationException.class, () -> SecretStoreExternalIdRetriever.populateExternalId(secretStore, tenantName, SystemName.PublicCd, List.of(tenantSecretStore))); assertEquals("Could not find externalId for secret store: name", exception.getMessage()); }
// Builds a command packet from the payload. Simple-protocol packets: skip the already
// consumed type byte and decode directly. Extended-protocol packets arrive batched, so
// each is sliced out (the 4-byte length field counts itself but not the type byte, hence
// readSlice over the length read at the current reader index) and the pieces are wrapped
// in a single aggregated packet.
public static PostgreSQLCommandPacket newInstance(final PostgreSQLCommandPacketType commandPacketType, final PostgreSQLPacketPayload payload) { if (!PostgreSQLCommandPacketType.isExtendedProtocolPacketType(commandPacketType)) { payload.getByteBuf().skipBytes(1); return getPostgreSQLCommandPacket(commandPacketType, payload); } List<PostgreSQLCommandPacket> result = new ArrayList<>(); while (payload.hasCompletePacket()) { PostgreSQLCommandPacketType type = PostgreSQLCommandPacketType.valueOf(payload.readInt1()); int length = payload.getByteBuf().getInt(payload.getByteBuf().readerIndex()); PostgreSQLPacketPayload slicedPayload = new PostgreSQLPacketPayload(payload.getByteBuf().readSlice(length), payload.getCharset()); result.add(getPostgreSQLCommandPacket(type, slicedPayload)); } return new PostgreSQLAggregatedCommandPacket(result); }
// A SIMPLE_QUERY packet type must produce a PostgreSQLComQueryPacket (simple-protocol path).
@Test void assertNewInstanceWithQueryComPacket() { when(payload.getByteBuf()).thenReturn(mock(ByteBuf.class)); when(payload.readStringNul()).thenReturn(""); assertThat(PostgreSQLCommandPacketFactory.newInstance(PostgreSQLCommandPacketType.SIMPLE_QUERY, payload), instanceOf(PostgreSQLComQueryPacket.class)); }
/**
 * Reads the bootstrap metadata for the configured directory.
 * Fails fast when the path is missing or is not a directory; otherwise prefers the
 * binary bootstrap file and falls back to the static configuration when absent.
 */
public BootstrapMetadata read() throws Exception {
    Path directory = Paths.get(directoryPath);
    if (!Files.isDirectory(directory)) {
        String problem = Files.exists(directory)
            ? "Path " + directoryPath + " exists, but is not " + "a directory."
            : "No such directory as " + directoryPath;
        throw new RuntimeException(problem);
    }
    Path binaryBootstrapPath = Paths.get(directoryPath, BINARY_BOOTSTRAP_FILENAME);
    return Files.exists(binaryBootstrapPath)
        ? readFromBinaryFile(binaryBootstrapPath.toString())
        : readFromConfiguration();
}
// With no binary bootstrap file and no configured version, read() must fall back to
// the latest production metadata version with the default-bootstrap source label.
@Test public void testReadFromEmptyConfiguration() throws Exception { try (BootstrapTestDirectory testDirectory = new BootstrapTestDirectory().createDirectory()) { assertEquals(BootstrapMetadata.fromVersion(MetadataVersion.latestProduction(), "the default bootstrap"), new BootstrapDirectory(testDirectory.path(), Optional.empty()).read()); } }
public void registerUrl( String urlString ) { if ( urlString == null || addedAllClusters == true ) { return; //We got no url or already added all clusters so nothing to do. } if ( urlString.startsWith( VARIABLE_START ) ) { addAllClusters(); } Pattern r = Pattern.compile( URL_PATTERN ); Matcher m = r.matcher( urlString ); if ( m.find() ) { String protocol = m.group( PARSE_URL_SCHEME ); String clusterName = m.group( PARSE_URL_AUTHORITY ); if ( "hc".equals( protocol ) ) { if ( clusterName.startsWith( VARIABLE_START ) ) { addAllClusters(); } addClusterToMeta( clusterName ); } } }
// A plain local path (no "hc" scheme) must not register any cluster in the metastore.
@Test public void testRegisterUrlRegularFile() throws Exception { namedClusterEmbedManager.registerUrl( "/" + CLUSTER1_NAME + "/dir1/dir2" ); verify( mockMetaStoreFactory, never() ).saveElement( any() ); }
// Exposes the future that completes when this client finishes (e.g. the control
// stream is closed by the server), enabling a graceful shutdown.
public CompletableFuture<Object> terminationFuture() { return onFinish; }
// Spins up an in-process BeamFnControl server, connects a BeamFnControlClient with
// handlers for PROCESS_BUNDLE (success) and REGISTER (throws), then verifies:
// successful request/response delegation, unknown request types converted to failure
// responses, thrown handler exceptions converted to failure responses, and server
// stream completion resolving the client's termination future.
@Test public void testDelegation() throws Exception { AtomicBoolean clientClosedStream = new AtomicBoolean(); BlockingQueue<BeamFnApi.InstructionResponse> values = new LinkedBlockingQueue<>(); BlockingQueue<StreamObserver<BeamFnApi.InstructionRequest>> outboundServerObservers = new LinkedBlockingQueue<>(); CallStreamObserver<BeamFnApi.InstructionResponse> inboundServerObserver = TestStreams.withOnNext(values::add) .withOnCompleted(() -> clientClosedStream.set(true)) .build(); Endpoints.ApiServiceDescriptor apiServiceDescriptor = Endpoints.ApiServiceDescriptor.newBuilder() .setUrl(this.getClass().getName() + "-" + UUID.randomUUID().toString()) .build(); Server server = InProcessServerBuilder.forName(apiServiceDescriptor.getUrl()) .addService( new BeamFnControlGrpc.BeamFnControlImplBase() { @Override public StreamObserver<BeamFnApi.InstructionResponse> control( StreamObserver<BeamFnApi.InstructionRequest> outboundObserver) { Uninterruptibles.putUninterruptibly(outboundServerObservers, outboundObserver); return inboundServerObserver; } }) .build(); server.start(); try { EnumMap< BeamFnApi.InstructionRequest.RequestCase, ThrowingFunction<BeamFnApi.InstructionRequest, BeamFnApi.InstructionResponse.Builder>> handlers = new EnumMap<>(BeamFnApi.InstructionRequest.RequestCase.class); handlers.put( BeamFnApi.InstructionRequest.RequestCase.PROCESS_BUNDLE, value -> { assertEquals(value.getInstructionId(), BeamFnLoggingMDC.getInstructionId()); return BeamFnApi.InstructionResponse.newBuilder() .setProcessBundle(BeamFnApi.ProcessBundleResponse.getDefaultInstance()); }); handlers.put( BeamFnApi.InstructionRequest.RequestCase.REGISTER, value -> { assertEquals(value.getInstructionId(), BeamFnLoggingMDC.getInstructionId()); throw FAILURE; }); ExecutorService executor = Executors.newCachedThreadPool(); BeamFnControlClient client = new BeamFnControlClient( apiServiceDescriptor, ManagedChannelFactory.createInProcess(), OutboundObserverFactory.trivial(), executor, handlers); // Get the 
connected client and attempt to send and receive an instruction StreamObserver<BeamFnApi.InstructionRequest> outboundServerObserver = outboundServerObservers.take(); outboundServerObserver.onNext(SUCCESSFUL_REQUEST); assertEquals(SUCCESSFUL_RESPONSE, values.take()); // Ensure that conversion of an unknown request type is properly converted to a // failure response. outboundServerObserver.onNext(UNKNOWN_HANDLER_REQUEST); assertEquals(UNKNOWN_HANDLER_RESPONSE, values.take()); // Ensure that all exceptions are caught and translated to failures outboundServerObserver.onNext(FAILURE_REQUEST); assertEquals(FAILURE_RESPONSE, values.take()); // Ensure that the server completing the stream translates to the completable future // being completed allowing for a successful shutdown of the client. outboundServerObserver.onCompleted(); client.terminationFuture().get(); } finally { server.shutdownNow(); } }
/**
 * Builds the master-to-replicas topology map from the cluster's node list.
 * A master with no replicas does not appear as a key (matching the original
 * behavior, which only created an entry when a replica referenced the master).
 */
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterReplicaMap() {
    Iterable<RedisClusterNode> nodes = clusterGetNodes();
    // First pass: collect the master nodes.
    Set<RedisClusterNode> masters = new HashSet<>();
    for (RedisClusterNode node : nodes) {
        if (node.isMaster()) {
            masters.add(node);
        }
    }
    // Second pass: attach each replica to the master whose id it references.
    // Idiom fixes: enhanced-for instead of explicit iterators, computeIfAbsent
    // instead of get/null-check/put, diamond operator.
    Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<>();
    for (RedisClusterNode node : nodes) {
        for (RedisClusterNode masterNode : masters) {
            if (node.getMasterId() != null && node.getMasterId().equals(masterNode.getId())) {
                result.computeIfAbsent(masterNode, key -> new ArrayList<>()).add(node);
            }
        }
    }
    return result;
}
// In the 3-master/3-replica test cluster, the topology map must contain three masters
// with exactly one replica each.
@Test public void testClusterGetMasterSlaveMap() { testInCluster(connection -> { Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterReplicaMap(); assertThat(map).hasSize(3); for (Collection<RedisClusterNode> slaves : map.values()) { assertThat(slaves).hasSize(1); } }); }
// Discovery resolution order: describe EC2 instances unless any ECS property is
// configured; if any EC2 property is configured, return those results as-is; if EC2
// yielded nothing and we are in client discovery mode, fall back to ECS addresses.
@Override public Map<String, String> getAddresses() { AwsCredentials credentials = awsCredentialsProvider.credentials(); Map<String, String> instances = Collections.emptyMap(); if (!awsConfig.anyOfEcsPropertiesConfigured()) { instances = awsEc2Api.describeInstances(credentials); } if (awsConfig.anyOfEc2PropertiesConfigured()) { return instances; } if (instances.isEmpty() && DiscoveryMode.Client == awsConfig.getDiscoveryMode()) { return getEcsAddresses(credentials); } return instances; }
// With an ECS cluster configured in client mode, address discovery must go through the
// ECS task/ENI path and never call EC2 describeInstances.
@Test public void doNotGetEc2AddressesWhenEcsConfigured() { AwsCredentials credentials = AwsCredentials.builder() .setAccessKey("access-key") .setSecretKey("secret-key") .setToken("token") .build(); AwsConfig awsConfig = AwsConfig.builder() .setCluster("CLUSTER") .setDiscoveryMode(DiscoveryMode.Client) .build(); awsEc2Client = new AwsEc2Client(awsEc2Api, awsEcsApi, awsMetadataApi, awsCredentialsProvider, awsConfig); Map<String, String> expectedResult = singletonMap("123.12.1.0", "1.4.6.2"); ArrayList<String> privateIps = new ArrayList<>(expectedResult.keySet()); given(awsCredentialsProvider.credentials()).willReturn(credentials); given(awsEcsApi.listTaskPrivateAddresses("CLUSTER", credentials)).willReturn(privateIps); given(awsEc2Api.describeNetworkInterfaces(privateIps, credentials)).willReturn(expectedResult); // when Map<String, String> result = awsEc2Client.getAddresses(); // then then(awsEc2Api).should(never()).describeInstances(credentials); assertEquals(expectedResult, result); }
// Returns the timer registered under the given name, creating and registering it on
// first access (get-or-create semantics).
public Timer timer(String name) { return getOrAdd(name, MetricBuilder.TIMERS); }
// Two lookups of the same timer name must return the same instance, and the listener
// must be notified exactly once on first registration.
@Test public void accessingATimerRegistersAndReusesIt() { final Timer timer1 = registry.timer("thing"); final Timer timer2 = registry.timer("thing"); assertThat(timer1) .isSameAs(timer2); verify(listener).onTimerAdded("thing", timer1); }
/**
 * Describes the requested consumer groups.
 * <p>Groups are bucketed by the coordinator partition that owns them; one read
 * operation is scheduled per partition and the per-partition results are combined
 * into a single response list. Empty group ids produce an INVALID_GROUP_ID entry,
 * and an inactive coordinator fails every group with COORDINATOR_NOT_AVAILABLE.
 *
 * @param context the request context
 * @param groupIds the ids of the groups to describe
 * @return a future with one DescribedGroup per requested group id
 */
@Override
public CompletableFuture<List<ConsumerGroupDescribeResponseData.DescribedGroup>> consumerGroupDescribe(
    RequestContext context,
    List<String> groupIds
) {
    if (!isActive.get()) {
        return CompletableFuture.completedFuture(ConsumerGroupDescribeRequest.getErrorDescribedGroupList(
            groupIds,
            Errors.COORDINATOR_NOT_AVAILABLE
        ));
    }

    final List<CompletableFuture<List<ConsumerGroupDescribeResponseData.DescribedGroup>>> futures =
        new ArrayList<>(groupIds.size());
    final Map<TopicPartition, List<String>> groupsByTopicPartition = new HashMap<>();
    groupIds.forEach(groupId -> {
        if (isGroupIdNotEmpty(groupId)) {
            groupsByTopicPartition
                .computeIfAbsent(topicPartitionFor(groupId), __ -> new ArrayList<>())
                .add(groupId);
        } else {
            // An empty group id cannot be routed to a partition; answer it directly.
            futures.add(CompletableFuture.completedFuture(Collections.singletonList(
                new ConsumerGroupDescribeResponseData.DescribedGroup()
                    .setGroupId(null)
                    .setErrorCode(Errors.INVALID_GROUP_ID.code())
            )));
        }
    });

    groupsByTopicPartition.forEach((topicPartition, groupList) -> {
        CompletableFuture<List<ConsumerGroupDescribeResponseData.DescribedGroup>> future =
            runtime.scheduleReadOperation(
                "consumer-group-describe",
                topicPartition,
                // BUG FIX: describe only the groups owned by this partition (groupList),
                // not the full request's groupIds — previously every partition was asked
                // about every group, yielding wrong/duplicated results.
                (coordinator, lastCommittedOffset) -> coordinator.consumerGroupDescribe(groupList, lastCommittedOffset)
            ).exceptionally(exception -> handleOperationException(
                "consumer-group-describe",
                groupList,
                exception,
                (error, __) -> ConsumerGroupDescribeRequest.getErrorDescribedGroupList(groupList, error)
            ));

        futures.add(future);
    });

    return FutureUtils.combineFutures(futures, ArrayList::new, List::addAll);
}
// When the scheduled read fails with COORDINATOR_NOT_AVAILABLE, the response must
// still contain one DescribedGroup per requested id carrying that error code.
@Test public void testConsumerGroupDescribeCoordinatorNotActive() throws ExecutionException, InterruptedException { CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime(); GroupCoordinatorService service = new GroupCoordinatorService( new LogContext(), createConfig(), runtime, new GroupCoordinatorMetrics(), createConfigManager() ); when(runtime.scheduleReadOperation( ArgumentMatchers.eq("consumer-group-describe"), ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 0)), ArgumentMatchers.any() )).thenReturn(FutureUtils.failedFuture( Errors.COORDINATOR_NOT_AVAILABLE.exception() )); CompletableFuture<List<ConsumerGroupDescribeResponseData.DescribedGroup>> future = service.consumerGroupDescribe(requestContext(ApiKeys.CONSUMER_GROUP_DESCRIBE), Collections.singletonList("group-id")); assertEquals( Collections.singletonList(new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId("group-id") .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) ), future.get() ); }
// Resolves every Reference in the map against the provided parameters and collects the
// non-null results into an immutable map; entries whose reference resolves to null are
// silently dropped (ImmutableMap rejects null values).
public static Map<String, Object> toValueMap(ReferenceMap m, Map<String, ValueReference> parameters) { final ImmutableMap.Builder<String, Object> mapBuilder = ImmutableMap.builder(); for (Map.Entry<String, Reference> entry : m.entrySet()) { final Object value = valueOf(entry.getValue(), parameters); if (value != null) { mapBuilder.put(entry.getKey(), value); } } return mapBuilder.build(); }
// A parameter that resolves to another parameter (circular indirection) must be
// rejected with an IllegalArgumentException naming the offending parameter.
@Test public void toValueMapWithCircularParameter() { final Map<String, ValueReference> parameters = Collections.singletonMap("STRING", ValueReference.createParameter("OTHER")); final ReferenceMap map = new ReferenceMap(Collections.singletonMap("param", ValueReference.createParameter("STRING"))); assertThatThrownBy(() -> ReferenceMapUtils.toValueMap(map, parameters)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Circular parameter STRING"); }
// Decides whether the gesture path bends sharply enough at middlePointIndex by taking
// the angle between the two segments from the middle point to the points
// CURVATURE_NEIGHBORHOOD samples before and after it (via acos of the normalized dot
// product) and comparing it against CURVATURE_THRESHOLD. A smaller angle means a
// sharper turn. Callers must ensure middlePointIndex ± CURVATURE_NEIGHBORHOOD are
// valid indices; coincident points yield NaN from acos, which compares false.
@VisibleForTesting static boolean hasEnoughCurvature(final int[] xs, final int[] ys, final int middlePointIndex) { // Calculate the radianValue formed between middlePointIndex, and one point in either // direction final int startPointIndex = middlePointIndex - CURVATURE_NEIGHBORHOOD; final int startX = xs[startPointIndex]; final int startY = ys[startPointIndex]; final int endPointIndex = middlePointIndex + CURVATURE_NEIGHBORHOOD; final int endX = xs[endPointIndex]; final int endY = ys[endPointIndex]; final int middleX = xs[middlePointIndex]; final int middleY = ys[middlePointIndex]; final int firstSectionXDiff = startX - middleX; final int firstSectionYDiff = startY - middleY; final double firstSectionLength = Math.sqrt(firstSectionXDiff * firstSectionXDiff + firstSectionYDiff * firstSectionYDiff); final int secondSectionXDiff = endX - middleX; final int secondSectionYDiff = endY - middleY; final double secondSectionLength = Math.sqrt( secondSectionXDiff * secondSectionXDiff + secondSectionYDiff * secondSectionYDiff); final double dotProduct = firstSectionXDiff * secondSectionXDiff + firstSectionYDiff * secondSectionYDiff; final double radianValue = Math.acos(dotProduct / firstSectionLength / secondSectionLength); return radianValue <= CURVATURE_THRESHOLD; }
// A path that doubles back on itself (0-degree angle between the two segments, i.e. a
// full 180-degree turn of direction) must count as having enough curvature, both
// vertically and horizontally.
@Test public void testHasEnoughCurvature180Degrees() { final int[] Xs = new int[3]; final int[] Ys = new int[3]; Xs[0] = 0; Ys[0] = -50; Xs[1] = 0; Ys[1] = 0; Xs[2] = 0; Ys[2] = -50; Assert.assertTrue(GestureTypingDetector.hasEnoughCurvature(Xs, Ys, 1)); Xs[0] = -50; Ys[0] = 0; Xs[1] = 0; Ys[1] = 0; Xs[2] = -50; Ys[2] = 0; Assert.assertTrue(GestureTypingDetector.hasEnoughCurvature(Xs, Ys, 1)); }
// CORS check: delegates origin validation to CorsUtils against the Zeppelin
// configuration; lookup/parse failures are logged and treated as "not allowed".
public boolean checkOrigin(String origin) { try { return CorsUtils.isValidOrigin(origin, zConf); } catch (UnknownHostException | URISyntaxException e) { LOG.error(e.toString(), e); } return false; }
// The local hostname on the default port must always be an allowed origin.
@Test void checkOrigin() throws UnknownHostException { String origin = "http://" + InetAddress.getLocalHost().getHostName() + ":8080"; assertTrue(notebookServer.checkOrigin(origin), "Origin " + origin + " is not allowed. Please check your hostname."); }
// Display name is the material's (case-insensitive) name rendered as a plain string.
@Override public String getDisplayName() { return CaseInsensitiveString.str(getName()); }
// An explicitly set material name must take precedence in getDisplayName().
@Test void shouldReturnMaterialNameIfDefined() throws Exception { DependencyMaterial material = new DependencyMaterial(new CaseInsensitiveString("upstream"), new CaseInsensitiveString("first")); material.setName(new CaseInsensitiveString("my_name")); assertThat(material.getDisplayName()).isEqualTo("my_name"); }
// FEEL "met by" built-in: true iff range1 starts exactly where range2 ends, with both
// of those boundaries closed. Null ranges and incomparable endpoints are reported as
// InvalidParametersEvent errors rather than thrown.
public FEELFnResult<Boolean> invoke(@ParameterName( "range1" ) Range range1, @ParameterName( "range2" ) Range range2) { if ( range1 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range1", "cannot be null")); } if ( range2 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range2", "cannot be null")); } try { boolean result = range1.getLowBoundary() == Range.RangeBoundary.CLOSED && range2.getHighBoundary() == Range.RangeBoundary.CLOSED && range1.getLowEndPoint().compareTo(range2.getHighEndPoint()) == 0; return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range1", "cannot be compared to range2")); } }
// "met by" truth table: only the case where range1's low endpoint coincides with
// range2's high endpoint (both boundaries closed) yields TRUE; identical ranges,
// overlapping ranges, adjacent-but-reversed ranges, and open boundaries are FALSE.
@Test
void invokeParamRangeAndRange() {
    FunctionTestUtil.assertResult(
            metByFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult(
            metByFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "c", "k", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult(
            metByFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult(
            metByFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.OPEN, "g", "k", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    // [f,k] is met by [a,f]: range1.low == range2.high with closed boundaries.
    FunctionTestUtil.assertResult(
            metByFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.TRUE );
}
/**
 * Builds the {@link RemotingDesc} for an in-JVM TCC bean whose contract is
 * declared with {@code @LocalTCC}, either on the implementation class itself or
 * on one of its interfaces.
 *
 * @param bean the candidate bean instance.
 * @param beanName the Spring bean name.
 * @return the populated description, or {@code null} when the bean is not a
 *     remoting bean at all.
 * @throws FrameworkException when the bean is remoting but carries no
 *     {@code @LocalTCC} annotation anywhere.
 */
@Override
public RemotingDesc getServiceDesc(Object bean, String beanName) throws FrameworkException {
    if (!this.isRemoting(bean, beanName)) {
        return null;
    }
    RemotingDesc remotingDesc = new RemotingDesc();
    remotingDesc.setReference(this.isReference(bean, beanName));
    remotingDesc.setService(this.isService(bean, beanName));
    remotingDesc.setProtocol(Protocols.IN_JVM);
    Class<?> classType = bean.getClass();
    // check if LocalTCC annotation is marked on the implementation class
    if (classType.isAnnotationPresent(LocalTCC.class)) {
        // Resolve through AOP proxies to the ultimate target class.
        remotingDesc.setServiceClass(AopProxyUtils.ultimateTargetClass(bean));
        remotingDesc.setServiceClassName(remotingDesc.getServiceClass().getName());
        remotingDesc.setTargetBean(bean);
        return remotingDesc;
    }
    // check if LocalTCC annotation is marked on the interface
    Set<Class<?>> interfaceClasses = ReflectionUtil.getInterfaces(classType);
    for (Class<?> interClass : interfaceClasses) {
        if (interClass.isAnnotationPresent(LocalTCC.class)) {
            // First annotated interface wins.
            remotingDesc.setServiceClassName(interClass.getName());
            remotingDesc.setServiceClass(interClass);
            remotingDesc.setTargetBean(bean);
            return remotingDesc;
        }
    }
    throw new FrameworkException("Couldn't parser any Remoting info");
}
// Parsing a @LocalTCC-annotated implementation must yield a description pointing
// at the annotated interface and the original bean instance.
@Test
public void testServiceDesc(){
    TccActionImpl tccAction = new TccActionImpl();
    RemotingDesc remotingDesc = localTCCRemotingParser.getServiceDesc(tccAction, "c");
    Assertions.assertNotNull(remotingDesc);
    Assertions.assertEquals("org.apache.seata.rm.tcc.TccAction", remotingDesc.getServiceClassName());
    Assertions.assertEquals(remotingDesc.getServiceClass(), TccAction.class);
    Assertions.assertEquals(remotingDesc.getTargetBean(), tccAction);
}
/**
 * Parses the JSON-encoded authentication parameters and initializes the OAuth2
 * flow. Only the client-credentials flow type is supported; it is also the
 * default when no type is given.
 *
 * @param encodedAuthParamString JSON map of authentication parameters.
 * @throws IllegalArgumentException when the input is blank, not valid JSON, or
 *     names an unsupported flow type.
 */
@Override
public void configure(String encodedAuthParamString) {
    if (StringUtils.isBlank(encodedAuthParamString)) {
        throw new IllegalArgumentException("No authentication parameters were provided");
    }

    final Map<String, String> params;
    try {
        params = AuthenticationUtil.configureFromJsonString(encodedAuthParamString);
    } catch (IOException e) {
        throw new IllegalArgumentException("Malformed authentication parameters", e);
    }

    final String type = params.getOrDefault(CONFIG_PARAM_TYPE, TYPE_CLIENT_CREDENTIALS);
    if (TYPE_CLIENT_CREDENTIALS.equals(type)) {
        this.flow = ClientCredentialsFlow.fromParameters(params);
    } else {
        throw new IllegalArgumentException("Unsupported authentication type: " + type);
    }
}
// An empty JSON object lacks the required flow parameters and must be rejected
// with a message mentioning what is "Required".
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = ".*Required.*")
public void testConfigureRequired() throws Exception {
    this.auth.configure("{}");
}
@Override public List<KsqlPartitionLocation> locate( final List<KsqlKey> keys, final RoutingOptions routingOptions, final RoutingFilterFactory routingFilterFactory, final boolean isRangeScan ) { if (isRangeScan && keys.isEmpty()) { throw new IllegalStateException("Query is range scan but found no range keys."); } final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder(); final Set<Integer> filterPartitions = routingOptions.getPartitions(); final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty() : Optional.of(Sets.newHashSet(keys)); // Depending on whether this is a key-based lookup, determine which metadata method to use. // If we don't have keys, find the metadata for all partitions since we'll run the query for // all partitions of the state store rather than a particular one. //For issue #7174. Temporarily turn off metadata finding for a partition with keys //if there are more than one key. final List<PartitionMetadata> metadata; if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) { metadata = getMetadataForKeys(keys, filterPartitions); } else { metadata = getMetadataForAllPartitions(filterPartitions, keySet); } if (metadata.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Cannot determine which host contains the required partitions to serve the pull query. \n" + "The underlying persistent query may be restarting (e.g. as a result of " + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>."); LOG.debug(materializationException.getMessage()); throw materializationException; } // Go through the metadata and group them by partition. 
for (PartitionMetadata partitionMetadata : metadata) { LOG.debug("Handling pull query for partition {} of state store {}.", partitionMetadata.getPartition(), storeName); final HostInfo activeHost = partitionMetadata.getActiveHost(); final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts(); final int partition = partitionMetadata.getPartition(); final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys(); LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition); // For a given partition, find the ordered, filtered list of hosts to consider final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory, activeHost, standByHosts, partition); partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts)); } return partitionLocations.build(); }
// With no keys, the locator must return a location for every partition of the
// store (three here), each carrying the full host list ordered by the standby
// routing filter.
@Test
public void shouldFindAllPartitionsWhenNoKeys() {
    // Given: a topology with one source/processor pair backed by STORE_NAME and
    // three partitions' worth of streams metadata.
    when(topology.describe()).thenReturn(description);
    when(description.subtopologies()).thenReturn(ImmutableSet.of(sub1));
    when(sub1.nodes()).thenReturn(ImmutableSet.of(source, processor));
    when(source.topicSet()).thenReturn(ImmutableSet.of(TOPIC_NAME));
    when(processor.stores()).thenReturn(ImmutableSet.of(STORE_NAME));
    when(kafkaStreams.streamsMetadataForStore(any()))
        .thenReturn(ImmutableList.of(HOST1_STREAMS_MD1, HOST1_STREAMS_MD2, HOST1_STREAMS_MD3));

    // When:
    final List<KsqlPartitionLocation> result = locator.locate(
        ImmutableList.of(), routingOptions, routingFilterFactoryStandby, false);

    // Then: one location per partition, hosts ordered per the routing filter.
    assertThat(result.size(), is(3));
    int partition = result.get(0).getPartition();
    assertThat(partition, is(0));
    List<KsqlNode> nodeList = result.get(0).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.get(0), is(activeNode));
    assertThat(nodeList.get(1), is(standByNode1));
    assertThat(nodeList.get(2), is(standByNode2));
    partition = result.get(1).getPartition();
    assertThat(partition, is(1));
    nodeList = result.get(1).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.get(0), is(standByNode1));
    assertThat(nodeList.get(1), is(activeNode));
    assertThat(nodeList.get(2), is(standByNode2));
    partition = result.get(2).getPartition();
    assertThat(partition, is(2));
    nodeList = result.get(2).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.get(0), is(standByNode2));
    assertThat(nodeList.get(1), is(activeNode));
    assertThat(nodeList.get(2), is(standByNode1));
}
/**
 * Rebuilds the data schema's column names from the query's (filtered) aggregation
 * expressions so downstream consumers see canonical result column names, keeping
 * the original column data types.
 *
 * @throws IllegalStateException when the schema width does not match the number
 *     of aggregations.
 */
public static DataSchema canonicalizeDataSchemaForAggregation(QueryContext queryContext, DataSchema dataSchema) {
    List<Pair<AggregationFunction, FilterContext>> filteredAggregationFunctions =
        queryContext.getFilteredAggregationFunctions();
    assert filteredAggregationFunctions != null;
    int numAggregations = filteredAggregationFunctions.size();
    Preconditions.checkState(dataSchema.size() == numAggregations,
        "BUG: Expect same number of aggregations and columns in data schema, got %s aggregations, %s columns in data "
            + "schema", numAggregations, dataSchema.size());
    // Derive each canonical column name from the aggregation function and its optional filter.
    String[] columnNames = filteredAggregationFunctions.stream()
        .map(aggregation -> AggregationFunctionUtils.getResultColumnName(aggregation.getLeft(), aggregation.getRight()))
        .toArray(String[]::new);
    return new DataSchema(columnNames, dataSchema.getColumnDataTypes());
}
// The canonicalization must replace ad-hoc column names with the parsed
// expression's canonical string form (including FILTER clauses), preserving the
// column data types untouched.
@Test
public void testCanonicalizeDataSchemaForAggregation() {
    QueryContext queryContext = QueryContextConverterUtils.getQueryContext("SELECT SUM(col1 + col2) FROM testTable");
    // Intentionally make data schema not matching the string representation of the expression
    DataSchema dataSchema =
        new DataSchema(new String[]{"sum(col1+col2)"}, new ColumnDataType[]{ColumnDataType.DOUBLE});
    DataSchema canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForAggregation(queryContext,
        dataSchema);
    assertEquals(canonicalDataSchema,
        new DataSchema(new String[]{"sum(plus(col1,col2))"}, new ColumnDataType[]{ColumnDataType.DOUBLE}));

    queryContext = QueryContextConverterUtils.getQueryContext("SELECT SUM(col1 + 1), MIN(col2 + 2) FROM testTable");
    // Intentionally make data schema not matching the string representation of the expression
    dataSchema = new DataSchema(new String[]{"sum(col1+1)", "min(col2+2)"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE});
    canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForAggregation(queryContext, dataSchema);
    assertEquals(canonicalDataSchema, new DataSchema(new String[]{"sum(plus(col1,'1'))", "min(plus(col2,'2'))"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE}));

    queryContext = QueryContextConverterUtils.getQueryContext(
        "SELECT MAX(col1 + 1) FILTER(WHERE col3 > 0) - MIN(col2 + 2) FILTER(WHERE col4 > 0) FROM testTable");
    // Intentionally make data schema not matching the string representation of the expression
    dataSchema = new DataSchema(new String[]{"max(col1+1)", "min(col2+2)"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE});
    canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForAggregation(queryContext, dataSchema);
    assertEquals(canonicalDataSchema, new DataSchema(
        new String[]{"max(plus(col1,'1')) FILTER(WHERE col3 > '0')", "min(plus(col2,'2')) FILTER(WHERE col4 > '0')"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE}));
}
/**
 * Trains an AdaBoost model with default hyper-parameters.
 *
 * @param formula the model formula (response ~ predictors).
 * @param data the training data frame.
 * @return the fitted model.
 */
public static AdaBoost fit(Formula formula, DataFrame data) {
    return fit(formula, data, new Properties());
}
// 10-fold cross-validation of AdaBoost on the Pen Digits data set with a fixed
// random seed; the expected average accuracy is pinned to four decimal places.
@Test
public void testPenDigits() {
    System.out.println("Pen Digits");
    MathEx.setSeed(19650218); // to get repeatable results.
    ClassificationValidations<AdaBoost> result = CrossValidation.classification(10, PenDigits.formula,
        PenDigits.data, (f, x) -> AdaBoost.fit(f, x, 200, 20, 4, 1));
    System.out.println(result);
    assertEquals(0.9525, result.avg.accuracy, 1E-4);
}
/**
 * Applies the computed per-column forward-index/dictionary operations to the
 * segment, validating after each operation that the dictionary state matches
 * the expected outcome.
 *
 * @param segmentWriter writer over the segment directory being modified.
 * @throws Exception on index build failures; IllegalStateException when a
 *     post-condition on the dictionary or forward index is violated.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    return;
  }
  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          if (columnMetadata.hasDictionary()) {
            // Rebuilding a dictionary-encoded forward index must leave the dictionary in place.
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
            }
          } else {
            // A raw forward index must not leave a stale dictionary behind.
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
                  _fieldIndexConfigs);
          if (newForwardIndexDisabledColumns.contains(column)) {
            // Forward index is also disabled for this column: only drop the dictionary.
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
// Re-enabling the forward index (dictionary mode) for a randomly chosen MV
// column that had it disabled and contains duplicate values: the rebuilt index
// must validate, and metadata checks must tolerate the duplicate loss that
// regeneration causes.
@Test
public void testEnableForwardIndexInDictModeForMVForwardIndexDisabledColumnWithDuplicates()
    throws Exception {
  SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
  SegmentDirectory segmentLocalFSDirectory =
      new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
  SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
  IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  Random rand = new Random();

  // Remove from forward index list but keep the inverted index enabled
  String column = MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS.get(
      rand.nextInt(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS.size()));
  indexLoadingConfig.removeForwardIndexDisabledColumns(column);
  ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  fwdIndexHandler.updateIndices(writer);
  fwdIndexHandler.postUpdateIndicesCleanup(writer);

  // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
  segmentLocalFSDirectory.close();

  // Column validation.
  ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
  validateIndexMap(column, true, false);
  validateForwardIndex(column, null, metadata.isSorted());
  // In column metadata, some values can change since MV columns with duplicates lose the duplicates on forward index
  // regeneration.
  validateMetadataProperties(column, metadata.hasDictionary(), metadata.getColumnMaxLength(),
      metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(), metadata.getFieldType(),
      metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(),
      metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(),
      true);
}
/**
 * Maps a SeaTunnel column definition back to the corresponding XuGu column type,
 * clamping out-of-range decimal precision/scale and time/timestamp scales to the
 * XuGu limits (with a warning each time a value is adjusted).
 *
 * <p>Fix: the TIME branch previously logged {@code MAX_SCALE} as the limit even
 * though it clamps with {@code MAX_TIME_SCALE}; the warning now reports the
 * value actually applied.
 *
 * @param column the SeaTunnel column to convert.
 * @return the XuGu type definition.
 * @throws org.apache.seatunnel.common.exception.SeaTunnelRuntimeException for
 *     unsupported SQL types.
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(XUGU_BOOLEAN);
            builder.dataType(XUGU_BOOLEAN);
            break;
        case TINYINT:
            builder.columnType(XUGU_TINYINT);
            builder.dataType(XUGU_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(XUGU_SMALLINT);
            builder.dataType(XUGU_SMALLINT);
            break;
        case INT:
            builder.columnType(XUGU_INTEGER);
            builder.dataType(XUGU_INTEGER);
            break;
        case BIGINT:
            builder.columnType(XUGU_BIGINT);
            builder.dataType(XUGU_BIGINT);
            break;
        case FLOAT:
            builder.columnType(XUGU_FLOAT);
            builder.dataType(XUGU_FLOAT);
            break;
        case DOUBLE:
            builder.columnType(XUGU_DOUBLE);
            builder.dataType(XUGU_DOUBLE);
            break;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            // Clamp precision into [1, MAX_PRECISION], shrinking scale when precision shrinks.
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            // Clamp scale into [0, MAX_SCALE].
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", XUGU_NUMERIC, precision, scale));
            builder.dataType(XUGU_NUMERIC);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            // Unknown or oversized byte lengths fall back to BLOB; small ones use BINARY.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(XUGU_BLOB);
                builder.dataType(XUGU_BLOB);
            } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
                builder.columnType(XUGU_BINARY);
                builder.dataType(XUGU_BINARY);
            } else {
                builder.columnType(XUGU_BLOB);
                builder.dataType(XUGU_BLOB);
            }
            break;
        case STRING:
            // Unknown lengths get the maximum VARCHAR; oversized strings become CLOB.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(String.format("%s(%s)", XUGU_VARCHAR, MAX_VARCHAR_LENGTH));
                builder.dataType(XUGU_VARCHAR);
            } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", XUGU_VARCHAR, column.getColumnLength()));
                builder.dataType(XUGU_VARCHAR);
            } else {
                builder.columnType(XUGU_CLOB);
                builder.dataType(XUGU_CLOB);
            }
            break;
        case DATE:
            builder.columnType(XUGU_DATE);
            builder.dataType(XUGU_DATE);
            break;
        case TIME:
            builder.dataType(XUGU_TIME);
            if (column.getScale() != null && column.getScale() > 0) {
                Integer timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    // Report the limit actually used for clamping (was MAX_SCALE).
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIME_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", XUGU_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(XUGU_TIME);
            }
            break;
        case TIMESTAMP:
            if (column.getScale() == null || column.getScale() <= 0) {
                builder.columnType(XUGU_TIMESTAMP);
            } else {
                int timestampScale = column.getScale();
                if (column.getScale() > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(String.format("TIMESTAMP(%s)", timestampScale));
                builder.scale(timestampScale);
            }
            builder.dataType(XUGU_TIMESTAMP);
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.XUGU,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
// A SeaTunnel LONG column must map to XuGu BIGINT for both column type and data type.
@Test
public void testReconvertLong() {
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.LONG_TYPE).build();
    BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(XuguTypeConverter.XUGU_BIGINT, typeDefine.getColumnType());
    Assertions.assertEquals(XuguTypeConverter.XUGU_BIGINT, typeDefine.getDataType());
}
/**
 * Returns the Close command packet type tag for this packet.
 */
@Override
public PostgreSQLIdentifierTag getIdentifier() {
    return PostgreSQLCommandPacketType.CLOSE_COMMAND;
}
// A Close packet constructed from a payload (close type 'S') must identify
// itself as the CLOSE_COMMAND packet type.
@Test
void assertIdentifier() {
    when(payload.readInt1()).thenReturn((int) 'S');
    PostgreSQLIdentifierTag actual = new PostgreSQLComClosePacket(payload).getIdentifier();
    assertThat(actual, is(PostgreSQLCommandPacketType.CLOSE_COMMAND));
}
/**
 * Initializes the node: converts and validates the configuration, then sets up
 * the per-key lock map (weak refs) and, when caching is enabled, the value
 * cache (soft refs).
 *
 * @throws TbNodeException when a required configuration key is blank.
 */
@Override
public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException {
    this.config = TbNodeUtils.convert(configuration, CalculateDeltaNodeConfiguration.class);
    requireNonBlank(config.getInputValueKey(), "Input value key should be specified!");
    requireNonBlank(config.getOutputValueKey(), "Output value key should be specified!");
    if (config.isAddPeriodBetweenMsgs()) {
        // The period key is only mandatory when period reporting is enabled.
        requireNonBlank(config.getPeriodValueKey(), "Period value key should be specified!");
    }
    locks = new ConcurrentReferenceHashMap<>(16, ConcurrentReferenceHashMap.ReferenceType.WEAK);
    if (config.isUseCache()) {
        cache = new ConcurrentReferenceHashMap<>(16, ConcurrentReferenceHashMap.ReferenceType.SOFT);
    }
}

// Rejects blank configuration values with the given message.
private static void requireNonBlank(String value, String message) throws TbNodeException {
    if (StringUtils.isBlank(value)) {
        throw new TbNodeException(message, true);
    }
}
// A null period key is acceptable as long as period reporting between messages
// is disabled — init must not throw.
@Test
public void givenInvalidPeriodKeyAndAddPeriodDisabled_whenInitThenNoExceptionThrown() {
    config.setPeriodValueKey(null);
    config.setAddPeriodBetweenMsgs(false);
    nodeConfiguration = new TbNodeConfiguration(JacksonUtil.valueToTree(config));
    assertDoesNotThrow(() -> node.init(ctxMock, nodeConfiguration));
}
/**
 * Closes the preferred view and, if one was initialized, the secondary view.
 *
 * <p>The secondary view is closed in a {@code finally} block so it is released
 * even when closing the preferred view throws.
 */
@Override
public void close() {
    try {
        preferredView.close();
    } finally {
        if (secondaryView != null) {
            secondaryView.close();
        }
    }
}
// When no secondary view was ever initialized, closing the composite view must
// close only the primary.
@Test
public void testClose_noSecondaryInitialized() {
    fsView.close();
    verify(primary, times(1)).close();
    verify(secondary, never()).close();
}
/**
 * Lists the directory, delegating to the three-argument overload with the
 * default path delimiter as the object-key separator.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
// A file literally named "." must be listable in the bucket; clean up afterwards.
@Test
public void testListFileDot() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path file = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(
            new Path(container, ".", EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new S3ObjectListService(session, new S3AccessControlListFeature(session))
            .list(container, new DisabledListProgressListener()).contains(file));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(file),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Estimates differential entropy (in bits) from a sample with the Vasicek
 * spacing estimator: the average log of scaled m-spacings of the order
 * statistics, with window m = max(round(sqrt(n)), 2).
 *
 * <p>Note: the input array is sorted in place.
 *
 * @param samples observed values; mutated (sorted) by this call.
 * @return the entropy estimate in bits, or {@code NaN} for an empty input.
 */
public static double calculateFromSamplesUsingVasicek(double[] samples) {
    final int n = samples.length;
    if (n == 0) {
        return Double.NaN;
    }
    Arrays.sort(samples);
    // Spacing half-width: roughly sqrt(n), never below 2.
    final int m = (int) Math.max(Math.round(Math.sqrt(n)), 2L);
    double logSum = 0;
    for (int i = 0; i < n; i++) {
        // Clamp the spacing window to the array bounds at both ends.
        final double upper = i + m < n ? samples[i + m] : samples[n - 1];
        final double lower = i - m > 0 ? samples[i - m] : samples[0];
        // Interior points see a two-sided window; edge points only a one-sided one.
        final double sides = i + m < n && i - m > 0 ? 2 : 1;
        logSum += Math.log(n / (sides * m) * (upper - lower));
    }
    // Convert from nats to bits.
    return logSum / n / Math.log(2);
}
// On a large Gaussian sample the Vasicek estimate should approach the analytic
// differential entropy 0.5 * log2(2*pi*e*sigma^2).
@Test
public void testNormalDistribution() {
    Random random = new Random(13);
    double[] samples = new double[10000000];
    double sigma = 0.5;
    for (int i = 0; i < samples.length; i++) {
        samples[i] = 5 + sigma * random.nextGaussian();
    }
    double expected = 0.5 * Math.log(2 * Math.PI * Math.E * sigma * sigma) / Math.log(2);
    assertEquals(calculateFromSamplesUsingVasicek(samples), expected, 0.02);
}
/**
 * Formats this date's time portion using the JVM default locale's language,
 * delegating to {@link #toLocaleTimeString(String)}.
 */
public String toLocaleTimeString() {
    return toLocaleTimeString(Locale.getDefault().getLanguage());
}
// Exercises every toLocaleTimeString overload: default locale, locale-only,
// locale + time zone, locale + JSON options with "timeStyle", and locale + JSON
// options with an explicit "pattern" — across en-US, ko-KR, uk-UA and ar-EG.
@Test
void testToLocaleTimeString() {
    TbDate d = new TbDate(1693962245000L);
    // Depends on time zone, so we just check it works;
    Assertions.assertNotNull(d.toLocaleTimeString());
    Assertions.assertNotNull(d.toLocaleTimeString("en-US"));

    // Locale + time zone overload.
    Assertions.assertEquals("9:04:05 PM", d.toLocaleTimeString("en-US", "America/New_York"));
    Assertions.assertEquals("오후 9:04:05", d.toLocaleTimeString("ko-KR", "America/New_York"));
    Assertions.assertEquals("04:04:05", d.toLocaleTimeString( "uk-UA", "Europe/Kiev"));
    Assertions.assertEquals("9:04:05 م", d.toLocaleTimeString( "ar-EG", "America/New_York"));

    // Locale + JSON options with a "timeStyle" of "full".
    Assertions.assertEquals("9:04:05 PM Eastern Daylight Time", d.toLocaleTimeString("en-US", JacksonUtil.newObjectNode()
            .put("timeZone", "America/New_York")
            .put("timeStyle", "full")
            .toString()));
    Assertions.assertEquals("오후 9시 4분 5초 미 동부 하계 표준시", d.toLocaleTimeString("ko-KR", JacksonUtil.newObjectNode()
            .put("timeZone", "America/New_York")
            .put("timeStyle", "full")
            .toString()));
    Assertions.assertEquals("04:04:05 за східноєвропейським літнім часом", d.toLocaleTimeString("uk-UA", JacksonUtil.newObjectNode()
            .put("timeZone", "Europe/Kiev")
            .put("timeStyle", "full")
            .toString()));
    Assertions.assertEquals("9:04:05 م التوقيت الصيفي الشرقي لأمريكا الشمالية", d.toLocaleTimeString("ar-EG", JacksonUtil.newObjectNode()
            .put("timeZone", "America/New_York")
            .put("timeStyle", "full")
            .toString()));

    // Locale + JSON options with an explicit "pattern".
    Assertions.assertEquals("9:04:05 PM", d.toLocaleTimeString("en-US", JacksonUtil.newObjectNode()
            .put("timeZone", "America/New_York")
            .put("pattern", "h:mm:ss a")
            .toString()));
    Assertions.assertEquals("9:04:05 오후", d.toLocaleTimeString("ko-KR", JacksonUtil.newObjectNode()
            .put("timeZone", "America/New_York")
            .put("pattern", "h:mm:ss a")
            .toString()));
    Assertions.assertEquals("4:04:05 дп", d.toLocaleTimeString("uk-UA", JacksonUtil.newObjectNode()
            .put("timeZone", "Europe/Kiev")
            .put("pattern", "h:mm:ss a")
            .toString()));
    Assertions.assertEquals("9:04:05 م", d.toLocaleTimeString("ar-EG", JacksonUtil.newObjectNode()
            .put("timeZone", "America/New_York")
            .put("pattern", "h:mm:ss a")
            .toString()));
}
/**
 * Combines the attributes-to-nodes maps of several sub-cluster responses into a
 * single response. Null responses and null maps are skipped.
 *
 * <p>NOTE(review): {@code putAll} lets a later response overwrite the value list
 * of a key an earlier response already contributed, so duplicate attribute keys
 * across sub-clusters lose node entries — confirm whether the value lists
 * should be concatenated instead.
 *
 * @param responses per-sub-cluster responses; elements may be null.
 * @return a response containing the union of the maps (last writer wins on keys).
 */
public static GetAttributesToNodesResponse mergeAttributesToNodesResponse(
    Collection<GetAttributesToNodesResponse> responses) {
  Map<NodeAttributeKey, List<NodeToAttributeValue>> nodeAttributeMap = new HashMap<>();
  for (GetAttributesToNodesResponse response : responses) {
    if (response != null && response.getAttributesToNodes() != null) {
      nodeAttributeMap.putAll(response.getAttributesToNodes());
    }
  }
  return GetAttributesToNodesResponse.newInstance(nodeAttributeMap);
}
// Merging two normal responses with distinct attribute keys, one empty response,
// and one null response must yield both attribute entries intact.
@Test
public void testMergeAttributesToNodesResponse() {
  // normal response1
  NodeAttribute gpu = NodeAttribute.newInstance(NodeAttribute.PREFIX_CENTRALIZED, "GPU",
      NodeAttributeType.STRING, "nvidia");
  Map<NodeAttributeKey, List<NodeToAttributeValue>> map1 = new HashMap<>();
  List<NodeToAttributeValue> lists1 = new ArrayList<>();
  NodeToAttributeValue attributeValue1 = NodeToAttributeValue.newInstance("node1", gpu.getAttributeValue());
  lists1.add(attributeValue1);
  map1.put(gpu.getAttributeKey(), lists1);
  GetAttributesToNodesResponse response1 = GetAttributesToNodesResponse.newInstance(map1);

  // normal response2
  NodeAttribute docker = NodeAttribute.newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "DOCKER",
      NodeAttributeType.STRING, "docker0");
  Map<NodeAttributeKey, List<NodeToAttributeValue>> map2 = new HashMap<>();
  List<NodeToAttributeValue> lists2 = new ArrayList<>();
  NodeToAttributeValue attributeValue2 = NodeToAttributeValue.newInstance("node2", docker.getAttributeValue());
  lists2.add(attributeValue2);
  map2.put(docker.getAttributeKey(), lists2);
  GetAttributesToNodesResponse response2 = GetAttributesToNodesResponse.newInstance(map2);

  // empty response3
  GetAttributesToNodesResponse response3 = GetAttributesToNodesResponse.newInstance(new HashMap<>());

  // null response4
  GetAttributesToNodesResponse response4 = null;

  List<GetAttributesToNodesResponse> responses = new ArrayList<>();
  responses.add(response1);
  responses.add(response2);
  responses.add(response3);
  responses.add(response4);

  GetAttributesToNodesResponse response = RouterYarnClientUtils.mergeAttributesToNodesResponse(responses);
  Assert.assertNotNull(response);
  Assert.assertEquals(2, response.getAttributesToNodes().size());
  Map<NodeAttributeKey, List<NodeToAttributeValue>> attrs = response.getAttributesToNodes();
  NodeAttributeKey gpuKey = gpu.getAttributeKey();
  Assert.assertEquals(attributeValue1.toString(), attrs.get(gpuKey).get(0).toString());
  NodeAttributeKey dockerKey = docker.getAttributeKey();
  Assert.assertEquals(attributeValue2.toString(), attrs.get(dockerKey).get(0).toString());
}
/**
 * Creates a symbolic link via the platform's shell command.
 *
 * @param target the path the link points at.
 * @param linkname the link to create.
 * @return the shell's exit code; 1 when either argument is null.
 * @throws IOException if executing the shell command fails.
 */
public static int symLink(String target, String linkname) throws IOException{
  if (target == null || linkname == null) {
    LOG.warn("Can not create a symLink with a target = " + target
        + " and link =" + linkname);
    return 1;
  }

  // Run the input paths through Java's File so that they are converted to the
  // native OS form
  File targetFile = new File(
      Path.getPathWithoutSchemeAndAuthority(new Path(target)).toString());
  File linkFile = new File(
      Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString());

  String[] cmd = Shell.getSymlinkCommand(
      targetFile.toString(),
      linkFile.toString());

  ShellCommandExecutor shExec;
  try {
    if (Shell.WINDOWS &&
        linkFile.getParentFile() != null &&
        !new Path(target).isAbsolute()) {
      // Relative links on Windows must be resolvable at the time of
      // creation. To ensure this we run the shell command in the directory
      // of the link.
      //
      shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile());
    } else {
      shExec = new ShellCommandExecutor(cmd);
    }
    shExec.execute();
  } catch (Shell.ExitCodeException ec) {
    int returnVal = ec.getExitCode();
    if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
      // Windows denies symlink creation to non-elevated users by default; warn
      // instead of failing loudly.
      LOG.warn("Fail to create symbolic links on Windows. "
          + "The default security settings in Windows disallow non-elevated "
          + "administrators and all non-administrators from creating symbolic links. "
          + "This behavior can be changed in the Local Security Policy management console");
    } else if (returnVal != 0) {
      LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
          + returnVal + " with: " + ec.getMessage());
    }
    return returnVal;
  } catch (IOException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Error while create symlink " + linkname + " to " + target
          + "." + " Exception: " + StringUtils.stringifyException(e));
    }
    throw e;
  }
  return shExec.getExitCode();
}
// Null target and/or null link name must be rejected with return code 1 in all
// three combinations.
@Test
public void testSymlinkWithNullInput() throws IOException {
  File file = new File(del, FILE);
  File link = new File(del, "_link");

  // Create the same symbolic link
  // The operation should fail and returns 1
  int result = FileUtil.symLink(null, null);
  Assert.assertEquals(1, result);

  // Create the same symbolic link
  // The operation should fail and returns 1
  result = FileUtil.symLink(file.getAbsolutePath(), null);
  Assert.assertEquals(1, result);

  // Create the same symbolic link
  // The operation should fail and returns 1
  result = FileUtil.symLink(null, link.getAbsolutePath());
  Assert.assertEquals(1, result);
}
/**
 * Verifies the locally computed digest of the file against the MD5 sum the
 * server reported in the storage object.
 */
@Override
protected void post(final Path file, final MessageDigest digest, final StorageObject response) throws BackgroundException {
    this.verify(file, digest, Checksum.parse(response.getMd5sum()));
}
// A server-reported MD5 matching the digest of empty content must pass the
// post-upload checksum verification without throwing.
@Test
public void testPostChecksum() throws Exception {
    final StorageObject o = new StorageObject("f");
    // MD5 of the empty byte sequence.
    o.setMd5sum("d41d8cd98f00b204e9800998ecf8427e");
    new SwiftSmallObjectUploadFeature(session, new SwiftWriteFeature(
            session, new SwiftRegionService(session))).post(
            new Path("/f", EnumSet.of(Path.Type.file)), MessageDigest.getInstance("MD5"), o
    );
}
/**
 * Resolves the segment-finish directory, i.e. the {@code SEGMENT_FINISH_DIR_NAME}
 * child of the subpartition's path for the given partition/subpartition.
 */
public static Path getSegmentFinishDirPath(
        String basePath, TieredStoragePartitionId partitionId, int subpartitionId) {
    return new Path(
            getSubpartitionPath(basePath, partitionId, subpartitionId),
            SEGMENT_FINISH_DIR_NAME);
}
/** getSegmentFinishDirPath must agree with the expected directory-layout helper. */
@Test
void testGetSegmentFinishDirPath() {
    TieredStoragePartitionId partitionId =
            TieredStorageIdMappingUtils.convertId(new ResultPartitionID());
    int subpartitionId = 0;
    String segmentFinishDirPath =
            SegmentPartitionFile.getSegmentFinishDirPath(
                            tempFolder.getPath(), partitionId, subpartitionId)
                    .getPath();
    File expectedSegmentFinishDir =
            getSegmentFinishDir(tempFolder.getPath(), partitionId, subpartitionId);
    assertThat(segmentFinishDirPath).isEqualTo(expectedSegmentFinishDir.getPath());
}
/**
 * Runs all registered cleanups for the given job: the prioritized cleanups
 * strictly sequentially (in registration order), then the regular cleanups
 * concurrently. Every individual cleanup is wrapped with the configured
 * retry logic via {@code withRetry}.
 *
 * @param jobId the job whose artifacts should be cleaned up
 * @return future completing when every cleanup has finished
 */
@Override
public CompletableFuture<Void> cleanupAsync(JobID jobId) {
    mainThreadExecutor.assertRunningInMainThread();

    CompletableFuture<Void> cleanupFuture = FutureUtils.completedVoidFuture();
    // Chain prioritized cleanups one after another so they run in order.
    for (CleanupWithLabel<T> cleanupWithLabel : prioritizedCleanup) {
        cleanupFuture =
                cleanupFuture.thenCompose(
                        ignoredValue ->
                                withRetry(
                                        jobId,
                                        cleanupWithLabel.getLabel(),
                                        cleanupWithLabel.getCleanup()));
    }

    // Regular cleanups only start after all prioritized cleanups completed,
    // and then run concurrently.
    return cleanupFuture.thenCompose(
            ignoredValue ->
                    FutureUtils.completeAll(
                            regularCleanup.stream()
                                    .map(
                                            cleanupWithLabel ->
                                                    withRetry(
                                                            jobId,
                                                            cleanupWithLabel.getLabel(),
                                                            cleanupWithLabel.getCleanup()))
                                    .collect(Collectors.toList())));
}
/**
 * Regular cleanups run concurrently; a failure in the second one fails the
 * overall result with the retry chain visible in the causes.
 */
@Test
void testConcurrentCleanupWithExceptionSecond() {
    final SingleCallCleanup cleanup0 = SingleCallCleanup.withoutCompletionOnCleanup();
    final SingleCallCleanup cleanup1 = SingleCallCleanup.withoutCompletionOnCleanup();
    final CompletableFuture<Void> cleanupResult =
            createTestInstanceBuilder()
                    .withRegularCleanup("Reg #0", cleanup0)
                    .withRegularCleanup("Reg #1", cleanup1)
                    .build()
                    .cleanupAsync(JOB_ID);

    assertThat(cleanupResult).isNotCompleted();
    // Both cleanups must have been started (i.e. they run concurrently).
    assertThat(cleanup0).extracting(SingleCallCleanup::getProcessedJobId).isEqualTo(JOB_ID);
    assertThat(cleanup1).extracting(SingleCallCleanup::getProcessedJobId).isEqualTo(JOB_ID);

    cleanup0.completeCleanup();
    assertThat(cleanupResult).isNotCompleted();

    final RuntimeException expectedException = new RuntimeException("Expected exception");
    cleanup1.completeCleanupExceptionally(expectedException);
    assertThatFuture(cleanupResult)
            .eventuallyFailsWith(ExecutionException.class)
            .extracting(FlinkAssertions::chainOfCauses, STREAM_THROWABLE)
            .hasExactlyElementsOfTypes(
                    ExecutionException.class,
                    FutureUtils.RetryException.class,
                    CompletionException.class,
                    expectedException.getClass())
            .last()
            .isEqualTo(expectedException);
}
/**
 * Creates a group-buying (combination) activity together with its product rows,
 * inside a single transaction.
 *
 * @param createReqVO creation request carrying the SPU id and product list
 * @return the id of the newly created activity
 */
@Override
@Transactional(rollbackFor = Exception.class)
public Long createCombinationActivity(CombinationActivityCreateReqVO createReqVO) {
    // Validate that the product SPU is not already enrolled in another activity
    validateProductConflict(createReqVO.getSpuId(), null);
    // Validate that the SPU and its products exist
    validateProductExists(createReqVO.getSpuId(), createReqVO.getProducts());
    // Insert the combination activity, enabled by default
    CombinationActivityDO activity = CombinationActivityConvert.INSTANCE.convert(createReqVO)
            .setStatus(CommonStatusEnum.ENABLE.getStatus());
    combinationActivityMapper.insert(activity);
    // Insert the activity's products
    List<CombinationProductDO> products =
            CombinationActivityConvert.INSTANCE.convertList(createReqVO.getProducts(), activity);
    combinationProductMapper.insertBatch(products);
    return activity.getId();
}
/** Happy-path creation: the persisted record mirrors the request attributes. */
@Test
public void testCreateCombinationActivity_success() {
    // Prepare arguments
    CombinationActivityCreateReqVO reqVO = randomPojo(CombinationActivityCreateReqVO.class);

    // Invoke
    Long combinationActivityId = combinationActivityService.createCombinationActivity(reqVO);
    // Assert the id was generated
    assertNotNull(combinationActivityId);
    // Verify the persisted record's attributes match the request
    CombinationActivityDO combinationActivity =
            combinationActivityMapper.selectById(combinationActivityId);
    assertPojoEquals(reqVO, combinationActivity);
}
/**
 * Parses a CGM Session Run Time packet: 2 bytes of run time (uint16 LE),
 * optionally followed by a 2-byte E2E-CRC (MCRF4XX over the first 2 bytes).
 * Malformed lengths and CRC mismatches are routed to their dedicated
 * callbacks instead of the success path.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    // Valid packets are exactly 2 bytes (no CRC) or 4 bytes (with CRC).
    if (data.size() != 2 && data.size() != 4) {
        onInvalidDataReceived(device, data);
        return;
    }

    final int sessionRunTime = data.getIntValue(Data.FORMAT_UINT16_LE, 0);

    final boolean crcPresent = data.size() == 4;
    if (crcPresent) {
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 2);
        final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
        if (actualCrc != expectedCrc) {
            onContinuousGlucoseMonitorSessionRunTimeReceivedWithCrcError(device, data);
            return;
        }
    }

    onContinuousGlucoseMonitorSessionRunTimeReceived(device, sessionRunTime, crcPresent);
}
/** Feeds a valid 4-byte packet (run time 2 + CRC) and expects only the success callback. */
@Test
public void onContinuousGlucoseMonitorSessionRunTimeReceived_withCrc() {
    final DataReceivedCallback callback = new CGMSessionRunTimeDataCallback() {
        @Override
        public void onContinuousGlucoseMonitorSessionRunTimeReceived(@NonNull final BluetoothDevice device, final int sessionRunTime, final boolean secured) {
            called = true;
            assertEquals("Session Run Time", 2, sessionRunTime);
            assertTrue(secured);
        }

        @Override
        public void onContinuousGlucoseMonitorSessionRunTimeReceivedWithCrcError(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            // Must not be called for a matching CRC.
            assertEquals("Correct packet but invalid CRC reported", 1, 2);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            // Must not be called for a well-formed 4-byte packet.
            assertEquals("Correct packet but invalid data reported", 1, 2);
        }
    };

    final MutableData data = new MutableData(new byte[4]);
    assertTrue(data.setValue(2, Data.FORMAT_UINT16_LE, 0));
    // CRC bytes; the test expects these to match the computed MCRF4XX CRC.
    assertTrue(data.setValue(0xC308, Data.FORMAT_UINT16_LE, 2));

    called = false;
    //noinspection DataFlowIssue
    callback.onDataReceived(null, data);
    assertTrue(called);
}
/**
 * Returns the label for this aggregation column: the alias (resolved per
 * database dialect) when one is present and not internally generated,
 * otherwise a name derived from the aggregation function expression.
 */
@Override
public String getColumnLabel() {
    ProjectionIdentifierExtractEngine extractEngine = new ProjectionIdentifierExtractEngine(databaseType);
    // Derived (generated) aliases are internal artifacts and are ignored.
    return getAlias().isPresent() && !DerivedColumn.isDerivedColumnName(getAlias().get().getValueWithQuoteCharacters())
            ? extractEngine.getIdentifierValue(getAlias().get())
            : extractEngine.getColumnNameFromFunction(type.name(), expression);
}
/** Quoted aliases must be returned as the column label across database dialects. */
@Test
void assertGetColumnLabelWithAliasAndQuote() {
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("DIRECTION_COUNT", QuoteCharacter.BACK_QUOTE),
            TypedSPILoader.getService(DatabaseType.class, "MySQL")).getColumnLabel(), is("DIRECTION_COUNT"));
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("DIRECTION_COUNT", QuoteCharacter.QUOTE),
            TypedSPILoader.getService(DatabaseType.class, "PostgreSQL")).getColumnLabel(), is("DIRECTION_COUNT"));
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("DIRECTION_COUNT", QuoteCharacter.QUOTE),
            TypedSPILoader.getService(DatabaseType.class, "openGauss")).getColumnLabel(), is("DIRECTION_COUNT"));
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("direction_count", QuoteCharacter.QUOTE),
            TypedSPILoader.getService(DatabaseType.class, "Oracle")).getColumnLabel(), is("direction_count"));
}
/**
 * Asserts that the subject contains exactly the given elements (in any
 * order unless the returned {@code Ordered} is used).
 */
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    // A literal `containsExactly(null)` arrives as a null varargs array;
    // treat it as a single expected null element.
    List<@Nullable Object> expected =
        (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
    return containsExactlyElementsIn(
        expected,
        // Flag the common mistake of passing one Iterable instead of
        // spreading its elements, so failure messages can hint at it.
        varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
/**
 * When expected and actual elements share a toString, the failure message
 * disambiguates missing/unexpected entries with their types (and null type).
 */
@Test
public void iterableContainsExactlyFailsWithSameToStringAndListWithNull() {
    expectFailureWhenTestingThat(asList(1L, 2L)).containsExactly(null, 1, 2);
    assertFailureValue(
        "missing (3)", "null (null type), 1 (java.lang.Integer), 2 (java.lang.Integer)");
    assertFailureValue("unexpected (2)", "1, 2 (java.lang.Long)");
}
/**
 * Wraps a BookKeeper return code into a SchemaException with a descriptive
 * message of the form
 * "&lt;bk message&gt; - ledger=&lt;id&gt; - operation=&lt;op&gt;[ - entry=&lt;id&gt;]".
 * Missing-ledger/entry codes are marked non-recoverable.
 */
public static Exception bkException(String operation, int rc, long ledgerId, long entryId) {
    StringBuilder message = new StringBuilder();
    message.append(org.apache.bookkeeper.client.api.BKException.getMessage(rc))
            .append(" - ledger=").append(ledgerId)
            .append(" - operation=").append(operation);
    // entryId of -1 means "no specific entry"; omit it from the message.
    if (entryId != -1) {
        message.append(" - entry=").append(entryId);
    }

    // A missing ledger/entry is a permanent condition; everything else may be retried.
    boolean recoverable = !(rc == BKException.Code.NoSuchLedgerExistsException
            || rc == BKException.Code.NoSuchEntryException
            || rc == BKException.Code.NoSuchLedgerExistsOnMetadataServerException);
    return new SchemaException(recoverable, message.toString());
}
/** Pins the message format: "&lt;bk message&gt; - ledger=&lt;id&gt; - operation=&lt;op&gt;[ - entry=&lt;id&gt;]". */
@Test
public void testBkException() {
    Exception ex = bkException("test", BKException.Code.ReadException, 1, -1);
    assertEquals("Error while reading ledger - ledger=1 - operation=test", ex.getMessage());
    // entryId >= 0 appends the entry segment
    ex = bkException("test", BKException.Code.ReadException, 1, 0);
    assertEquals("Error while reading ledger - ledger=1 - operation=test - entry=0", ex.getMessage());

    ex = bkException("test", BKException.Code.QuorumException, 1, -1);
    assertEquals("Invalid quorum size on ensemble size - ledger=1 - operation=test", ex.getMessage());
    ex = bkException("test", BKException.Code.QuorumException, 1, 0);
    assertEquals("Invalid quorum size on ensemble size - ledger=1 - operation=test - entry=0", ex.getMessage());
}
/**
 * Reconciles a KafkaMirrorMaker custom resource: builds the cluster model
 * from the CR, then reconciles the service account, config map, pod
 * disruption budget and deployment in sequence, and completes with the
 * resulting status (failing with a ReconciliationException on error).
 */
@Override
protected Future<KafkaMirrorMakerStatus> createOrUpdate(Reconciliation reconciliation, KafkaMirrorMaker assemblyResource) {
    String namespace = reconciliation.namespace();
    KafkaMirrorMakerCluster mirror;
    KafkaMirrorMakerStatus kafkaMirrorMakerStatus = new KafkaMirrorMakerStatus();
    try {
        mirror = KafkaMirrorMakerCluster.fromCrd(reconciliation, assemblyResource, versions, sharedEnvironmentProvider);
    } catch (Exception e) {
        // An invalid CR fails the reconciliation immediately, with the error
        // recorded in the status.
        LOGGER.warnCr(reconciliation, e);
        StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, e);
        return Future.failedFuture(new ReconciliationException(kafkaMirrorMakerStatus, e));
    }

    Map<String, String> annotations = new HashMap<>(1);

    KafkaClientAuthentication authConsumer = assemblyResource.getSpec().getConsumer().getAuthentication();
    List<CertSecretSource> trustedCertificatesConsumer = assemblyResource.getSpec().getConsumer().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getConsumer().getTls().getTrustedCertificates();
    KafkaClientAuthentication authProducer = assemblyResource.getSpec().getProducer().getAuthentication();
    List<CertSecretSource> trustedCertificatesProducer = assemblyResource.getSpec().getProducer().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getProducer().getTls().getTrustedCertificates();

    Promise<KafkaMirrorMakerStatus> createOrUpdatePromise = Promise.promise();

    boolean mirrorHasZeroReplicas = mirror.getReplicas() == 0;
    LOGGER.debugCr(reconciliation, "Updating Kafka Mirror Maker cluster");
    mirrorMakerServiceAccount(reconciliation, namespace, mirror)
            .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, mirror.getComponentName(), mirror.getReplicas(), operationTimeoutMs))
            .compose(i -> MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperations, mirror.logging(), mirror.metrics()))
            .compose(metricsAndLoggingCm -> {
                ConfigMap logAndMetricsConfigMap = mirror.generateMetricsAndLogConfigMap(metricsAndLoggingCm);
                // The logging config hash annotation makes pods roll when logging changes.
                annotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, Util.hashStub(logAndMetricsConfigMap.getData().get(mirror.logging().configMapKey())));
                return configMapOperations.reconcile(reconciliation, namespace, KafkaMirrorMakerResources.metricsAndLogConfigMapName(reconciliation.name()), logAndMetricsConfigMap);
            })
            .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, mirror.getComponentName(), mirror.generatePodDisruptionBudget()))
            .compose(i -> Future.join(VertxUtil.authTlsHash(secretOperations, namespace, authConsumer, trustedCertificatesConsumer), VertxUtil.authTlsHash(secretOperations, namespace, authProducer, trustedCertificatesProducer)))
            .compose(hashFut -> {
                if (hashFut != null) {
                    // Combined consumer+producer auth/TLS hash annotation rolls pods on credential changes.
                    annotations.put(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString((int) hashFut.resultAt(0) + (int) hashFut.resultAt(1)));
                }
                return Future.succeededFuture();
            })
            .compose(i -> deploymentOperations.reconcile(reconciliation, namespace, mirror.getComponentName(), mirror.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets)))
            .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, mirror.getComponentName(), mirror.getReplicas(), operationTimeoutMs))
            .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, mirror.getComponentName(), 1_000, operationTimeoutMs))
            .compose(i -> mirrorHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, mirror.getComponentName(), 1_000, operationTimeoutMs))
            .onComplete(reconciliationResult -> {
                StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, reconciliationResult.cause());

                // Add warning about Mirror Maker 1 being deprecated and removed soon
                LOGGER.warnCr(reconciliation, "Mirror Maker 1 is deprecated and will be removed in Apache Kafka 4.0.0. Please migrate to Mirror Maker 2.");
                StatusUtils.addConditionsToStatus(kafkaMirrorMakerStatus, Set.of(StatusUtils.buildWarningCondition("MirrorMaker1Deprecation", "Mirror Maker 1 is deprecated and will be removed in Apache Kafka 4.0.0. Please migrate to Mirror Maker 2.")));

                kafkaMirrorMakerStatus.setReplicas(mirror.getReplicas());
                kafkaMirrorMakerStatus.setLabelSelector(mirror.getSelectorLabels().toSelectorString());

                if (reconciliationResult.succeeded()) {
                    createOrUpdatePromise.complete(kafkaMirrorMakerStatus);
                } else {
                    createOrUpdatePromise.fail(new ReconciliationException(kafkaMirrorMakerStatus, reconciliationResult.cause()));
                }
            });

    return createOrUpdatePromise.future();
}
/** Reducing replicas to 2 must drive both scaleDown and scaleUp with the new count. */
@Test
public void testUpdateClusterScaleDown(VertxTestContext context) {
    int scaleTo = 2;

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    CrdOperator mockMirrorOps = supplier.mirrorMakerOperator;
    DeploymentOperator mockDcOps = supplier.deploymentOperations;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;

    String kmmName = "foo";
    String kmmNamespace = "test";

    KafkaMirrorMakerConsumerSpec consumer = new KafkaMirrorMakerConsumerSpecBuilder()
            .withBootstrapServers(consumerBootstrapServers)
            .withGroupId(groupId)
            .withNumStreams(numStreams)
            .build();
    KafkaMirrorMakerProducerSpec producer = new KafkaMirrorMakerProducerSpecBuilder()
            .withBootstrapServers(producerBootstrapServers)
            .build();
    KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include);
    KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS, SHARED_ENV_PROVIDER);
    kmm.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleDown

    when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm);
    when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm));
    when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture());
    when(mockDcOps.get(kmmNamespace, mirror.getComponentName())).thenReturn(mirror.generateDeployment(new HashMap<>(), true, null, null));
    when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getComponentName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.reconcile(any(), eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture());
    // Scale operations succeed with the requested replica count.
    doAnswer(i -> Future.succeededFuture(scaleTo))
            .when(mockDcOps).scaleUp(any(), eq(kmmNamespace), eq(mirror.getComponentName()), eq(scaleTo), anyLong());
    doAnswer(i -> Future.succeededFuture(scaleTo))
            .when(mockDcOps).scaleDown(any(), eq(kmmNamespace), eq(mirror.getComponentName()), eq(scaleTo), anyLong());
    when(mockMirrorOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker())));
    when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
    when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());

    KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx,
            new PlatformFeaturesAvailability(true, kubernetesVersion),
            new MockCertManager(), new PasswordGenerator(10, "a", "a"),
            supplier,
            ResourceUtils.dummyClusterOperatorConfig(VERSIONS));

    Checkpoint async = context.checkpoint();
    ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName), kmm)
            .onComplete(context.succeeding(v -> context.verify(() -> {
                verify(mockDcOps).scaleUp(any(), eq(kmmNamespace), eq(mirror.getComponentName()), eq(scaleTo), anyLong());
                async.flag();
            })));
}
/**
 * Walks the component tree in pre-order down to PROJECT depth and runs
 * executeForProject for every project component encountered.
 */
@Override
public void execute(ComputationStep.Context context) {
    new DepthTraversalTypeAwareCrawler(
        new TypeAwareVisitorAdapter(CrawlerDepthLimit.PROJECT, PRE_ORDER) {
            @Override
            public void visitProject(Component project) {
                executeForProject(project);
            }
        }).visit(treeRootHolder.getRoot());
}
/** A tree without a project component yields no measures. */
@Test
void no_measure_if_tree_has_no_project() {
    ReportComponent notAProjectComponent = ReportComponent.builder(Component.Type.DIRECTORY, 1).build();
    treeRootHolder.setRoot(notAProjectComponent);

    underTest.execute(new TestComputationStepContext());

    assertTrue(measureRepository.getAddedRawMeasures(1).isEmpty());
}
/**
 * Distributes a GetWork budget across the given owners. Only owners whose
 * remaining budget has fallen below fifty percent of their computed target
 * are topped up (by the difference); the rest are left untouched. Empty
 * owner sets and zero budgets are no-ops.
 */
@Override
public <T extends GetWorkBudgetSpender> void distributeBudget(
        ImmutableCollection<T> budgetOwners, GetWorkBudget getWorkBudget) {
    if (budgetOwners.isEmpty()) {
        LOG.debug("Cannot distribute budget to no owners.");
        return;
    }

    if (getWorkBudget.equals(GetWorkBudget.noBudget())) {
        LOG.debug("Cannot distribute 0 budget.");
        return;
    }

    Map<T, GetWorkBudget> desiredBudgets = computeDesiredBudgets(budgetOwners, getWorkBudget);

    for (Entry<T, GetWorkBudget> streamAndDesiredBudget : desiredBudgets.entrySet()) {
        GetWorkBudgetSpender getWorkBudgetSpender = streamAndDesiredBudget.getKey();
        GetWorkBudget desired = streamAndDesiredBudget.getValue();
        GetWorkBudget remaining = getWorkBudgetSpender.remainingBudget();
        // Only top up owners that drained below half their target, to avoid
        // churning tiny adjustments.
        if (isBelowFiftyPercentOfTarget(remaining, desired)) {
            GetWorkBudget adjustment = desired.subtract(remaining);
            getWorkBudgetSpender.adjustBudget(adjustment);
        }
    }
}
/** Distributing to an empty owner collection is a silent no-op (must not throw). */
@Test
public void testDistributeBudget_doesNothingWhenPassedInStreamsEmpty() {
    createBudgetDistributor(1L)
        .distributeBudget(
            ImmutableList.of(), GetWorkBudget.builder().setItems(10L).setBytes(10L).build());
}
/**
 * Injects schemas into CREATE statements. Non-CREATE statements pass
 * through unchanged; CreateSource/CreateAsSelect statements are delegated
 * to forCreateStatement/forCreateAsStatement, falling back to the original
 * statement when no injection applies. KsqlExceptions are rewrapped as
 * KsqlStatementException carrying the masked statement text.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
    if (!(statement.getStatement() instanceof CreateSource)
        && !(statement.getStatement() instanceof CreateAsSelect)) {
        return statement;
    }

    try {
        if (statement.getStatement() instanceof CreateSource) {
            final ConfiguredStatement<CreateSource> createStatement =
                (ConfiguredStatement<CreateSource>) statement;
            return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
        } else {
            final ConfiguredStatement<CreateAsSelect> createStatement =
                (ConfiguredStatement<CreateAsSelect>) statement;
            return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
                createStatement);
        }
    } catch (final KsqlStatementException e) {
        // Already carries statement context; rethrow untouched.
        throw e;
    } catch (final KsqlException e) {
        throw new KsqlStatementException(
            ErrorMessageUtil.buildErrorMessage(e),
            statement.getMaskedStatementText(),
            e.getCause());
    }
}
/** KEY_SCHEMA_ID together with explicit table key elements must be rejected. */
@Test
public void shouldUseKeySchemaIdWhenTableElementsPresent() {
    // Given:
    givenFormatsAndProps(
        "protobuf",
        "avro",
        ImmutableMap.of("KEY_SCHEMA_ID", new IntegerLiteral(42)));
    when(ct.getElements()).thenReturn(SOME_KEY_ELEMENTS_TABLE);

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> injector.inject(ctStatement)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Table elements and KEY_SCHEMA_ID cannot both exist for create statement."));
}
/**
 * Reacts to a dynamic-configuration change event: a DELETE resets the cached
 * settings to an empty table, any other event re-parses the new value into a
 * fresh configurations table.
 */
@Override
public void notify(ConfigChangeEvent value) {
    if (EventType.DELETE.equals(value.getEventType())) {
        settingsString = null;
        this.agentConfigurationsTable = new AgentConfigurationsTable();
        return;
    }
    final String newValue = value.getNewValue();
    settingsString = newValue;
    this.agentConfigurationsTable =
            new AgentConfigurationsReader(new StringReader(newValue))
                    .readAgentConfigurationsTable();
}
/** A MODIFY event re-parses the YAML payload into per-service configurations with stable UUIDs. */
@Test
public void testConfigModifyEvent() throws IOException {
    AgentConfigurationsTable agentConfigurationsTable = Whitebox.getInternalState(
        agentConfigurationsWatcher, "agentConfigurationsTable");
    // Cache starts empty before any event is delivered.
    assertTrue(agentConfigurationsTable.getAgentConfigurationsCache().isEmpty());
    Reader reader = ResourceUtils.read("agent-dynamic-configuration.yml");
    char[] chars = new char[1024 * 1024];
    int length = reader.read(chars);

    agentConfigurationsWatcher.notify(new ConfigChangeWatcher.ConfigChangeEvent(
        new String(chars, 0, length), ConfigChangeWatcher.EventType.MODIFY
    ));
    AgentConfigurationsTable modifyAgentConfigurationsTable = Whitebox.getInternalState(
        agentConfigurationsWatcher, "agentConfigurationsTable");
    Map<String, AgentConfigurations> configurationCache =
        modifyAgentConfigurationsTable.getAgentConfigurationsCache();
    Assertions.assertEquals(2, configurationCache.size());
    AgentConfigurations agentConfigurations0 = configurationCache.get("serviceA");
    Assertions.assertEquals("serviceA", agentConfigurations0.getService());
    Assertions.assertEquals(2, agentConfigurations0.getConfiguration().size());
    Assertions.assertEquals("1000", agentConfigurations0.getConfiguration().get("trace.sample_rate"));
    Assertions.assertEquals(
        "/api/seller/seller/*", agentConfigurations0.getConfiguration().get("trace.ignore_path"));
    Assertions.assertEquals(
        "92670f1ccbdee60e14ffc054d70a5cf3f93f6b5fb1adb83b10bea4fec79b96e7bc5e7b188e231428853721ded42ec756663947316065617f3cfdf51d6dfc8da6",
        agentConfigurations0.getUuid()
    );
    AgentConfigurations agentConfigurations1 = configurationCache.get("serviceB");
    Assertions.assertEquals("serviceB", agentConfigurations1.getService());
    Assertions.assertEquals(2, agentConfigurations1.getConfiguration().size());
    Assertions.assertEquals("1000", agentConfigurations1.getConfiguration().get("trace.sample_rate"));
    Assertions.assertEquals(
        "/api/seller/seller/*", agentConfigurations1.getConfiguration().get("trace.ignore_path"));
    // NOTE(review): this re-asserts agentConfigurations0's UUID; possibly a
    // copy-paste slip that was meant to check agentConfigurations1 — verify.
    Assertions.assertEquals(
        "92670f1ccbdee60e14ffc054d70a5cf3f93f6b5fb1adb83b10bea4fec79b96e7bc5e7b188e231428853721ded42ec756663947316065617f3cfdf51d6dfc8da6",
        agentConfigurations0.getUuid()
    );
}
/**
 * Copies a file server-side via the Drive files().copy endpoint, placing
 * the copy under the target's parent with the target's name. Caches the
 * new file id and returns the target populated with the copy's attributes.
 *
 * @throws BackgroundException mapped from any Drive API I/O failure
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    try {
        final File copy = session.getClient().files().copy(fileid.getFileId(source), new File()
                        .setParents(Collections.singletonList(fileid.getFileId(target.getParent())))
                        .setName(target.getName()))
                .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
        // Server-side copy transfers no payload; report the full length as sent.
        listener.sent(status.getLength());
        fileid.cache(target, copy.getId());
        return target.withAttributes(new DriveAttributesFinderFeature(session, fileid).toAttributes(copy));
    }
    catch(IOException e) {
        throw new DriveExceptionMappingService(fileid).map("Cannot copy {0}", e, source);
    }
}
/** Server-side copy yields a distinct file id and both source and copy remain findable. */
@Test
public void testCopyFile() throws Exception {
    final Path test = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final TransferStatus status = new TransferStatus();
    new DriveTouchFeature(session, fileid).touch(test, status);
    final Path copy = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final Path target = new DriveCopyFeature(session, fileid).copy(test, copy, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
    assertNotEquals(test.attributes().getFileId(), target.attributes().getFileId());
    final Find find = new DefaultFindFeature(session);
    assertTrue(find.find(test));
    assertTrue(find.find(copy));
    // Clean up both files created by the test.
    new DriveDeleteFeature(session, fileid).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/** Static factory for a value set containing every value of the given type. */
static AllOrNoneValueSet all(Type type) {
    return new AllOrNoneValueSet(type, true);
}
/** An "all" set reports isAll, contains any value, and refuses getSingleValue. */
@Test
public void testAll() {
    AllOrNoneValueSet valueSet = AllOrNoneValueSet.all(HYPER_LOG_LOG);
    assertEquals(valueSet.getType(), HYPER_LOG_LOG);
    assertFalse(valueSet.isNone());
    assertTrue(valueSet.isAll());
    assertFalse(valueSet.isSingleValue());
    assertTrue(valueSet.containsValue(Slices.EMPTY_SLICE));

    assertThrows(UnsupportedOperationException.class, valueSet::getSingleValue);
}
/**
 * Returns an approximate, human-readable name for a PTransform class,
 * e.g. "TextIO.Write" for a class named TextIO.Write.Bound.
 *
 * @throws IllegalArgumentException if clazz is not a PTransform subtype
 */
public static String approximatePTransformName(Class<?> clazz) {
    checkArgument(PTransform.class.isAssignableFrom(clazz));
    // AutoValue generates subclasses named AutoValue_Foo; report the
    // hand-written parent class instead.
    if (clazz.getSimpleName().startsWith("AutoValue_")) {
        return approximatePTransformName(clazz.getSuperclass());
    }
    // Strip a trailing ".Bound" segment from the simple name.
    return approximateSimpleName(clazz, /* dropOuterClassNames */ false)
        .replaceFirst("\\.Bound$", "");
}
/** Covers nested classes, AutoValue-generated subclasses and ".Bound" suffix stripping. */
@Test
public void testPTransformName() {
    EmbeddedPTransform transform = new EmbeddedPTransform();
    assertEquals(
        "NameUtilsTest.EmbeddedPTransform",
        NameUtils.approximatePTransformName(transform.getClass()));
    assertEquals(
        "NameUtilsTest.EmbeddedPTransform",
        NameUtils.approximatePTransformName(transform.getBound().getClass()));
    assertEquals(
        "NameUtilsTest.SomeTransform",
        NameUtils.approximatePTransformName(AutoValue_NameUtilsTest_SomeTransform.class));
    assertEquals("TextIO.Write", NameUtils.approximatePTransformName(TextIO.Write.class));
}
/**
 * Generates stubs for every type in the IR — enums, bit sets and
 * composites — after first generating the shared meta-attribute enum.
 *
 * @throws IOException if writing a generated source fails
 */
public void generateTypeStubs() throws IOException {
    generateMetaAttributeEnum();

    for (final List<Token> tokens : ir.types()) {
        // The first token's signal identifies the kind of type definition.
        switch (tokens.get(0).signal()) {
            case BEGIN_ENUM:
                generateEnum(tokens);
                break;

            case BEGIN_SET:
                generateBitSet(tokens);
                break;

            case BEGIN_COMPOSITE:
                generateComposite(tokens);
                break;

            default:
                break;
        }
    }
}
/** A generated char-backed enum stub resolves the byte value 'B' to the constant named "B". */
@Test
void shouldGenerateCharEnumStub() throws Exception {
    generateTypeStubs();

    final Class<?> clazz = compileModel();

    final Object result = getByte(clazz, (byte)'B');
    assertThat(result, hasToString("B"));
}
/**
 * Allocates the next block of producer ids for the given broker and returns
 * the records that persist the advanced cursor alongside the block.
 *
 * @throws UnknownServerException when the int64 id space is exhausted
 */
ControllerResult<ProducerIdsBlock> generateNextProducerId(int brokerId, long brokerEpoch) {
    clusterControlManager.checkBrokerEpoch(brokerId, brokerEpoch);

    long firstProducerIdInBlock = nextProducerBlock.get().firstProducerId();
    // Guard against int64 overflow before handing out another full block.
    if (firstProducerIdInBlock > Long.MAX_VALUE - ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) {
        throw new UnknownServerException("Exhausted all producerIds as the next block's end producerId " +
            "has exceeded the int64 type limit");
    }

    ProducerIdsBlock block = new ProducerIdsBlock(brokerId, firstProducerIdInBlock, ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE);
    long newNextProducerId = block.nextBlockFirstId();

    ProducerIdsRecord record = new ProducerIdsRecord()
        .setNextProducerId(newNextProducerId)
        .setBrokerId(brokerId)
        .setBrokerEpoch(brokerEpoch);

    return ControllerResult.of(Collections.singletonList(new ApiMessageAndVersion(record, (short) 0)), block);
}
/** First allocation starts at id 0, spans 1000 ids, and the record advances the cursor to 1000. */
@Test
public void testInitialResult() {
    ControllerResult<ProducerIdsBlock> result =
        producerIdControlManager.generateNextProducerId(1, 100);
    assertEquals(0, result.response().firstProducerId());
    assertEquals(1000, result.response().size());
    ProducerIdsRecord record = (ProducerIdsRecord) result.records().get(0).message();
    assertEquals(1000, record.nextProducerId());
}
/**
 * Acquires (or re-enters) a lock on the given resource on behalf of the
 * caller identified via the lock context.
 */
@Override
public Optional<Lock> lock(@Nonnull String resource, @Nullable String lockContext) {
    return doLock(resource, getLockedByString(lockContext));
}
/** Re-acquiring an owned lock succeeds and only bumps updatedAt. */
@Test
void reentrantLock() {
    final Lock orig = lockService.lock("test-resource", null)
            .orElseThrow(() -> new IllegalStateException("Unable to create original lock."));

    final Optional<Lock> lock = lockService.lock("test-resource", null);

    assertThat(lock).hasValueSatisfying(l -> {
        assertThat(l.resource()).isEqualTo(orig.resource());
        assertThat(l.lockedBy()).isEqualTo(orig.lockedBy());
        assertThat(l.createdAt()).isEqualTo(orig.createdAt());
        // Only the update timestamp advances on re-entry.
        assertThat(l.updatedAt()).isAfter(orig.updatedAt());
    });
}
/** Returns this cleaner's type discriminator (the EMPTY_SERVICE constant). */
@Override
public String getType() {
    return EMPTY_SERVICE;
}
/** The type discriminator must be the literal "emptyService". */
@Test
void testGetType() {
    assertEquals("emptyService", emptyServiceAutoCleanerV2.getType());
}
/**
 * Saves or updates a plugin in the local Shenyu gateway by notifying the
 * plugin-data subscriber.
 *
 * @param pluginData the plugin payload posted by the caller
 * @return a Mono emitting the success constant
 */
@PostMapping("/plugin/saveOrUpdate")
public Mono<String> saveOrUpdate(@RequestBody final PluginData pluginData) {
    LOG.info("saveOrUpdate apache shenyu local plugin for {}", pluginData.getName());
    subscriber.onSubscribe(pluginData);
    return Mono.just(Constants.SUCCESS);
}
/** POSTing a plugin returns 200 and the plugin lands in the local data cache. */
@Test
public void testSaveOrUpdate() throws Exception {
    final String testPluginName = "testSavePluginName";
    final MockHttpServletResponse response = this.mockMvc.perform(MockMvcRequestBuilders.post("/shenyu/plugin/saveOrUpdate")
                    .content(GsonUtils.getGson().toJson(createTestCleanPlugin(testPluginName)))
                    .contentType(MediaType.APPLICATION_JSON))
            .andReturn()
            .getResponse();
    assertThat(response.getStatus()).isEqualTo(HttpStatus.OK.value());
    assertThat(baseDataCache.obtainPluginData(testPluginName)).isNotNull();
    assertThat(baseDataCache.obtainPluginData(testPluginName).getName()).isEqualTo(testPluginName);
}
/**
 * Requests that this node initiate a graceful failover to become the active
 * node, running the failover as the login user.
 *
 * @throws ServiceFailedException if the failover itself fails
 * @throws IOException if the operation is interrupted or fails with I/O
 */
void gracefulFailoverToYou() throws ServiceFailedException, IOException {
    try {
      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          doGracefulFailover();
          return null;
        }
      });
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers up the stack can observe the
      // interruption even though it is rethrown as an IOException.
      Thread.currentThread().interrupt();
      throw new IOException(e);
    }
}
/**
 * End-to-end ZKFC scenario: session-expiry failovers, a ZK server restart,
 * health-based failovers and finally graceful failovers in both directions.
 */
@Test
public void testOneOfEverything() throws Exception {
    cluster.start();

    // Failover by session expiration
    LOG.info("====== Failing over by session expiration");
    cluster.expireAndVerifyFailover(0, 1);
    cluster.expireAndVerifyFailover(1, 0);

    // Restart ZK
    LOG.info("====== Restarting server");
    stopServer();
    waitForServerDown(hostPort, CONNECTION_TIMEOUT);
    startServer();
    waitForServerUp(hostPort, CONNECTION_TIMEOUT);

    // Failover by bad health
    cluster.setHealthy(0, false);
    cluster.waitForHAState(0, HAServiceState.INITIALIZING);
    cluster.waitForHAState(1, HAServiceState.ACTIVE);
    cluster.setHealthy(1, true);
    cluster.setHealthy(0, false);
    cluster.waitForHAState(1, HAServiceState.ACTIVE);
    cluster.waitForHAState(0, HAServiceState.INITIALIZING);
    cluster.setHealthy(0, true);

    cluster.waitForHealthState(0, State.SERVICE_HEALTHY);

    // Graceful failovers
    cluster.getZkfc(1).gracefulFailoverToYou();
    cluster.getZkfc(0).gracefulFailoverToYou();
}
/**
 * Converts a JDBC result value to the requested Java type, covering the
 * temporal, numeric, binary, boolean, URL and string conversions used by
 * getObject(type); falls back to a plain class cast.
 *
 * @throws SQLFeatureNotSupportedException if convertType is null or the
 *         value cannot be converted/cast to the requested type
 */
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException {
    ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null"));
    if (null == value) {
        return convertNullValue(convertType);
    }
    // Exact type match needs no conversion.
    if (value.getClass() == convertType) {
        return value;
    }
    if (value instanceof LocalDateTime) {
        return convertLocalDateTimeValue((LocalDateTime) value, convertType);
    }
    if (value instanceof Timestamp) {
        return convertTimestampValue((Timestamp) value, convertType);
    }
    if (URL.class.equals(convertType)) {
        return convertURL(value);
    }
    if (value instanceof Number) {
        return convertNumberValue(value, convertType);
    }
    if (value instanceof Date) {
        return convertDateValue((Date) value, convertType);
    }
    if (value instanceof byte[]) {
        return convertByteArrayValue((byte[]) value, convertType);
    }
    if (boolean.class.equals(convertType)) {
        return convertBooleanValue(value);
    }
    if (String.class.equals(convertType)) {
        return value.toString();
    }
    try {
        return convertType.cast(value);
    } catch (final ClassCastException ignored) {
        throw new SQLFeatureNotSupportedException("getObject with type");
    }
}
/** String-to-boolean conversion: "1" converts to true; "-2" converts to false. */
@Test
void assertConvertBooleanValue() throws SQLException {
    assertFalse((boolean) ResultSetUtils.convertValue("-2", boolean.class));
    assertTrue((boolean) ResultSetUtils.convertValue("1", boolean.class));
}
/**
 * Installs a dynamic JAAS configuration: ensures a JAAS config file exists
 * (ZK and Kafka check for the system property and the file), wraps any
 * prior configuration, and wires the configured login contexts to the
 * Kerberos entries.
 */
@Override
public void install() {
    // ensure that a config file is always defined, for compatibility with
    // ZK and Kafka which check for the system property and existence of the file
    priorConfigFile = System.getProperty(JAVA_SECURITY_AUTH_LOGIN_CONFIG, null);
    if (priorConfigFile == null) {
        File configFile = generateDefaultConfigFile(workingDir);
        System.setProperty(JAVA_SECURITY_AUTH_LOGIN_CONFIG, configFile.getAbsolutePath());
        LOG.info("Jaas file will be created as {}.", configFile);
    }

    // read the JAAS configuration file
    priorConfig = javax.security.auth.login.Configuration.getConfiguration();

    // construct a dynamic JAAS configuration
    currentConfig = new DynamicConfiguration(priorConfig);

    // wire up the configured JAAS login contexts to use the krb5 entries
    AppConfigurationEntry[] krb5Entries = getAppConfigurationEntries(securityConfig);
    if (krb5Entries != null) {
        for (String app : securityConfig.getLoginContextNames()) {
            currentConfig.addAppConfigurationEntry(app, krb5Entries);
        }
    }

    javax.security.auth.login.Configuration.setConfiguration(currentConfig);
}
// With no explicit working directory configured, install() must create the generated
// JAAS file under the default temporary directory (CoreOptions.TMP_DIRS default).
@Test public void testCreateJaasModuleFileInTemporary() throws IOException { Configuration configuration = new Configuration(); SecurityConfiguration sc = new SecurityConfiguration(configuration); JaasModule module = new JaasModule(sc); module.install(); assertJaasFileLocateInRightDirectory(CoreOptions.TMP_DIRS.defaultValue()); }
public DateTokenConverter<Object> getPrimaryDateTokenConverter() { Converter<Object> p = headTokenConverter; while (p != null) { if (p instanceof DateTokenConverter) { DateTokenConverter<Object> dtc = (DateTokenConverter<Object>) p; // only primary converters should be returned as if(dtc.isPrimary()) return dtc; } p = p.getNext(); } return null; }
// A %d token without an explicit zone option must leave the converter's time zone null.
@Test public void nullTimeZoneByDefault() { FileNamePattern fnp = new FileNamePattern("%d{hh}", context); assertNull(fnp.getPrimaryDateTokenConverter().getTimeZone()); }
/**
 * Widens the first {@code length} elements of {@code src} into {@code dest}.
 * No bounds checking beyond what the arrays themselves enforce.
 */
public static void copy(int[] src, long[] dest, int length) {
    int i = 0;
    while (i < length) {
        dest[i] = src[i];
        i++;
    }
}
// Verifies the float-source overloads: each target buffer must hold the cast
// (or Float.toString) of the corresponding source element after copy.
@Test public void testCopyFromFloatArray() { ArrayCopyUtils.copy(FLOAT_ARRAY, INT_BUFFER, COPY_LENGTH); ArrayCopyUtils.copy(FLOAT_ARRAY, LONG_BUFFER, COPY_LENGTH); ArrayCopyUtils.copy(FLOAT_ARRAY, DOUBLE_BUFFER, COPY_LENGTH); ArrayCopyUtils.copy(FLOAT_ARRAY, STRING_BUFFER, COPY_LENGTH); for (int i = 0; i < COPY_LENGTH; i++) { Assert.assertEquals(INT_BUFFER[i], (int) FLOAT_ARRAY[i]); Assert.assertEquals(LONG_BUFFER[i], (long) FLOAT_ARRAY[i]); Assert.assertEquals(DOUBLE_BUFFER[i], (double) FLOAT_ARRAY[i]); Assert.assertEquals(STRING_BUFFER[i], Float.toString(FLOAT_ARRAY[i])); } }
// Materializes this payload into a typed ConfigInstance via the reflective factory;
// this object itself is passed as the payload source.
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) { return ConfigInstanceUtil.getNewInstance(clazz, configId, this); }
// A single-entry Slime object under "stringmap" must surface as a map lookup on the typed config.
@Test public void test_simple_map() { Slime slime = new Slime(); Cursor map = slime.setObject().setObject("stringmap"); map.setString("key","val"); MaptypesConfig config = new ConfigPayload(slime).toInstance(MaptypesConfig.class, ""); assertThat(config.stringmap("key"), is("val")); }
@Override public SplitPatternTokenizer clone() { try { SplitPatternTokenizer copy = (SplitPatternTokenizer) super.clone(); copy.postConfig(); //ready is set in postConfig. return copy; } catch (CloneNotSupportedException e) { throw new AssertionError("SplitPatternTokenizer is Cloneable, but the clone call failed."); } }
// Exercises cloned tokenizers (default pattern and whitespace pattern) against a
// range of inputs, including empty and whitespace-only strings.
@Test public void testClone() { SplitPatternTokenizer tokenizer = new SplitPatternTokenizer(); testClones(tokenizer, "1.0n", "1.0n"); testClones(tokenizer, "1. 0n", "1", "0n"); testClones(tokenizer, "a .10n", "a", ".10n"); testClones(tokenizer, "a ,10n", "a", ",10n"); testClones(tokenizer, "a, b, and c", "a", "b", "and", "c"); tokenizer = new SplitPatternTokenizer("\\s+"); testClones(tokenizer, "a b c", "a", "b", "c"); testClones(tokenizer, ""); testClones(tokenizer, " "); testClones(tokenizer, " a", "a"); testClones(tokenizer, "hello there!", "hello", "there!"); }
// Sends an async request with bounded retries: loops until either the configured retry
// count or the callback's timeout is exhausted. When the connection is down it sleeps
// briefly (at most 100ms, or timeout/3 if smaller) to let reconnection happen. On final
// failure the client is flipped RUNNING -> UNHEALTHY and a server switch is triggered;
// the last captured throwable (wrapped as NacosException if needed) is rethrown.
public void asyncRequest(Request request, RequestCallBack callback) throws NacosException { int retryTimes = 0; Throwable exceptionToThrow = null; long start = System.currentTimeMillis(); while (retryTimes <= rpcClientConfig.retryTimes() && System.currentTimeMillis() < start + callback .getTimeout()) { boolean waitReconnect = false; try { if (this.currentConnection == null || !isRunning()) { waitReconnect = true; throw new NacosException(NacosException.CLIENT_DISCONNECT, "Client not connected."); } this.currentConnection.asyncRequest(request, callback); return; } catch (Throwable e) { if (waitReconnect) { try { // wait client to reconnect. Thread.sleep(Math.min(100, callback.getTimeout() / 3)); } catch (Exception exception) { // Do nothing. } } LoggerUtils.printIfErrorEnabled(LOGGER, "[{}] Send request fail, request = {}, retryTimes = {}, errorMessage = {}", rpcClientConfig.name(), request, retryTimes, e.getMessage()); exceptionToThrow = e; } retryTimes++; } if (rpcClientStatus.compareAndSet(RpcClientStatus.RUNNING, RpcClientStatus.UNHEALTHY)) { switchServerAsyncOnRequestFail(); } if (exceptionToThrow != null) { throw (exceptionToThrow instanceof NacosException) ? (NacosException) exceptionToThrow : new NacosException(SERVER_ERROR, exceptionToThrow); } else { throw new NacosException(SERVER_ERROR, "AsyncRequest fail, unknown error"); } }
// Happy path: with a live connection and RUNNING status, the request must be
// delegated to the connection exactly once with no retries.
@Test void testAsyncRequestSuccess() throws NacosException { rpcClient.currentConnection = connection; rpcClient.rpcClientStatus.set(RpcClientStatus.RUNNING); RequestCallBack<?> requestCallBack = mock(RequestCallBack.class); when(requestCallBack.getTimeout()).thenReturn(1000L); rpcClient.asyncRequest(null, requestCallBack); verify(connection).asyncRequest(any(), any()); }
// Assembles the producer config map with layered precedence (later wins):
// Streams EOS/default overrides < client custom props < producer-prefixed user props,
// then forces bootstrap.servers and the Streams-derived client.id. Under EOS-alpha an
// internal flag auto-downgrades the transactional commit protocol for older brokers.
@SuppressWarnings("WeakerAccess") public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); checkIfUnexpectedUserSpecifiedConsumerConfig(clientProvidedProps, NON_CONFIGURABLE_PRODUCER_EOS_CONFIGS); // generate producer configs from original properties and overridden maps final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(getClientCustomProps()); props.putAll(clientProvidedProps); // When using EOS alpha, stream should auto-downgrade the transactional commit protocol to be compatible with older brokers. if (StreamsConfigUtils.processingMode(this) == StreamsConfigUtils.ProcessingMode.EXACTLY_ONCE_ALPHA) { props.put("internal.auto.downgrade.txn.commit", true); } props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); // add client id with stream client id prefix props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId); return props; }
// EOS-beta must NOT set the alpha-only downgrade flag in the producer configs.
@SuppressWarnings("deprecation") @Test public void shouldNotSetInternalAutoDowngradeTxnCommitToTrueInProducerForEosBeta() { props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE_BETA); final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> producerConfigs = streamsConfig.getProducerConfigs(clientId); assertThat(producerConfigs.get("internal.auto.downgrade.txn.commit"), is(nullValue())); }
/** Returns the current value sampled from the underlying gauge source. */
@Override
public long read() {
    final long current = gaugeSource.read();
    return current;
}
// A LongGauge registered against a static long probe must read the probe's value.
@Test public void whenLongProbe() { metricsRegistry.registerStaticProbe(this, "foo", MANDATORY, (LongProbeFunction) o -> 10); LongGauge gauge = metricsRegistry.newLongGauge("foo"); assertEquals(10, gauge.read()); }
/**
 * Marks the given notify messages as read for the specified user.
 * Returns the number of rows actually updated by the mapper.
 */
@Override
public int updateNotifyMessageRead(Collection<Long> ids, Long userId, Integer userType) {
    final int updatedRows = notifyMessageMapper.updateListRead(ids, userId, userType);
    return updatedRows;
}
// Inserts one matching row plus three deliberately mismatched rows (wrong userId,
// wrong userType, already read); the update must touch exactly the one matching row
// and set its read status and read time.
@Test public void testUpdateNotifyMessageRead() { // mock 数据 NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // 等会查询到 o.setUserId(1L); o.setUserType(UserTypeEnum.ADMIN.getValue()); o.setReadStatus(false); o.setReadTime(null); o.setTemplateParams(randomTemplateParams()); }); notifyMessageMapper.insert(dbNotifyMessage); // 测试 userId 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L))); // 测试 userType 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue()))); // 测试 readStatus 不匹配 notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(true))); // 准备参数 Collection<Long> ids = Arrays.asList(dbNotifyMessage.getId(), dbNotifyMessage.getId() + 1, dbNotifyMessage.getId() + 2, dbNotifyMessage.getId() + 3); Long userId = 1L; Integer userType = UserTypeEnum.ADMIN.getValue(); // 调用 int updateCount = notifyMessageService.updateNotifyMessageRead(ids, userId, userType); // 断言 assertEquals(1, updateCount); NotifyMessageDO notifyMessage = notifyMessageMapper.selectById(dbNotifyMessage.getId()); assertTrue(notifyMessage.getReadStatus()); assertNotNull(notifyMessage.getReadTime()); }
/**
 * Closes each of the given closeables in order, never throwing; any failure is
 * recorded into {@code firstException} by {@code closeQuietly} (first one wins).
 */
public static void closeAllQuietly(AtomicReference<Throwable> firstException, String name, AutoCloseable... closeables) {
    for (int i = 0; i < closeables.length; i++) {
        closeQuietly(closeables[i], name, firstException);
    }
}
// One closeable throws, the other counts; closeAllQuietly must still close the second
// and capture the first failure in the AtomicReference.
@Test public void testCloseAllQuietly() { AtomicReference<Throwable> exception = new AtomicReference<>(); String msg = "you should fail"; AtomicInteger count = new AtomicInteger(0); AutoCloseable c0 = () -> { throw new RuntimeException(msg); }; AutoCloseable c1 = count::incrementAndGet; Utils.closeAllQuietly(exception, "test", Stream.of(c0, c1).toArray(AutoCloseable[]::new)); assertEquals(msg, exception.get().getMessage()); assertEquals(1, count.get()); }
// Exposes the pre-built write lock of this read-write lock pair.
@Override public Lock writeLock() { return writeLock; }
// While the write lock is held, both a competing writer and a competing reader must
// fail tryLock() from other threads; the thread-local flag tracks acquire/release
// bracketing and must be false once the try-with-resources block exits.
@Test(timeout=10000) public void testWriteLock() throws Exception { String testname = name.getMethodName(); final ThreadLocal<Boolean> locked = new ThreadLocal<Boolean>(); locked.set(Boolean.FALSE); InstrumentedReadWriteLock readWriteLock = new InstrumentedReadWriteLock( true, testname, LOG, 2000, 300); final AutoCloseableLock writeLock = new AutoCloseableLock( readWriteLock.writeLock()) { @Override public AutoCloseableLock acquire() { AutoCloseableLock lock = super.acquire(); locked.set(Boolean.TRUE); return lock; } @Override public void release() { super.release(); locked.set(Boolean.FALSE); } }; final AutoCloseableLock readLock = new AutoCloseableLock( readWriteLock.readLock()); try (AutoCloseableLock lock = writeLock.acquire()) { Thread competingWriteThread = new Thread() { @Override public void run() { assertFalse(writeLock.tryLock()); } }; competingWriteThread.start(); competingWriteThread.join(); Thread competingReadThread = new Thread() { @Override public void run() { assertFalse(readLock.tryLock()); }; }; competingReadThread.start(); competingReadThread.join(); } assertFalse(locked.get()); locked.remove(); }
// Dispatches validation/offset assignment by compression: uncompressed source and target
// take the in-place path (with magic conversion when the batch magic differs from toMagic);
// anything involving compression goes through the decompress-validate-recompress path.
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
// A compressed v1 batch whose create timestamp is older than the allowed drift
// (now - 1001 vs. 1000ms tolerance) must be rejected with RecordValidationException.
@Test public void testInvalidCreateTimeCompressedV1() { long now = System.currentTimeMillis(); Compression compression = Compression.gzip().build(); MemoryRecords records = createRecords( RecordBatch.MAGIC_VALUE_V1, now - 1001L, compression ); assertThrows(RecordValidationException.class, () -> new LogValidator( records, new TopicPartition("topic", 0), time, CompressionType.GZIP, compression, false, RecordBatch.MAGIC_VALUE_V1, TimestampType.CREATE_TIME, 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, AppendOrigin.CLIENT, MetadataVersion.latestTesting() ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ) ); }
// The number of free parameters of this distribution is fixed at 2 (mean and deviation).
@Override public int length() { return 2; }
/**
 * length() reports the number of free parameters (mean, sd) and is a constant
 * property of the distribution, so no sampling or console output is needed.
 */
@Test
public void testLength() {
    GaussianDistribution instance = new GaussianDistribution(3.0, 2.0);
    assertEquals(2, instance.length());
}
// Routes JSON to the concrete CRMaterial subtype by inspecting the discriminator
// fields (TYPE / ARTIFACT_ORIGIN) via the shared dispatch helper.
@Override public CRMaterial deserialize(JsonElement json, Type type, JsonDeserializationContext context) throws JsonParseException { return determineJsonElementForDistinguishingImplementers(json, context, TYPE, ARTIFACT_ORIGIN); }
// type="tfs" must dispatch deserialization to CRTfsMaterial.
@Test public void shouldDeserializeTfsMaterialType() { JsonObject jsonObject = new JsonObject(); jsonObject.addProperty("type", "tfs"); materialTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext); verify(jsonDeserializationContext).deserialize(jsonObject, CRTfsMaterial.class); }
// Reconciles a User resource: on deletion it cleans up connections and removes the
// finalizer; otherwise it ensures the finalizer and derived state (role annotation,
// permalink, avatar, email verification) before persisting. Always returns "done"
// (no requeue).
@Override public Result reconcile(Request request) { client.fetch(User.class, request.name()).ifPresent(user -> { if (isDeleted(user)) { deleteUserConnections(request.name()); removeFinalizers(user.getMetadata(), Set.of(FINALIZER_NAME)); client.update(user); return; } addFinalizers(user.getMetadata(), Set.of(FINALIZER_NAME)); ensureRoleNamesAnno(user); updatePermalink(user); handleAvatar(user); checkVerifiedEmail(user); client.update(user); }); return new Result(false, null); }
// The reconciler must compute the user's permalink from the external URL and username.
@Test void permalinkForFakeUser() throws URISyntaxException { when(externalUrlSupplier.get()).thenReturn(new URI("http://localhost:8090")); when(roleService.getRolesByUsername("fake-user")) .thenReturn(Flux.empty()); when(client.fetch(eq(User.class), eq("fake-user"))) .thenReturn(Optional.of(user("fake-user"))); userReconciler.reconcile(new Reconciler.Request("fake-user")); verify(client).<User>update(assertArg(user -> assertEquals( "http://localhost:8090/authors/fake-user", user.getStatus().getPermalink() ) )); }
// This driver does not support catalog-qualified names in table definitions.
@Override public boolean supportsCatalogsInTableDefinitions() { return false; }
// Pins the metadata capability flag to false.
@Test void assertSupportsCatalogsInTableDefinitions() { assertFalse(metaData.supportsCatalogsInTableDefinitions()); }
// Core consumer-group heartbeat handler. The steps below are strictly ordered and
// each may append durable records to `records`:
//   0) resolve/create the group and member (epoch 0 implies create; empty memberId is
//      replaced with a random UUID; static vs dynamic membership by instanceId),
//   1) fold request fields into the member and bump the group epoch if subscriptions
//      or subscription metadata changed (or the metadata refresh deadline passed),
//   2) recompute the target assignment when the group epoch moved past the
//      assignment epoch,
//   3) reconcile the member toward the target assignment and reschedule its session
//      timeout. The assignment is included in the response only for full requests
//      (join/rejoin or all non-optional fields set) or when it actually changed.
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat( String groupId, String memberId, int memberEpoch, String instanceId, String rackId, int rebalanceTimeoutMs, String clientId, String clientHost, List<String> subscribedTopicNames, String assignorName, List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions ) throws ApiException { final long currentTimeMs = time.milliseconds(); final List<CoordinatorRecord> records = new ArrayList<>(); // Get or create the consumer group. boolean createIfNotExists = memberEpoch == 0; final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records); throwIfConsumerGroupIsFull(group, memberId); // Get or create the member. if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString(); final ConsumerGroupMember member; if (instanceId == null) { member = getOrMaybeSubscribeDynamicConsumerGroupMember( group, memberId, memberEpoch, ownedTopicPartitions, createIfNotExists, false ); } else { member = getOrMaybeSubscribeStaticConsumerGroupMember( group, memberId, memberEpoch, instanceId, ownedTopicPartitions, createIfNotExists, false, records ); } // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition. 
ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member) .maybeUpdateInstanceId(Optional.ofNullable(instanceId)) .maybeUpdateRackId(Optional.ofNullable(rackId)) .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs)) .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName)) .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames)) .setClientId(clientId) .setClientHost(clientHost) .setClassicMemberMetadata(null) .build(); boolean bumpGroupEpoch = hasMemberSubscriptionChanged( groupId, member, updatedMember, records ); int groupEpoch = group.groupEpoch(); Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata(); Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames(); SubscriptionType subscriptionType = group.subscriptionType(); if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) { // The subscription metadata is updated in two cases: // 1) The member has updated its subscriptions; // 2) The refresh deadline has been reached. 
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); subscriptionMetadata = group.computeSubscriptionMetadata( subscribedTopicNamesMap, metadataImage.topics(), metadataImage.cluster() ); int numMembers = group.numMembers(); if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) { numMembers++; } subscriptionType = ModernGroup.subscriptionType( subscribedTopicNamesMap, numMembers ); if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { log.info("[GroupId {}] Computed new subscription metadata: {}.", groupId, subscriptionMetadata); bumpGroupEpoch = true; records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); } if (bumpGroupEpoch) { groupEpoch += 1; records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); } group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch); } // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between // the existing and the new target assignment is persisted to the partition. final int targetAssignmentEpoch; final Assignment targetAssignment; if (groupEpoch > group.assignmentEpoch()) { targetAssignment = updateTargetAssignment( group, groupEpoch, member, updatedMember, subscriptionMetadata, subscriptionType, records ); targetAssignmentEpoch = groupEpoch; } else { targetAssignmentEpoch = group.assignmentEpoch(); targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId()); } // 3. Reconcile the member's assignment with the target assignment if the member is not // fully reconciled yet. 
updatedMember = maybeReconcile( groupId, updatedMember, group::currentPartitionEpoch, targetAssignmentEpoch, targetAssignment, ownedTopicPartitions, records ); scheduleConsumerGroupSessionTimeout(groupId, memberId); // Prepare the response. ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData() .setMemberId(updatedMember.memberId()) .setMemberEpoch(updatedMember.memberEpoch()) .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId)); // The assignment is only provided in the following cases: // 1. The member sent a full request. It does so when joining or rejoining the group with zero // as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields // (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request // as those must be set in a full request. // 2. The member's assignment has been updated. boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null); if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) { response.setAssignment(createConsumerGroupResponseAssignment(updatedMember)); } return new CoordinatorResult<>(records, response); }
// Session-timer lifecycle: scheduled on first heartbeat, rescheduled on each
// subsequent heartbeat, and cancelled (along with the rebalance timer) when the
// member leaves with LEAVE_GROUP_MEMBER_EPOCH.
@Test public void testSessionTimeoutLifecycle() { String groupId = "fooup"; // Use a static member id as it makes the test easier. String memberId = Uuid.randomUuid().toString(); Uuid fooTopicId = Uuid.randomUuid(); String fooTopicName = "foo"; MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .withConsumerGroupAssignors(Collections.singletonList(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() .build()) .build(); assignor.prepareGroupAssignment(new GroupAssignment( Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); // Session timer is scheduled on first heartbeat. CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(90000) .setSubscribedTopicNames(Collections.singletonList("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(1, result.response().memberEpoch()); // Verify that there is a session time. context.assertSessionTimeout(groupId, memberId, 45000); // Advance time. assertEquals( Collections.emptyList(), context.sleep(result.response().heartbeatIntervalMs()) ); // Session timer is rescheduled on second heartbeat. result = context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(result.response().memberEpoch())); assertEquals(1, result.response().memberEpoch()); // Verify that there is a session time. context.assertSessionTimeout(groupId, memberId, 45000); // Advance time. assertEquals( Collections.emptyList(), context.sleep(result.response().heartbeatIntervalMs()) ); // Session timer is cancelled on leave. 
result = context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH)); assertEquals(LEAVE_GROUP_MEMBER_EPOCH, result.response().memberEpoch()); // Verify that there are no timers. context.assertNoSessionTimeout(groupId, memberId); context.assertNoRebalanceTimeout(groupId, memberId); }
// Cache-backed lookup of an SMS template by code; null results are not cached
// (unless = "#result == null") so missing codes keep hitting the database.
@Override @Cacheable(cacheNames = RedisKeyConstants.SMS_TEMPLATE, key = "#code", unless = "#result == null") public SmsTemplateDO getSmsTemplateByCodeFromCache(String code) { return smsTemplateMapper.selectByCode(code); }
// The cached lookup must return the row previously inserted for the same code.
@Test public void testGetSmsTemplateByCodeFromCache() { // mock 数据 SmsTemplateDO dbSmsTemplate = randomSmsTemplateDO(); smsTemplateMapper.insert(dbSmsTemplate);// @Sql: 先插入出一条存在的数据 // 准备参数 String code = dbSmsTemplate.getCode(); // 调用 SmsTemplateDO smsTemplate = smsTemplateService.getSmsTemplateByCodeFromCache(code); // 校验 assertPojoEquals(dbSmsTemplate, smsTemplate); }
// Returns the current snapshot of registered grok patterns (patterns is a holder,
// e.g. an atomic reference, whose contents may be swapped on reload).
public Set<GrokPattern> patterns() { return patterns.get(); }
// Posting a GrokPatternsUpdatedEvent must cause the registry to reload and expose
// the new pattern set.
@Test public void grokPatternsChanged() { final Set<GrokPattern> newPatterns = Collections.singleton(GrokPattern.create("NEW_PATTERN", "\\w+")); when(grokPatternService.loadAll()).thenReturn(newPatterns); eventBus.post(GrokPatternsUpdatedEvent.create(Collections.singleton("NEW_PATTERN"))); assertThat(grokPatternRegistry.patterns()).isEqualTo(newPatterns); }
// Idempotently applies TIMESTAMP-index derived state for every field config with
// timestamp granularities: adds one derived column per (column, granularity) to the
// schema, one dateTrunc transform per derived column to the ingestion config, and
// registers each derived column as a range-index column. The synchronized block plus
// the sample-column probe makes repeated application a no-op; schema and table-config
// sides are tracked separately so a partially-applied state is completed, not duplicated.
public static void applyTimestampIndex(TableConfig tableConfig, Schema schema) { if (tableConfig.getFieldConfigList() == null) { return; } Map<String, List<TimestampIndexGranularity>> timestampIndexConfigs = new HashMap<>(); for (FieldConfig fieldConfig : tableConfig.getFieldConfigList()) { TimestampConfig timestampConfig = fieldConfig.getTimestampConfig(); if (timestampConfig == null || CollectionUtils.isEmpty(timestampConfig.getGranularities())) { continue; } timestampIndexConfigs.put(fieldConfig.getName(), timestampConfig.getGranularities()); } if (timestampIndexConfigs.isEmpty()) { return; } // Synchronize on table config object to prevent concurrent modification //noinspection SynchronizationOnLocalVariableOrMethodParameter synchronized (tableConfig) { // Check if the updates are already applied boolean schemaApplied; boolean tableConfigApplied; Map.Entry<String, List<TimestampIndexGranularity>> sampleEntry = timestampIndexConfigs.entrySet().iterator().next(); String sampleTimestampColumnWithGranularity = getColumnWithGranularity(sampleEntry.getKey(), sampleEntry.getValue().get(0)); schemaApplied = schema.hasColumn(sampleTimestampColumnWithGranularity); IndexingConfig indexingConfig = tableConfig.getIndexingConfig(); List<String> rangeIndexColumns = indexingConfig.getRangeIndexColumns(); tableConfigApplied = rangeIndexColumns != null && rangeIndexColumns.contains(sampleTimestampColumnWithGranularity); if (schemaApplied && tableConfigApplied) { return; } // Apply TIMESTAMP index List<TransformConfig> transformConfigs = null; if (!tableConfigApplied) { if (rangeIndexColumns == null) { rangeIndexColumns = new ArrayList<>(); indexingConfig.setRangeIndexColumns(rangeIndexColumns); } IngestionConfig ingestionConfig = tableConfig.getIngestionConfig(); if (ingestionConfig == null) { ingestionConfig = new IngestionConfig(); tableConfig.setIngestionConfig(ingestionConfig); } transformConfigs = ingestionConfig.getTransformConfigs(); if (transformConfigs == null) { 
transformConfigs = new ArrayList<>(); ingestionConfig.setTransformConfigs(transformConfigs); } } for (Map.Entry<String, List<TimestampIndexGranularity>> entry : timestampIndexConfigs.entrySet()) { String timestampColumn = entry.getKey(); for (TimestampIndexGranularity granularity : entry.getValue()) { String columnWithGranularity = getColumnWithGranularity(timestampColumn, granularity); if (!schemaApplied) { schema.addField(getFieldSpecWithGranularity(columnWithGranularity, granularity)); } if (!tableConfigApplied) { transformConfigs.add( new TransformConfig(columnWithGranularity, getTransformExpression(timestampColumn, granularity))); rangeIndexColumns.add(columnWithGranularity); } } } } }
// Applies the TIMESTAMP index five times and asserts the result is identical each
// round (idempotence): 6 derived schema columns added, 6 transforms, and the same
// 6 columns registered for range indexing — never duplicated.
@Test public void testApplyTimestampIndex() { TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("testTable").setFieldConfigList( Arrays.asList( new FieldConfig("ts1", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.TIMESTAMP, null, null, new TimestampConfig(Arrays.asList(TimestampIndexGranularity.SECOND, TimestampIndexGranularity.MINUTE, TimestampIndexGranularity.HOUR)), null), new FieldConfig("ts2", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.TIMESTAMP, null, null, new TimestampConfig(Arrays.asList(TimestampIndexGranularity.DAY, TimestampIndexGranularity.WEEK, TimestampIndexGranularity.MONTH)), null))).build(); Schema schema = new Schema.SchemaBuilder().setSchemaName("testTable") .addDateTime("ts1", DataType.TIMESTAMP, "TIMESTAMP", "1:MILLISECONDS") .addDateTime("ts2", DataType.TIMESTAMP, "TIMESTAMP", "1:MILLISECONDS").build(); // Apply TIMESTAMP index multiple times should get the same result for (int i = 0; i < 5; i++) { TimestampIndexUtils.applyTimestampIndex(tableConfig, schema); // Check schema assertEquals(schema.size(), 8); FieldSpec ts1SecondFieldSpec = schema.getFieldSpecFor("$ts1$SECOND"); assertNotNull(ts1SecondFieldSpec); assertTrue(ts1SecondFieldSpec instanceof DateTimeFieldSpec); DateTimeFieldSpec ts1SecondDateTimeFieldSpec = (DateTimeFieldSpec) ts1SecondFieldSpec; assertEquals(ts1SecondDateTimeFieldSpec.getDataType(), DataType.TIMESTAMP); assertEquals(ts1SecondDateTimeFieldSpec.getFormat(), "TIMESTAMP"); assertEquals(ts1SecondDateTimeFieldSpec.getGranularity(), "1:SECONDS"); // Check ingestion transform assertNotNull(tableConfig.getIngestionConfig()); List<TransformConfig> transformConfigs = tableConfig.getIngestionConfig().getTransformConfigs(); assertNotNull(transformConfigs); assertEquals(transformConfigs.size(), 6); Set<String> transformColumns = new HashSet<>(); for (TransformConfig transformConfig : transformConfigs) { String columnName = transformConfig.getColumnName(); 
assertTrue(transformColumns.add(columnName)); if (columnName.equals("$ts2$DAY")) { assertEquals(transformConfig.getTransformFunction(), "dateTrunc('DAY',\"ts2\")"); } } assertEquals(transformColumns, new HashSet<>( Arrays.asList("$ts1$SECOND", "$ts1$MINUTE", "$ts1$HOUR", "$ts2$DAY", "$ts2$WEEK", "$ts2$MONTH"))); // Check range index List<String> rangeIndexColumns = tableConfig.getIndexingConfig().getRangeIndexColumns(); assertNotNull(rangeIndexColumns); assertEquals(new HashSet<>(rangeIndexColumns), transformColumns); } }
// Builds the struct type of the grouping key: the projection over partition fields
// that are active in ALL of the given specs (common active field ids).
public static StructType groupingKeyType(Schema schema, Collection<PartitionSpec> specs) { return buildPartitionProjectionType("grouping key", specs, commonActiveFieldIds(schema, specs)); }
// A table with only the unpartitioned spec must yield an empty grouping key struct.
@Test public void testGroupingKeyTypeWithOnlyUnpartitionedSpec() { TestTables.TestTable table = TestTables.create( tableDir, "test", SCHEMA, PartitionSpec.unpartitioned(), V1_FORMAT_VERSION); assertThat(table.specs()).hasSize(1); StructType expectedType = StructType.of(); StructType actualType = Partitioning.groupingKeyType(table.schema(), table.specs().values()); assertThat(actualType).isEqualTo(expectedType); }
/**
 * Wraps the plugin-provided measure computers, validates their declared metrics,
 * and publishes them to the holder in the order produced by sortComputers.
 */
@Override
public void execute(Context context) {
    final List<MeasureComputerWrapper> wrapped = Arrays.stream(measureComputers)
        .map(ToMeasureWrapper.INSTANCE)
        .toList();
    validateMetrics(wrapped);
    measureComputersHolder.setMeasureComputers(sortComputers(wrapped));
}
// A measure computer outputting a metric no plugin declares must fail fast with an ISE.
@Test public void fail_with_ISE_when_no_metrics_are_defined_by_plugin_but_measure_computer_use_a_new_metric() { assertThatThrownBy(() -> { MeasureComputer[] computers = new MeasureComputer[] {newMeasureComputer(array(NCLOC_KEY), array(NEW_METRIC_1))}; ComputationStep underTest = new LoadMeasureComputersStep(holder, computers); underTest.execute(new TestComputationStepContext()); }) .isInstanceOf(IllegalStateException.class) .hasMessage("Metric 'metric1' cannot be used as an output metric because no plugins declare this metric"); }
/**
 * Checks that the given predicate is compatible with the file schema by visiting it
 * with a {@code SchemaCompatibilityValidator}.
 *
 * @throws NullPointerException if {@code predicate} or {@code schema} is null
 */
public static void validate(FilterPredicate predicate, MessageType schema) {
    Objects.requireNonNull(predicate, "predicate cannot be null");
    Objects.requireNonNull(schema, "schema cannot be null");
    SchemaCompatibilityValidator validator = new SchemaCompatibilityValidator(schema);
    predicate.accept(validator);
}
// A predicate whose declared column type disagrees with the file schema must be
// rejected with the exact diagnostic message.
@Test public void testFindsInvalidTypes() { try { validate(complexWrongType, schema); fail("this should throw"); } catch (IllegalArgumentException e) { assertEquals( "FilterPredicate column: x.bar's declared type (java.lang.Long) does not match the schema found in file metadata. " + "Column x.bar is of type: INT32\n" + "Valid types for this column are: [class java.lang.Integer]", e.getMessage()); } }
/**
 * Extracts the text between the LAST '[' and the LAST ']' of an attribute name,
 * e.g. "wheel[2].pressure[BAR]" -> "BAR". Returns null when neither bracket is
 * present; throws IllegalArgumentException for unbalanced or misordered brackets.
 */
public static String extractArgumentsFromAttributeName(String attributeNameWithArguments) {
    final int open = StringUtil.lastIndexOf(attributeNameWithArguments, '[');
    final int close = StringUtil.lastIndexOf(attributeNameWithArguments, ']');
    if (open < 0 && close < 0) {
        // no brackets at all -> no arguments
        return null;
    }
    if (open > 0 && close > open) {
        return attributeNameWithArguments.substring(open + 1, close);
    }
    throw new IllegalArgumentException("Wrong argument input passed " + attributeNameWithArguments);
}
// With multiple bracket pairs, the argument of the LAST pair must be extracted.
@Test public void extractArgument_wrongArguments_tooManySquareBrackets_lastExtracted() { assertEquals("BAR", extractArgumentsFromAttributeName("car.wheel[2].pressure[BAR]")); }
// Picks the next group for a query connection, skipping the rejected ones. The whole
// take-and-allocate is done under the monitor so the scheduler's bookkeeping
// (allocation counts) stays consistent across concurrent callers.
public Optional<Group> takeGroup(Set<Integer> rejectedGroups) { synchronized (this) { Optional<GroupStatus> best = scheduler.takeNextGroup(rejectedGroups); if (best.isPresent()) { GroupStatus gs = best.get(); gs.allocate(); Group ret = gs.group; log.fine(() -> "Offering <" + ret + "> for query connection"); return Optional.of(ret); } else { return Optional.empty(); } } }
// With two multi-node groups under round-robin policy, takeGroup (null = nothing
// rejected) must always yield a group.
@Test void requireThatLoadBalancerServesClusteredGroups() { Node n1 = new Node("test", 0, "test-node1", 0); Node n2 = new Node("test", 1, "test-node2", 0); Node n3 = new Node("test", 0, "test-node3", 1); Node n4 = new Node("test", 1, "test-node4", 1); LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1,n2)), new Group(1,List.of(n3,n4))), LoadBalancer.Policy.ROUNDROBIN); Optional<Group> grp = lb.takeGroup(null); assertTrue(grp.isPresent()); }
// Encodes the string using the first codec only. NOTE(review): the `delimiters`
// parameter is ignored here — presumably this overload exists for interface
// conformity and delimiter handling happens elsewhere; confirm against callers.
public byte[] encode(String val, String delimiters) { return codecs[0].encode(val); }
// KS X 1001 codec must reproduce the expected byte sequence for the long Korean fixture.
@Test public void testEncodeKoreanLongText() { assertArrayEquals(KOREAN_LONG_TEXT_BYTES, ksx1001().encode(KOREAN_LONG_TEXT, LT_DELIMS)); }
// CORS filter: echoes the Origin header back only when it passes validation against
// the Zeppelin config; OPTIONS preflight requests are answered directly (chain is
// short-circuited), other requests get the headers added before continuing the chain.
// NOTE(review): `request` is cast to HttpServletRequest without an instanceof check
// (unlike `response`) — assumes this filter is only mapped to HTTP; confirm.
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) throws IOException, ServletException { String sourceHost = ((HttpServletRequest) request).getHeader("Origin"); String origin = ""; try { if (CorsUtils.isValidOrigin(sourceHost, zConf)) { origin = sourceHost; } } catch (URISyntaxException e) { LOGGER.error("Exception in WebDriverManager while getWebDriver ", e); } if (((HttpServletRequest) request).getMethod().equals("OPTIONS")) { HttpServletResponse resp = ((HttpServletResponse) response); addCorsHeaders(resp, origin); return; } if (response instanceof HttpServletResponse) { HttpServletResponse alteredResponse = ((HttpServletResponse) response); addCorsHeaders(alteredResponse, origin); } filterChain.doFilter(request, response); }
// A valid Origin ("http://localhost:8080" against a localhost server) must be
// echoed back as the first CORS header the filter sets.
@Test
void validCorsFilterTest() throws IOException, ServletException {
  CorsFilter filter = new CorsFilter(ZeppelinConfiguration.load());
  HttpServletResponse mockResponse = mock(HttpServletResponse.class);
  FilterChain mockedFilterChain = mock(FilterChain.class);
  HttpServletRequest mockRequest = mock(HttpServletRequest.class);
  when(mockRequest.getHeader("Origin")).thenReturn("http://localhost:8080");
  when(mockRequest.getMethod()).thenReturn("Empty");
  when(mockRequest.getServerName()).thenReturn("localhost");
  count = 0;
  // Capture every header value the filter sets, in call order.
  doAnswer(invocation -> {
    headers[count] = invocation.getArguments()[1].toString();
    count++;
    return null;
  }).when(mockResponse).setHeader(anyString(), anyString());
  filter.doFilter(mockRequest, mockResponse, mockedFilterChain);
  assertEquals("http://localhost:8080", headers[0]);
}
/**
 * Notifies every registered {@code ConnectionEventListener} that the given
 * connection was disconnected. A failing listener is logged and does not
 * prevent the remaining listeners from being notified.
 */
protected void notifyDisConnected(Connection connection) {
    if (connectionEventListeners.isEmpty()) {
        return;
    }
    LoggerUtils.printIfInfoEnabled(LOGGER, "[{}] Notify disconnected event to listeners", rpcClientConfig.name());
    for (ConnectionEventListener listener : connectionEventListeners) {
        try {
            listener.onDisConnect(connection);
        } catch (Throwable throwable) {
            // Isolate listener failures; log which listener misbehaved.
            LoggerUtils.printIfErrorEnabled(LOGGER, "[{}] Notify disconnect listener error, listener = {}",
                    rpcClientConfig.name(), listener.getClass().getName());
        }
    }
}
// Registers a listener, fires a disconnect with a null connection, and checks
// the listener was invoked exactly once with that null.
// NOTE(review): rpcClientConfig.name() is expected twice — one call is the
// info log in notifyDisConnected; confirm where the other occurs.
@Test void testNotifyDisConnected() { ConnectionEventListener listener = mock(ConnectionEventListener.class); rpcClient.registerConnectionListener(listener); rpcClient.notifyDisConnected(null); verify(listener).onDisConnect(null); verify(rpcClientConfig, times(2)).name(); }
/**
 * Looks up the container report for the requested container id in the
 * application history store. An I/O failure is logged and rethrown to the
 * caller.
 */
@Override
public GetContainerReportResponse getContainerReport(GetContainerReportRequest request)
    throws YarnException, IOException {
  ContainerId containerId = request.getContainerId();
  try {
    return GetContainerReportResponse.newInstance(history.getContainer(containerId));
  } catch (IOException e) {
    LOG.error(e.getMessage(), e);
    throw e;
  }
}
@Test void testContainerNotFound() throws IOException, YarnException { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId containerId = ContainerId.newContainerId(appAttemptId, MAX_APPS + 1); GetContainerReportRequest request = GetContainerReportRequest.newInstance(containerId); try { @SuppressWarnings("unused") GetContainerReportResponse response = clientService.getContainerReport(request); } catch (ContainerNotFoundException e) { //This exception is expected assertTrue(e.getMessage().contains( "doesn't exist in the timeline store")); } catch (Exception e) { fail("Undesired exception caught"); } }
/**
 * Parses a Lucene query string, collecting both the raw tokens seen by the
 * parser and the field/value terms that occur in the resulting query tree.
 *
 * @param query the raw query string
 * @return the parsed query with its tokens and terms
 * @throws ParseException if the query cannot be parsed
 */
public ParsedQuery parse(final String query) throws ParseException {
    final TokenCollectingQueryParser queryParser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
    queryParser.setSplitOnWhitespace(true);
    queryParser.setAllowLeadingWildcard(allowLeadingWildcard);

    final Query parsedQuery = queryParser.parse(query);

    final ParsedQuery.Builder resultBuilder = ParsedQuery.builder().query(query);
    resultBuilder.tokensBuilder().addAll(queryParser.getTokens());

    final TermCollectingQueryVisitor termVisitor = new TermCollectingQueryVisitor(ANALYZER, queryParser.getTokenLookup());
    parsedQuery.visit(termVisitor);
    resultBuilder.termsBuilder().addAll(termVisitor.getParsedTerms());

    return resultBuilder.build();
}
// A term repeated in the query ("foo:bar" twice) must produce three terms in
// total, and the two "foo" occurrences must carry DISTINCT key tokens so the
// occurrences can be told apart.
@Test void testRepeatedQuery() throws ParseException { final ParsedQuery parsedQuery = parser.parse("foo:bar AND foo:bar AND something:else"); assertThat(parsedQuery.terms().size()).isEqualTo(3); assertThat(parsedQuery.terms()) .filteredOn(parsedTerm -> parsedTerm.field().equals("foo")) .extracting(ParsedTerm::keyToken) .extracting(Optional::get) .hasSize(2) .satisfies(keyTokens -> assertThat(keyTokens.get(0)).isNotEqualTo(keyTokens.get(1))); }
/**
 * Chebyshev (L-infinity) distance between two integer vectors: the maximum
 * absolute difference over all coordinates.
 *
 * @param x first vector
 * @param y second vector; must have the same length as {@code x}
 * @return the largest |x[i] - y[i]| as a double; 0.0 for empty arrays
 * @throws IllegalArgumentException if the arrays differ in length
 */
public static double d(int[] x, int[] y) {
    if (x.length != y.length) {
        throw new IllegalArgumentException(String.format("Arrays have different length: x[%d], y[%d]", x.length, y.length));
    }
    double dist = 0.0;
    for (int i = 0; i < x.length; i++) {
        // Widen to long before subtracting so extreme values (e.g.
        // Integer.MIN_VALUE vs. Integer.MAX_VALUE) cannot overflow int.
        double diff = Math.abs((long) x[i] - y[i]);
        if (dist < diff) {
            dist = diff;
        }
    }
    return dist;
}
// Chebyshev distance of two 5-d vectors: the maximum |x_i - y_i| occurs at
// the 4th component, |1.0567679 - (-0.9460919)| = 2.0028598 ≈ 2.00286.
@Test public void testDistance() { System.out.println("distance"); double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515}; double[] y = {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300}; assertEquals(2.00286, new ChebyshevDistance().d(x, y), 1E-5); }
/**
 * Runs the experiment supplier asynchronously on the given executor and
 * compares its eventual result against the expected value. A synchronous
 * failure (e.g. the executor rejecting the task) is recorded as an error
 * together with its timing sample.
 *
 * @param expected           the control value to compare against
 * @param experimentSupplier produces the experimental value
 * @param executor           where the supplier runs
 */
public <T> void compareSupplierResultAsync(final T expected,
                                           final Supplier<T> experimentSupplier,
                                           final Executor executor) {
    final Timer.Sample sample = Timer.start();
    try {
        final CompletableFuture<T> experimentFuture = CompletableFuture.supplyAsync(experimentSupplier, executor);
        compareFutureResult(expected, experimentFuture);
    } catch (final Exception e) {
        recordError(e, sample);
    }
}
// A supplier that throws must be surfaced as a recorded error: run the
// experiment async, wait for the single-thread executor to drain, then expect
// exactly one errorTimer.record(...) call.
@Test void compareSupplierResultAsyncError() throws InterruptedException { final ExecutorService experimentExecutor = Executors.newSingleThreadExecutor(); experiment.compareSupplierResultAsync(12, () -> { throw new RuntimeException("OH NO"); }, experimentExecutor); experimentExecutor.shutdown(); experimentExecutor.awaitTermination(1, TimeUnit.SECONDS); verify(errorTimer).record(anyLong(), eq(TimeUnit.NANOSECONDS)); }
// Populates the storage-server config: superclass values first, then the
// provider's values. NOTE(review): the provider runs last, so it can overwrite
// fields the superclass set — confirm that ordering is intended.
@Override public void getConfig(StorServerConfig.Builder builder) { super.getConfig(builder); provider.getConfig(builder); }
// With garbage-collection enabled on the cluster, the default GC interval
// (3600 s) applies, and the per-document-type selections are combined per type
// and negated into a single "selection to remove" expression.
@Test void testComplexGarbageCollectionSelectionForIndexedSearch() { StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder(); parse("<cluster id=\"foo\">\n" + " <redundancy>3</redundancy>" + " <documents garbage-collection=\"true\" selection=\"true\">" + " <document type=\"music\" selection=\"music.year &lt; now()\"/>\n" + " <document type=\"movies\" selection=\"movies.year &lt; now() - 1200\"/>\n" + " </documents>\n" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</cluster>").getConfig(builder); StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder); assertEquals(3600, conf.garbagecollection().interval()); assertEquals( "not ((true) and ((music and (music.year < now())) or (movies and (movies.year < now() - 1200))))", conf.garbagecollection().selectiontoremove()); }
/**
 * Structural equality: two registrations are equal when their id,
 * incarnation id, ZK-migration readiness, listeners and supported features
 * all match. Field comparison order matches the original implementation.
 */
@Override
public boolean equals(Object o) {
    if (!(o instanceof ControllerRegistration)) {
        return false;
    }
    ControllerRegistration that = (ControllerRegistration) o;
    if (that.id != id) {
        return false;
    }
    if (!that.incarnationId.equals(incarnationId)) {
        return false;
    }
    if (that.zkMigrationReady != zkMigrationReady) {
        return false;
    }
    if (!that.listeners.equals(listeners)) {
        return false;
    }
    return that.supportedFeatures.equals(supportedFeatures);
}
// Pairwise inequality across distinct registrations, checked in both
// directions (equals must be symmetric), plus reflexive equality for each.
@Test public void testEquals() { assertNotEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(1)); assertNotEquals(REGISTRATIONS.get(1), REGISTRATIONS.get(0)); assertNotEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(2)); assertNotEquals(REGISTRATIONS.get(2), REGISTRATIONS.get(0)); assertEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(0)); assertEquals(REGISTRATIONS.get(1), REGISTRATIONS.get(1)); assertEquals(REGISTRATIONS.get(2), REGISTRATIONS.get(2)); }
/**
 * Sorts the rows in place by two columns, delegating the two-column ordering
 * (primary id1/dir1, secondary id2/dir2) to {@code RowComparator}.
 */
public void sort(String id1, SortDir dir1, String id2, SortDir dir2) {
    rows.sort(new RowComparator(id1, dir1, id2, dir2));
}
// Two-column sort: primary ALPHA ascending, secondary NUMBER ascending for
// ties; the resulting row order is compared against the expected fixture.
@Test public void sortAlphaAscNumberAsc() { tm = unsortedDoubleTableModel(); verifyRowOrder("unsorted", tm, UNSORTED_IDS); tm.sort(ALPHA, SortDir.ASC, NUMBER, SortDir.ASC); verifyRowOrder("aana", tm, ROW_ORDER_AA_NA); }
/**
 * Narrows a non-negative long size to an int, saturating at
 * {@code Integer.MAX_VALUE} when the value does not fit.
 *
 * @param size the size; must be non-negative (checked by assertion only)
 * @return {@code size} as int, or {@code Integer.MAX_VALUE} if it overflows
 */
public static int toIntSize(long size) {
    assert size >= 0 : "Invalid size value: " + size;
    return (int) Math.min(size, Integer.MAX_VALUE);
}
// With assertions enabled, toIntSize must reject a negative size via its
// assert; the assertEquals is never reached because the call throws
// AssertionError (the expected exception of this test).
@Test(expected = AssertionError.class) @RequireAssertEnabled public void toIntSize_whenNegative() { long size = -1; assertEquals((int) size, toIntSize(size)); }
/**
 * Deletes the given configuration key from the Apollo namespace this client
 * is bound to, acting as the default user.
 *
 * @param key the configuration key to remove
 */
public void removeItem(final String key) {
    final String appId = apolloConfig.getAppId();
    final String env = apolloConfig.getEnv();
    final String cluster = apolloConfig.getClusterName();
    final String namespace = apolloConfig.getNamespace();
    this.apolloOpenApiClient.removeItem(appId, env, cluster, namespace, key, DEFAULT_USER);
}
@Test public void testRemoveItem() { String keyToRemove = "KeyToRemove"; apolloClient.removeItem(keyToRemove); verify(apolloClient).removeItem(keyToRemove); }