index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreatePartitionsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * Options for {@link Admin#createPartitions(Map)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreatePartitionsOptions extends AbstractOptions<CreatePartitionsOptions> {

    // When true the broker only validates the request and creates no partitions.
    private boolean validateOnly = false;

    // When true the admin client transparently retries the request on quota violations.
    private boolean retryOnQuotaViolation = true;

    public CreatePartitionsOptions() {
    }

    /**
     * Return true if the request should be validated without creating new partitions.
     */
    public boolean validateOnly() {
        return this.validateOnly;
    }

    /**
     * Set to true if the request should be validated without creating new partitions.
     */
    public CreatePartitionsOptions validateOnly(boolean validateOnly) {
        this.validateOnly = validateOnly;
        return this;
    }

    /**
     * Set to true if quota violation should be automatically retried.
     */
    public CreatePartitionsOptions retryOnQuotaViolation(boolean retryOnQuotaViolation) {
        this.retryOnQuotaViolation = retryOnQuotaViolation;
        return this;
    }

    /**
     * Returns true if quota violation should be automatically retried.
     */
    public boolean shouldRetryOnQuotaViolation() {
        return this.retryOnQuotaViolation;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreatePartitionsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * The result of the {@link Admin#createPartitions(Map)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreatePartitionsResult {

    // One future per topic name; each completes when that topic's new partitions are created.
    private final Map<String, KafkaFuture<Void>> values;

    CreatePartitionsResult(Map<String, KafkaFuture<Void>> values) {
        this.values = values;
    }

    /**
     * Return a map from topic names to futures, which can be used to check the status of individual
     * partition creations.
     */
    public Map<String, KafkaFuture<Void>> values() {
        return this.values;
    }

    /**
     * Return a future which succeeds if all the partition creations succeed.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = values.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreateTopicsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#createTopics(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateTopicsOptions extends AbstractOptions<CreateTopicsOptions> {

    // When true the broker only validates the request and creates no topics.
    private boolean validateOnly = false;

    // When true the admin client transparently retries the request on quota violations.
    private boolean retryOnQuotaViolation = true;

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     */
    // This method is retained to keep binary compatibility with 0.11
    public CreateTopicsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }

    /**
     * Set to true if the request should be validated without creating the topic.
     */
    public CreateTopicsOptions validateOnly(boolean validateOnly) {
        this.validateOnly = validateOnly;
        return this;
    }

    /**
     * Return true if the request should be validated without creating the topic.
     */
    public boolean shouldValidateOnly() {
        return this.validateOnly;
    }

    /**
     * Set to true if quota violation should be automatically retried.
     */
    public CreateTopicsOptions retryOnQuotaViolation(boolean retryOnQuotaViolation) {
        this.retryOnQuotaViolation = retryOnQuotaViolation;
        return this;
    }

    /**
     * Returns true if quota violation should be automatically retried.
     */
    public boolean shouldRetryOnQuotaViolation() {
        return this.retryOnQuotaViolation;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreateTopicsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.errors.ApiException;
import java.util.Collection;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * The result of {@link Admin#createTopics(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateTopicsResult {

    // Sentinel used when the broker response carried no partition/replication metadata.
    final static int UNKNOWN = -1;

    // One future per requested topic, keyed by topic name.
    private final Map<String, KafkaFuture<TopicMetadataAndConfig>> futures;

    protected CreateTopicsResult(Map<String, KafkaFuture<TopicMetadataAndConfig>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from topic names to futures, which can be used to check the status of individual
     * topic creations.
     */
    public Map<String, KafkaFuture<Void>> values() {
        return futures.entrySet().stream()
                .collect(Collectors.toMap(
                        Map.Entry::getKey,
                        entry -> entry.getValue().thenApply(metadata -> (Void) null)));
    }

    /**
     * Return a future which succeeds if all the topic creations succeed.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }

    /**
     * Returns a future that provides topic configs for the topic when the request completes.
     * <p>
     * If broker version doesn't support replication factor in the response, throw
     * {@link org.apache.kafka.common.errors.UnsupportedVersionException}.
     * If broker returned an error for topic configs, throw appropriate exception. For example,
     * {@link org.apache.kafka.common.errors.TopicAuthorizationException} is thrown if user does not
     * have permission to describe topic configs.
     */
    public KafkaFuture<Config> config(String topic) {
        return futures.get(topic).thenApply(TopicMetadataAndConfig::config);
    }

    /**
     * Returns a future that provides topic ID for the topic when the request completes.
     * <p>
     * If broker version doesn't support replication factor in the response, throw
     * {@link org.apache.kafka.common.errors.UnsupportedVersionException}.
     * If broker returned an error for topic configs, throw appropriate exception. For example,
     * {@link org.apache.kafka.common.errors.TopicAuthorizationException} is thrown if user does not
     * have permission to describe topic configs.
     */
    public KafkaFuture<Uuid> topicId(String topic) {
        return futures.get(topic).thenApply(TopicMetadataAndConfig::topicId);
    }

    /**
     * Returns a future that provides number of partitions in the topic when the request completes.
     * <p>
     * If broker version doesn't support replication factor in the response, throw
     * {@link org.apache.kafka.common.errors.UnsupportedVersionException}.
     * If broker returned an error for topic configs, throw appropriate exception. For example,
     * {@link org.apache.kafka.common.errors.TopicAuthorizationException} is thrown if user does not
     * have permission to describe topic configs.
     */
    public KafkaFuture<Integer> numPartitions(String topic) {
        return futures.get(topic).thenApply(TopicMetadataAndConfig::numPartitions);
    }

    /**
     * Returns a future that provides replication factor for the topic when the request completes.
     * <p>
     * If broker version doesn't support replication factor in the response, throw
     * {@link org.apache.kafka.common.errors.UnsupportedVersionException}.
     * If broker returned an error for topic configs, throw appropriate exception. For example,
     * {@link org.apache.kafka.common.errors.TopicAuthorizationException} is thrown if user does not
     * have permission to describe topic configs.
     */
    public KafkaFuture<Integer> replicationFactor(String topic) {
        return futures.get(topic).thenApply(TopicMetadataAndConfig::replicationFactor);
    }

    /**
     * Holds either the created topic's metadata and config, or the error that prevented it.
     * Accessors rethrow the stored exception when the creation failed.
     */
    public static class TopicMetadataAndConfig {
        private final ApiException exception;
        private final Uuid topicId;
        private final int numPartitions;
        private final int replicationFactor;
        private final Config config;

        public TopicMetadataAndConfig(Uuid topicId, int numPartitions, int replicationFactor, Config config) {
            this.exception = null;
            this.topicId = topicId;
            this.numPartitions = numPartitions;
            this.replicationFactor = replicationFactor;
            this.config = config;
        }

        public TopicMetadataAndConfig(ApiException exception) {
            this.exception = exception;
            this.topicId = Uuid.ZERO_UUID;
            this.numPartitions = UNKNOWN;
            this.replicationFactor = UNKNOWN;
            this.config = null;
        }

        public Uuid topicId() {
            throwIfFailed();
            return this.topicId;
        }

        public int numPartitions() {
            throwIfFailed();
            return this.numPartitions;
        }

        public int replicationFactor() {
            throwIfFailed();
            return this.replicationFactor;
        }

        public Config config() {
            throwIfFailed();
            return this.config;
        }

        // Rethrows the stored error, if any, before exposing metadata.
        private void throwIfFailed() {
            if (exception != null)
                throw exception;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteAclsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for the {@link Admin#deleteAcls(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteAclsOptions extends AbstractOptions<DeleteAclsOptions> {

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     */
    // This method is retained to keep binary compatibility with 0.11
    public DeleteAclsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteAclsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.errors.ApiException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * The result of the {@link Admin#deleteAcls(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteAclsResult {

    /**
     * A class containing either the deleted ACL binding or an exception if the delete failed.
     */
    public static class FilterResult {
        private final AclBinding binding;
        private final ApiException exception;

        FilterResult(AclBinding binding, ApiException exception) {
            this.binding = binding;
            this.exception = exception;
        }

        /**
         * Return the deleted ACL binding or null if there was an error.
         */
        public AclBinding binding() {
            return this.binding;
        }

        /**
         * Return an exception if the ACL delete was not successful or null if it was.
         */
        public ApiException exception() {
            return this.exception;
        }
    }

    /**
     * A class containing the results of the delete ACLs operation.
     */
    public static class FilterResults {
        private final List<FilterResult> values;

        FilterResults(List<FilterResult> values) {
            this.values = values;
        }

        /**
         * Return a list of delete ACLs results for a given filter.
         */
        public List<FilterResult> values() {
            return this.values;
        }
    }

    // One future per filter; each completes with the deletions that filter matched.
    private final Map<AclBindingFilter, KafkaFuture<FilterResults>> futures;

    DeleteAclsResult(Map<AclBindingFilter, KafkaFuture<FilterResults>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from acl filters to futures which can be used to check the status of the deletions by each
     * filter.
     */
    public Map<AclBindingFilter, KafkaFuture<FilterResults>> values() {
        return this.futures;
    }

    /**
     * Return a future which succeeds only if all the ACLs deletions succeed, and which contains all the deleted ACLs.
     * Note that it if the filters don't match any ACLs, this is not considered an error.
     */
    public KafkaFuture<Collection<AclBinding>> all() {
        KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending).thenApply(unused -> getAclBindings(futures));
    }

    // Collects every deleted binding once all per-filter futures have completed successfully;
    // rethrows the first per-binding error encountered.
    private List<AclBinding> getAclBindings(Map<AclBindingFilter, KafkaFuture<FilterResults>> futures) {
        List<AclBinding> deleted = new ArrayList<>();
        for (KafkaFuture<FilterResults> future : futures.values()) {
            FilterResults filterResults;
            try {
                filterResults = future.get();
            } catch (Throwable e) {
                // This should be unreachable, since the future returned by KafkaFuture#allOf should
                // have failed if any Future failed.
                throw new KafkaException("DeleteAclsResult#all: internal error", e);
            }
            for (FilterResult filterResult : filterResults.values()) {
                if (filterResult.exception() != null)
                    throw filterResult.exception();
                deleted.add(filterResult.binding());
            }
        }
        return deleted;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Set;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * Options for the {@link Admin#deleteConsumerGroupOffsets(String, Set)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteConsumerGroupOffsetsOptions extends AbstractOptions<DeleteConsumerGroupOffsetsOptions> {
    // No operation-specific options; only the inherited timeout applies.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Set;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.protocol.Errors;
/**
 * The result of the {@link Admin#deleteConsumerGroupOffsets(String, Set)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteConsumerGroupOffsetsResult {

    // Completes with the per-partition error codes returned by the broker.
    private final KafkaFuture<Map<TopicPartition, Errors>> future;

    // The exact set of partitions named in the original request.
    private final Set<TopicPartition> partitions;

    DeleteConsumerGroupOffsetsResult(KafkaFuture<Map<TopicPartition, Errors>> future, Set<TopicPartition> partitions) {
        this.future = future;
        this.partitions = partitions;
    }

    /**
     * Return a future which can be used to check the result for a given partition.
     */
    public KafkaFuture<Void> partitionResult(final TopicPartition partition) {
        if (!partitions.contains(partition)) {
            throw new IllegalArgumentException("Partition " + partition + " was not included in the original request");
        }
        final KafkaFutureImpl<Void> outcome = new KafkaFutureImpl<>();
        this.future.whenComplete((partitionErrors, error) -> {
            if (error != null) {
                outcome.completeExceptionally(error);
                return;
            }
            if (!maybeCompleteExceptionally(partitionErrors, partition, outcome)) {
                outcome.complete(null);
            }
        });
        return outcome;
    }

    /**
     * Return a future which succeeds only if all the deletions succeed.
     * If not, the first partition error shall be returned.
     */
    public KafkaFuture<Void> all() {
        final KafkaFutureImpl<Void> outcome = new KafkaFutureImpl<>();
        this.future.whenComplete((partitionErrors, error) -> {
            if (error != null) {
                outcome.completeExceptionally(error);
                return;
            }
            for (TopicPartition partition : partitions) {
                if (maybeCompleteExceptionally(partitionErrors, partition, outcome)) {
                    return;
                }
            }
            outcome.complete(null);
        });
        return outcome;
    }

    // Completes 'result' exceptionally when the broker reported an error (or omitted a result)
    // for 'partition'; returns true when it did so.
    private boolean maybeCompleteExceptionally(Map<TopicPartition, Errors> partitionLevelErrors,
                                               TopicPartition partition,
                                               KafkaFutureImpl<Void> result) {
        Throwable exception = KafkaAdminClient.getSubLevelError(partitionLevelErrors, partition,
            "Offset deletion result for partition \"" + partition + "\" was not included in the response");
        if (exception == null) {
            return false;
        }
        result.completeExceptionally(exception);
        return true;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteConsumerGroupsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for the {@link Admin#deleteConsumerGroups(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteConsumerGroupsOptions extends AbstractOptions<DeleteConsumerGroupsOptions> {
    // No operation-specific options; only the inherited timeout applies.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
 * The result of the {@link Admin#deleteConsumerGroups(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteConsumerGroupsResult {

    // One future per group id; each completes when that group's deletion finishes.
    private final Map<String, KafkaFuture<Void>> futures;

    DeleteConsumerGroupsResult(final Map<String, KafkaFuture<Void>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from group id to futures which can be used to check the status of
     * individual deletions.
     */
    public Map<String, KafkaFuture<Void>> deletedGroups() {
        // Copy via the HashMap copy constructor rather than an element-by-element forEach;
        // same defensive snapshot, one idiomatic call.
        return new HashMap<>(futures);
    }

    /**
     * Return a future which succeeds only if all the consumer group deletions succeed.
     */
    public KafkaFuture<Void> all() {
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteRecordsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * Options for {@link Admin#deleteRecords(Map, DeleteRecordsOptions)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteRecordsOptions extends AbstractOptions<DeleteRecordsOptions> {
    // No operation-specific options; only the inherited timeout applies.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteRecordsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * The result of the {@link Admin#deleteRecords(Map)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteRecordsResult {

    // One future per partition; each completes with the new low watermark after deletion.
    private final Map<TopicPartition, KafkaFuture<DeletedRecords>> futures;

    public DeleteRecordsResult(Map<TopicPartition, KafkaFuture<DeletedRecords>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from topic partition to futures which can be used to check the status of
     * individual deletions.
     */
    public Map<TopicPartition, KafkaFuture<DeletedRecords>> lowWatermarks() {
        return this.futures;
    }

    /**
     * Return a future which succeeds only if all the records deletions succeed.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteTopicsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#deleteTopics(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteTopicsOptions extends AbstractOptions<DeleteTopicsOptions> {

    // When true the admin client transparently retries the request on quota violations.
    private boolean retryOnQuotaViolation = true;

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     */
    // This method is retained to keep binary compatibility with 0.11
    public DeleteTopicsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }

    /**
     * Set to true if quota violation should be automatically retried.
     */
    public DeleteTopicsOptions retryOnQuotaViolation(boolean retryOnQuotaViolation) {
        this.retryOnQuotaViolation = retryOnQuotaViolation;
        return this;
    }

    /**
     * Returns true if quota violation should be automatically retried.
     */
    public boolean shouldRetryOnQuotaViolation() {
        return this.retryOnQuotaViolation;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeleteTopicsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;
/**
* The result of the {@link Admin#deleteTopics(Collection)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DeleteTopicsResult {

    // Exactly one of these two maps is non-null: IDs when the request used
    // topic IDs, names when it used topic names.
    private final Map<Uuid, KafkaFuture<Void>> topicIdFutures;
    private final Map<String, KafkaFuture<Void>> nameFutures;

    /**
     * Creates a result keyed either by topic ID or by topic name; exactly one
     * of the two maps must be supplied.
     */
    protected DeleteTopicsResult(Map<Uuid, KafkaFuture<Void>> topicIdFutures, Map<String, KafkaFuture<Void>> nameFutures) {
        if (topicIdFutures != null && nameFutures != null)
            throw new IllegalArgumentException("topicIdFutures and nameFutures cannot both be specified.");
        if (topicIdFutures == null && nameFutures == null)
            throw new IllegalArgumentException("topicIdFutures and nameFutures cannot both be null.");
        this.topicIdFutures = topicIdFutures;
        this.nameFutures = nameFutures;
    }

    // Factory for results keyed by topic ID.
    static DeleteTopicsResult ofTopicIds(Map<Uuid, KafkaFuture<Void>> topicIdFutures) {
        return new DeleteTopicsResult(topicIdFutures, null);
    }

    // Factory for results keyed by topic name.
    static DeleteTopicsResult ofTopicNames(Map<String, KafkaFuture<Void>> nameFutures) {
        return new DeleteTopicsResult(null, nameFutures);
    }

    /**
     * Use when {@link Admin#deleteTopics(TopicCollection, DeleteTopicsOptions)} used a TopicIdCollection
     * @return a map from topic IDs to futures which can be used to check the status of
     *         individual deletions if the deleteTopics request used topic IDs. Otherwise return null.
     */
    public Map<Uuid, KafkaFuture<Void>> topicIdValues() {
        return topicIdFutures;
    }

    /**
     * Use when {@link Admin#deleteTopics(TopicCollection, DeleteTopicsOptions)} used a TopicNameCollection
     * @return a map from topic names to futures which can be used to check the status of
     *         individual deletions if the deleteTopics request used topic names. Otherwise return null.
     */
    public Map<String, KafkaFuture<Void>> topicNameValues() {
        return nameFutures;
    }

    /**
     * @return a map from topic names to futures which can be used to check the status of
     *         individual deletions if the deleteTopics request used topic names. Otherwise return null.
     * @deprecated Since 3.0 use {@link #topicNameValues} instead
     */
    @Deprecated
    public Map<String, KafkaFuture<Void>> values() {
        return nameFutures;
    }

    /**
     * @return a future which succeeds only if all the topic deletions succeed.
     */
    public KafkaFuture<Void> all() {
        // Whichever keying was used, the aggregate is over the same set of per-topic futures.
        Map<?, KafkaFuture<Void>> perTopic = (topicIdFutures != null) ? topicIdFutures : nameFutures;
        return KafkaFuture.allOf(perTopic.values().toArray(new KafkaFuture[0]));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DeletedRecords.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* Represents information about deleted records
*
* The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
*/
@InterfaceStability.Evolving
public class DeletedRecords {

    // The partition's "low watermark" after the deletion completed.
    private final long lowWatermark;

    /**
     * Create an instance of this class with the provided parameters.
     *
     * @param lowWatermark "low watermark" for the topic partition on which the deletion was executed
     */
    public DeletedRecords(long lowWatermark) {
        this.lowWatermark = lowWatermark;
    }

    /**
     * Return the "low watermark" for the topic partition on which the deletion was executed
     */
    public long lowWatermark() {
        return lowWatermark;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeAclsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* Options for {@link Admin#describeAcls(AclBindingFilter)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeAclsOptions extends AbstractOptions<DescribeAclsOptions> {

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     */
    // This method is retained to keep binary compatibility with 0.11
    public DescribeAclsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeAclsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
* The result of the {@link KafkaAdminClient#describeAcls(AclBindingFilter)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeAclsResult {

    // Single future that completes with all ACL bindings matching the filter.
    private final KafkaFuture<Collection<AclBinding>> future;

    DescribeAclsResult(KafkaFuture<Collection<AclBinding>> future) {
        this.future = future;
    }

    /**
     * Return a future containing the ACLs requested.
     */
    public KafkaFuture<Collection<AclBinding>> values() {
        return future;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeClientQuotasOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.quota.ClientQuotaFilter;
/**
* Options for {@link Admin#describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeClientQuotasOptions extends AbstractOptions<DescribeClientQuotasOptions> {
    // Intentionally empty: carries no options beyond the timeout inherited from AbstractOptions.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeClientQuotasResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import java.util.Map;
/**
* The result of the {@link Admin#describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeClientQuotasResult {

    // Future completing with every matched entity mapped to its quota values.
    private final KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> entities;

    /**
     * Maps an entity to its configured quota value(s). Note if no value is defined for a quota
     * type for that entity's config, then it is not included in the resulting value map.
     *
     * @param entities future for the collection of entities that matched the filter
     */
    public DescribeClientQuotasResult(KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> entities) {
        this.entities = entities;
    }

    /**
     * Returns a map from quota entity to a future which can be used to check the status of the operation.
     */
    public KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> entities() {
        return entities;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeClusterOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* Options for {@link Admin#describeCluster()}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeClusterOptions extends AbstractOptions<DescribeClusterOptions> {

    // Defaults to false: authorized operations are only returned on request.
    private boolean includeAuthorizedOperations;

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     */
    // This method is retained to keep binary compatibility with 0.11
    public DescribeClusterOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }

    /**
     * Specify if authorized operations should be included in the response. Note that some
     * older brokers cannot supply this information even if it is requested.
     */
    public DescribeClusterOptions includeAuthorizedOperations(boolean includeAuthorizedOperations) {
        this.includeAuthorizedOperations = includeAuthorizedOperations;
        return this;
    }

    /**
     * Return true if authorized operations should be included in the response.
     */
    public boolean includeAuthorizedOperations() {
        return includeAuthorizedOperations;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeClusterResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Set;
/**
* The result of the {@link KafkaAdminClient#describeCluster()} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeClusterResult {

    // Each future completes independently as the corresponding piece of
    // cluster metadata becomes available.
    private final KafkaFuture<Collection<Node>> nodes;
    private final KafkaFuture<Node> controller;
    private final KafkaFuture<String> clusterId;
    private final KafkaFuture<Set<AclOperation>> authorizedOperations;

    DescribeClusterResult(KafkaFuture<Collection<Node>> nodes,
                          KafkaFuture<Node> controller,
                          KafkaFuture<String> clusterId,
                          KafkaFuture<Set<AclOperation>> authorizedOperations) {
        this.nodes = nodes;
        this.controller = controller;
        this.clusterId = clusterId;
        this.authorizedOperations = authorizedOperations;
    }

    /**
     * Returns a future which yields a collection of nodes.
     */
    public KafkaFuture<Collection<Node>> nodes() {
        return nodes;
    }

    /**
     * Returns a future which yields the current controller id.
     * Note that this may yield null, if the controller ID is not yet known.
     */
    public KafkaFuture<Node> controller() {
        return controller;
    }

    /**
     * Returns a future which yields the current cluster id. The future value will be non-null if the
     * broker version is 0.10.1.0 or higher and null otherwise.
     */
    public KafkaFuture<String> clusterId() {
        return clusterId;
    }

    /**
     * Returns a future which yields authorized operations. The future value will be non-null if the
     * broker supplied this information, and null otherwise.
     */
    public KafkaFuture<Set<AclOperation>> authorizedOperations() {
        return authorizedOperations;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeConfigsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
* Options for {@link Admin#describeConfigs(Collection)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeConfigsOptions extends AbstractOptions<DescribeConfigsOptions> {

    // Both flags default to false: brokers only return synonyms/documentation on request.
    private boolean includeSynonyms = false;
    private boolean includeDocumentation = false;

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     */
    // This method is retained to keep binary compatibility with 0.11
    public DescribeConfigsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }

    /**
     * Return true if synonym configs should be returned in the response.
     */
    public boolean includeSynonyms() {
        return includeSynonyms;
    }

    /**
     * Set to true if synonym configs should be returned in the response.
     */
    public DescribeConfigsOptions includeSynonyms(boolean includeSynonyms) {
        this.includeSynonyms = includeSynonyms;
        return this;
    }

    /**
     * Return true if config documentation should be returned in the response.
     */
    public boolean includeDocumentation() {
        return includeDocumentation;
    }

    /**
     * Set to true if config documentation should be returned in the response.
     */
    public DescribeConfigsOptions includeDocumentation(boolean includeDocumentation) {
        this.includeDocumentation = includeDocumentation;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeConfigsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigResource;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
/**
* The result of the {@link KafkaAdminClient#describeConfigs(Collection)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeConfigsResult {

    // Per-resource futures, one entry per resource in the original request.
    private final Map<ConfigResource, KafkaFuture<Config>> futures;

    protected DescribeConfigsResult(Map<ConfigResource, KafkaFuture<Config>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from resources to futures which can be used to check the status of the configuration for each
     * resource.
     */
    public Map<ConfigResource, KafkaFuture<Config>> values() {
        return futures;
    }

    /**
     * Return a future which succeeds only if all the config descriptions succeed.
     */
    public KafkaFuture<Map<ConfigResource, Config>> all() {
        // Lambda instead of an anonymous BaseFunction, consistent with the
        // other *Result classes in this package (e.g. DescribeConsumerGroupsResult).
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]))
            .thenApply(v -> {
                Map<ConfigResource, Config> configs = new HashMap<>(futures.size());
                for (Map.Entry<ConfigResource, KafkaFuture<Config>> entry : futures.entrySet()) {
                    try {
                        configs.put(entry.getKey(), entry.getValue().get());
                    } catch (InterruptedException | ExecutionException e) {
                        // This should be unreachable, because allOf ensured that all the futures
                        // completed successfully.
                        throw new RuntimeException(e);
                    }
                }
                return configs;
            });
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeConsumerGroupsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
* Options for {@link Admin#describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)}.
* <p>
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeConsumerGroupsOptions extends AbstractOptions<DescribeConsumerGroupsOptions> {
private boolean includeAuthorizedOperations;
public DescribeConsumerGroupsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations) {
this.includeAuthorizedOperations = includeAuthorizedOperations;
return this;
}
public boolean includeAuthorizedOperations() {
return includeAuthorizedOperations;
}
} |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
/**
* The result of the {@link KafkaAdminClient#describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)}} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeConsumerGroupsResult {

    // One future per requested group id.
    private final Map<String, KafkaFuture<ConsumerGroupDescription>> futures;

    public DescribeConsumerGroupsResult(final Map<String, KafkaFuture<ConsumerGroupDescription>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from group id to futures which yield group descriptions.
     */
    public Map<String, KafkaFuture<ConsumerGroupDescription>> describedGroups() {
        // Hand out a shallow copy so callers cannot mutate the internal map.
        return new HashMap<>(futures);
    }

    /**
     * Return a future which yields all ConsumerGroupDescription objects, if all the describes succeed.
     */
    public KafkaFuture<Map<String, ConsumerGroupDescription>> all() {
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply(nil -> {
            Map<String, ConsumerGroupDescription> descriptions = new HashMap<>(futures.size());
            for (Map.Entry<String, KafkaFuture<ConsumerGroupDescription>> entry : futures.entrySet()) {
                try {
                    descriptions.put(entry.getKey(), entry.getValue().get());
                } catch (InterruptedException | ExecutionException e) {
                    // This should be unreachable, since the KafkaFuture#allOf already ensured
                    // that all of the futures completed successfully.
                    throw new RuntimeException(e);
                }
            }
            return descriptions;
        });
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.List;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
/**
* Options for {@link Admin#describeDelegationToken(DescribeDelegationTokenOptions)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeDelegationTokenOptions extends AbstractOptions<DescribeDelegationTokenOptions> {

    // null means "no owner filter" — see owners(List) below.
    private List<KafkaPrincipal> owners;

    /**
     * If owners is null, all the user owned tokens and tokens where user have Describe permission
     * will be returned.
     *
     * @param owners the token owners to filter on, or null for no owner filter
     * @return this instance
     */
    public DescribeDelegationTokenOptions owners(List<KafkaPrincipal> owners) {
        this.owners = owners;
        return this;
    }

    /**
     * Return the owner filter, or null if none was set.
     */
    public List<KafkaPrincipal> owners() {
        return owners;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeDelegationTokenResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.List;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.security.token.delegation.DelegationToken;
/**
* The result of the {@link KafkaAdminClient#describeDelegationToken(DescribeDelegationTokenOptions)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeDelegationTokenResult {

    // Single future that completes with the matching delegation tokens.
    private final KafkaFuture<List<DelegationToken>> delegationTokens;

    DescribeDelegationTokenResult(KafkaFuture<List<DelegationToken>> delegationTokens) {
        this.delegationTokens = delegationTokens;
    }

    /**
     * Returns a future which yields list of delegation tokens
     */
    public KafkaFuture<List<DelegationToken>> delegationTokens() {
        return delegationTokens;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeFeaturesOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * Options for {@link Admin#describeFeatures(DescribeFeaturesOptions)}.
 *
 * The API of this class is evolving. See {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeFeaturesOptions extends AbstractOptions<DescribeFeaturesOptions> {
    // Intentionally empty: carries no options beyond the timeout inherited from AbstractOptions.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeFeaturesResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
/**
 * The result of the {@link Admin#describeFeatures(DescribeFeaturesOptions)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
public class DescribeFeaturesResult {
// Future that completes with the feature metadata returned by the broker.
private final KafkaFuture<FeatureMetadata> future;
DescribeFeaturesResult(KafkaFuture<FeatureMetadata> future) {
this.future = future;
}
/**
 * Returns a future which yields the {@link FeatureMetadata} for the cluster.
 */
public KafkaFuture<FeatureMetadata> featureMetadata() {
return future;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeLogDirsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#describeLogDirs(Collection)}
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeLogDirsOptions extends AbstractOptions<DescribeLogDirsOptions> {
// No options beyond the inherited timeoutMs are currently supported.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeLogDirsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.HashMap;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
/**
* The result of the {@link Admin#describeLogDirs(Collection)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeLogDirsResult {
private final Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> futures;
DescribeLogDirsResult(Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> futures) {
this.futures = futures;
}
/**
* Return a map from brokerId to future which can be used to check the information of partitions on each individual broker.
* @deprecated Deprecated Since Kafka 2.7. Use {@link #descriptions()}.
*/
@Deprecated
@SuppressWarnings("deprecation")
public Map<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>> values() {
return descriptions().entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> entry.getValue().thenApply(map -> convertMapValues(map))));
}
@SuppressWarnings("deprecation")
private Map<String, DescribeLogDirsResponse.LogDirInfo> convertMapValues(Map<String, LogDirDescription> map) {
Stream<Map.Entry<String, LogDirDescription>> stream = map.entrySet().stream();
return stream.collect(Collectors.toMap(
Map.Entry::getKey,
infoEntry -> {
LogDirDescription logDir = infoEntry.getValue();
return new DescribeLogDirsResponse.LogDirInfo(logDir.error() == null ? Errors.NONE : Errors.forException(logDir.error()),
logDir.replicaInfos().entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
replicaEntry -> new DescribeLogDirsResponse.ReplicaInfo(
replicaEntry.getValue().size(),
replicaEntry.getValue().offsetLag(),
replicaEntry.getValue().isFuture())
)));
}));
}
/**
* Return a map from brokerId to future which can be used to check the information of partitions on each individual broker.
* The result of the future is a map from broker log directory path to a description of that log directory.
*/
public Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> descriptions() {
return futures;
}
/**
* Return a future which succeeds only if all the brokers have responded without error
* @deprecated Deprecated Since Kafka 2.7. Use {@link #allDescriptions()}.
*/
@Deprecated
@SuppressWarnings("deprecation")
public KafkaFuture<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> all() {
return allDescriptions().thenApply(map -> map.entrySet().stream().collect(Collectors.toMap(
entry -> entry.getKey(),
entry -> convertMapValues(entry.getValue())
)));
}
/**
* Return a future which succeeds only if all the brokers have responded without error.
* The result of the future is a map from brokerId to a map from broker log directory path
* to a description of that log directory.
*/
public KafkaFuture<Map<Integer, Map<String, LogDirDescription>>> allDescriptions() {
return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).
thenApply(v -> {
Map<Integer, Map<String, LogDirDescription>> descriptions = new HashMap<>(futures.size());
for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirDescription>>> entry : futures.entrySet()) {
try {
descriptions.put(entry.getKey(), entry.getValue().get());
} catch (InterruptedException | ExecutionException e) {
// This should be unreachable, because allOf ensured that all the futures completed successfully.
throw new RuntimeException(e);
}
}
return descriptions;
});
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeMetadataQuorumOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
/**
 * Options for {@link Admin#describeMetadataQuorum(DescribeMetadataQuorumOptions)}
 */
public class DescribeMetadataQuorumOptions extends AbstractOptions<DescribeMetadataQuorumOptions> {
// No options beyond the inherited timeoutMs are currently supported.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeMetadataQuorumResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
/**
 * The result of {@link Admin#describeMetadataQuorum(DescribeMetadataQuorumOptions)}
 */
public class DescribeMetadataQuorumResult {

    /** Future that completes with the quorum information returned by the controller. */
    private final KafkaFuture<QuorumInfo> quorumInfoFuture;

    DescribeMetadataQuorumResult(KafkaFuture<QuorumInfo> quorumInfo) {
        this.quorumInfoFuture = quorumInfo;
    }

    /**
     * Returns a future containing the QuorumInfo
     */
    public KafkaFuture<QuorumInfo> quorumInfo() {
        return quorumInfoFuture;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeProducersOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Objects;
import java.util.OptionalInt;
/**
 * Options for {@link Admin#describeProducers(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeProducersOptions extends AbstractOptions<DescribeProducersOptions> {

    // Optional broker id the request should be addressed to; empty when unset.
    private OptionalInt brokerId = OptionalInt.empty();

    /**
     * Set the id of the broker the request should be sent to.
     */
    public DescribeProducersOptions brokerId(int brokerId) {
        this.brokerId = OptionalInt.of(brokerId);
        return this;
    }

    /**
     * Returns the broker id set on these options, or empty if none has been set.
     */
    public OptionalInt brokerId() {
        return brokerId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        DescribeProducersOptions other = (DescribeProducersOptions) o;
        // brokerId is never null (initialized to OptionalInt.empty()), so a direct
        // equals call is equivalent to Objects.equals here.
        return brokerId.equals(other.brokerId)
            && Objects.equals(timeoutMs, other.timeoutMs);
    }

    @Override
    public int hashCode() {
        return Objects.hash(brokerId, timeoutMs);
    }

    @Override
    public String toString() {
        return "DescribeProducersOptions(" +
            "brokerId=" + brokerId +
            ", timeoutMs=" + timeoutMs +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeProducersResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
/**
 * The result of the {@link Admin#describeProducers(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeProducersResult {

    /** One future per requested partition. */
    private final Map<TopicPartition, KafkaFuture<PartitionProducerState>> futures;

    DescribeProducersResult(Map<TopicPartition, KafkaFuture<PartitionProducerState>> futures) {
        this.futures = futures;
    }

    /**
     * Returns the future for a single requested partition.
     *
     * @throws IllegalArgumentException if the partition was not part of the request
     */
    public KafkaFuture<PartitionProducerState> partitionResult(final TopicPartition partition) {
        KafkaFuture<PartitionProducerState> result = futures.get(partition);
        if (result == null) {
            throw new IllegalArgumentException("Topic partition " + partition +
                " was not included in the request");
        }
        return result;
    }

    /**
     * Returns a future yielding the producer state of every requested partition;
     * the future fails if any individual partition lookup failed.
     */
    public KafkaFuture<Map<TopicPartition, PartitionProducerState>> all() {
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]))
            .thenApply(nil -> {
                Map<TopicPartition, PartitionProducerState> results = new HashMap<>(futures.size());
                futures.forEach((partition, future) -> {
                    try {
                        results.put(partition, future.get());
                    } catch (InterruptedException | ExecutionException e) {
                        // Unreachable: allOf above guarantees every future completed successfully.
                        throw new KafkaException(e);
                    }
                });
                return results;
            });
    }

    /** The set of active producers for a single partition. */
    public static class PartitionProducerState {
        private final List<ProducerState> activeProducers;

        public PartitionProducerState(List<ProducerState> activeProducers) {
            this.activeProducers = activeProducers;
        }

        public List<ProducerState> activeProducers() {
            return activeProducers;
        }

        @Override
        public String toString() {
            return "PartitionProducerState(" +
                "activeProducers=" + activeProducers +
                ')';
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeReplicaLogDirsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#describeReplicaLogDirs(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeReplicaLogDirsOptions extends AbstractOptions<DescribeReplicaLogDirsOptions> {
// No options beyond the inherited timeoutMs are currently supported.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import java.util.HashMap;
import java.util.Map;
import java.util.Collection;
import java.util.concurrent.ExecutionException;
/**
 * The result of {@link Admin#describeReplicaLogDirs(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeReplicaLogDirsResult {

    /** One future per requested replica. */
    private final Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> futures;

    DescribeReplicaLogDirsResult(Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from replica to future which can be used to check the log directory information of individual replicas.
     */
    public Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> values() {
        return futures;
    }

    /**
     * Return a future which succeeds if log directory information of all replicas are available.
     */
    public KafkaFuture<Map<TopicPartitionReplica, ReplicaLogDirInfo>> all() {
        KafkaFuture<Void> allComplete = KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
        return allComplete.thenApply(v -> {
            Map<TopicPartitionReplica, ReplicaLogDirInfo> replicaLogDirInfos = new HashMap<>();
            futures.forEach((replica, future) -> {
                try {
                    replicaLogDirInfos.put(replica, future.get());
                } catch (InterruptedException | ExecutionException e) {
                    // Unreachable: allOf above guarantees every future completed successfully.
                    throw new RuntimeException(e);
                }
            });
            return replicaLogDirInfos;
        });
    }

    /** Log directory placement of one replica, including any in-progress move. */
    public static class ReplicaLogDirInfo {
        // Current log directory of the replica on the given broker, or null if the
        // broker hosts no replica for this partition.
        private final String currentReplicaLogDir;
        // Defined as max(HW of partition - LEO of the replica, 0).
        private final long currentReplicaOffsetLag;
        // Future log directory of the replica on the given broker, or null if the
        // replica is not being moved to another log directory on that broker.
        private final String futureReplicaLogDir;
        // The LEO of the replica minus the LEO of the future log of this replica in
        // the destination log directory; -1 if there is no replica for this partition
        // or no move is in progress on the given broker.
        private final long futureReplicaOffsetLag;

        ReplicaLogDirInfo() {
            this(null, DescribeLogDirsResponse.INVALID_OFFSET_LAG, null, DescribeLogDirsResponse.INVALID_OFFSET_LAG);
        }

        ReplicaLogDirInfo(String currentReplicaLogDir,
                          long currentReplicaOffsetLag,
                          String futureReplicaLogDir,
                          long futureReplicaOffsetLag) {
            this.currentReplicaLogDir = currentReplicaLogDir;
            this.currentReplicaOffsetLag = currentReplicaOffsetLag;
            this.futureReplicaLogDir = futureReplicaLogDir;
            this.futureReplicaOffsetLag = futureReplicaOffsetLag;
        }

        public String getCurrentReplicaLogDir() {
            return currentReplicaLogDir;
        }

        public long getCurrentReplicaOffsetLag() {
            return currentReplicaOffsetLag;
        }

        public String getFutureReplicaLogDir() {
            return futureReplicaLogDir;
        }

        public long getFutureReplicaOffsetLag() {
            return futureReplicaOffsetLag;
        }

        @Override
        public String toString() {
            // Output format is kept byte-for-byte identical to the historical
            // StringBuilder-based implementation (including the missing class name
            // in the first branch).
            if (futureReplicaLogDir != null) {
                return "(currentReplicaLogDir=" + currentReplicaLogDir +
                    ", futureReplicaLogDir=" + futureReplicaLogDir +
                    ", futureReplicaOffsetLag=" + futureReplicaOffsetLag +
                    ")";
            }
            return "ReplicaLogDirInfo(currentReplicaLogDir=" + currentReplicaLogDir + ")";
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeTopicsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#describeTopics(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeTopicsOptions extends AbstractOptions<DescribeTopicsOptions> {

    // Whether the response should include the authorized operations for each topic.
    private boolean includeAuthorizedOperations;

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     * This override is retained to keep binary compatibility with 0.11.
     */
    public DescribeTopicsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }

    /**
     * Set whether authorized operations should be requested for each topic.
     */
    public DescribeTopicsOptions includeAuthorizedOperations(boolean includeAuthorizedOperations) {
        this.includeAuthorizedOperations = includeAuthorizedOperations;
        return this;
    }

    /**
     * Returns whether authorized operations were requested.
     */
    public boolean includeAuthorizedOperations() {
        return includeAuthorizedOperations;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeTopicsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
/**
* The result of the {@link KafkaAdminClient#describeTopics(Collection)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeTopicsResult {
private final Map<Uuid, KafkaFuture<TopicDescription>> topicIdFutures;
private final Map<String, KafkaFuture<TopicDescription>> nameFutures;
@Deprecated
protected DescribeTopicsResult(Map<String, KafkaFuture<TopicDescription>> futures) {
this(null, futures);
}
// VisibleForTesting
protected DescribeTopicsResult(Map<Uuid, KafkaFuture<TopicDescription>> topicIdFutures, Map<String, KafkaFuture<TopicDescription>> nameFutures) {
if (topicIdFutures != null && nameFutures != null)
throw new IllegalArgumentException("topicIdFutures and nameFutures cannot both be specified.");
if (topicIdFutures == null && nameFutures == null)
throw new IllegalArgumentException("topicIdFutures and nameFutures cannot both be null.");
this.topicIdFutures = topicIdFutures;
this.nameFutures = nameFutures;
}
static DescribeTopicsResult ofTopicIds(Map<Uuid, KafkaFuture<TopicDescription>> topicIdFutures) {
return new DescribeTopicsResult(topicIdFutures, null);
}
static DescribeTopicsResult ofTopicNames(Map<String, KafkaFuture<TopicDescription>> nameFutures) {
return new DescribeTopicsResult(null, nameFutures);
}
/**
* Use when {@link Admin#describeTopics(TopicCollection, DescribeTopicsOptions)} used a TopicIdCollection
*
* @return a map from topic IDs to futures which can be used to check the status of
* individual topics if the request used topic IDs, otherwise return null.
*/
public Map<Uuid, KafkaFuture<TopicDescription>> topicIdValues() {
return topicIdFutures;
}
/**
* Use when {@link Admin#describeTopics(TopicCollection, DescribeTopicsOptions)} used a TopicNameCollection
*
* @return a map from topic names to futures which can be used to check the status of
* individual topics if the request used topic names, otherwise return null.
*/
public Map<String, KafkaFuture<TopicDescription>> topicNameValues() {
return nameFutures;
}
/**
* @return a map from topic names to futures which can be used to check the status of
* individual topics if the request used topic names, otherwise return null.
*
* @deprecated Since 3.1.0 use {@link #topicNameValues} instead
*/
@Deprecated
public Map<String, KafkaFuture<TopicDescription>> values() {
return nameFutures;
}
/**
* @return A future map from topic names to descriptions which can be used to check
* the status of individual description if the describe topic request used
* topic names, otherwise return null, this request succeeds only if all the
* topic descriptions succeed
*
* @deprecated Since 3.1.0 use {@link #allTopicNames()} instead
*/
@Deprecated
public KafkaFuture<Map<String, TopicDescription>> all() {
return all(nameFutures);
}
/**
* @return A future map from topic names to descriptions which can be used to check
* the status of individual description if the describe topic request used
* topic names, otherwise return null, this request succeeds only if all the
* topic descriptions succeed
*/
public KafkaFuture<Map<String, TopicDescription>> allTopicNames() {
return all(nameFutures);
}
/**
* @return A future map from topic ids to descriptions which can be used to check the
* status of individual description if the describe topic request used topic
* ids, otherwise return null, this request succeeds only if all the topic
* descriptions succeed
*/
public KafkaFuture<Map<Uuid, TopicDescription>> allTopicIds() {
return all(topicIdFutures);
}
/**
* Return a future which succeeds only if all the topic descriptions succeed.
*/
private static <T> KafkaFuture<Map<T, TopicDescription>> all(Map<T, KafkaFuture<TopicDescription>> futures) {
KafkaFuture<Void> future = KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
return future.
thenApply(v -> {
Map<T, TopicDescription> descriptions = new HashMap<>(futures.size());
for (Map.Entry<T, KafkaFuture<TopicDescription>> entry : futures.entrySet()) {
try {
descriptions.put(entry.getKey(), entry.getValue().get());
} catch (InterruptedException | ExecutionException e) {
// This should be unreachable, because allOf ensured that all the futures
// completed successfully.
throw new RuntimeException(e);
}
}
return descriptions;
});
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeTransactionsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#describeTransactions(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DescribeTransactionsOptions extends AbstractOptions<DescribeTransactionsOptions> {

    @Override
    public String toString() {
        // Only the inherited timeoutMs option exists on this class.
        return "DescribeTransactionsOptions(" + "timeoutMs=" + timeoutMs + ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeTransactionsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.admin.internals.CoordinatorKey;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
@InterfaceStability.Evolving
public class DescribeTransactionsResult {

    // One pending description per transactional ID, keyed by its coordinator lookup key.
    private final Map<CoordinatorKey, KafkaFuture<TransactionDescription>> futures;

    DescribeTransactionsResult(Map<CoordinatorKey, KafkaFuture<TransactionDescription>> futures) {
        this.futures = futures;
    }

    /**
     * Get the description of a specific transactional ID.
     *
     * @param transactionalId the transactional ID to describe
     * @return a future which completes when the transaction description of a particular
     *         transactional ID is available.
     * @throws IllegalArgumentException if the `transactionalId` was not included in the
     *         respective call to {@link Admin#describeTransactions(Collection, DescribeTransactionsOptions)}.
     */
    public KafkaFuture<TransactionDescription> description(String transactionalId) {
        KafkaFuture<TransactionDescription> future =
            futures.get(CoordinatorKey.byTransactionalId(transactionalId));
        if (future == null) {
            throw new IllegalArgumentException("TransactionalId " +
                "`" + transactionalId + "` was not included in the request");
        }
        return future;
    }

    /**
     * Get a future which returns a map of the transaction descriptions requested in the respective
     * call to {@link Admin#describeTransactions(Collection, DescribeTransactionsOptions)}.
     *
     * If the description fails on any of the transactional IDs in the request, then this future
     * will also fail.
     *
     * @return a future which either completes when all transaction descriptions complete or fails
     *         if any of the descriptions cannot be obtained
     */
    public KafkaFuture<Map<String, TransactionDescription>> all() {
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]))
            .thenApply(ignored -> {
                Map<String, TransactionDescription> descriptions = new HashMap<>(futures.size());
                futures.forEach((key, future) -> {
                    try {
                        descriptions.put(key.idValue, future.get());
                    } catch (InterruptedException | ExecutionException e) {
                        // Unreachable: allOf only completes after every future has succeeded.
                        throw new RuntimeException(e);
                    }
                });
                return descriptions;
            });
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeUserScramCredentialsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.List;
/**
* Options for {@link AdminClient#describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)}
*
* The API of this class is evolving. See {@link AdminClient} for details.
*/
@InterfaceStability.Evolving
public class DescribeUserScramCredentialsOptions extends AbstractOptions<DescribeUserScramCredentialsOptions> {
    // Intentionally empty: no options beyond the timeout inherited from AbstractOptions.
    // The subclass exists so options can be added later without breaking the API.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.errors.ResourceNotFoundException;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData;
import org.apache.kafka.common.protocol.Errors;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
/**
* The result of the {@link Admin#describeUserScramCredentials()} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class DescribeUserScramCredentialsResult {
private final KafkaFuture<DescribeUserScramCredentialsResponseData> dataFuture;
/**
* Package-private constructor
*
* @param dataFuture the future indicating response data from the call
*/
DescribeUserScramCredentialsResult(KafkaFuture<DescribeUserScramCredentialsResponseData> dataFuture) {
this.dataFuture = Objects.requireNonNull(dataFuture);
}
/**
*
* @return a future for the results of all described users with map keys (one per user) being consistent with the
* contents of the list returned by {@link #users()}. The future will complete successfully only if all such user
* descriptions complete successfully.
*/
public KafkaFuture<Map<String, UserScramCredentialsDescription>> all() {
final KafkaFutureImpl<Map<String, UserScramCredentialsDescription>> retval = new KafkaFutureImpl<>();
dataFuture.whenComplete((data, throwable) -> {
if (throwable != null) {
retval.completeExceptionally(throwable);
} else {
/* Check to make sure every individual described user succeeded. Note that a successfully described user
* is one that appears with *either* a NONE error code or a RESOURCE_NOT_FOUND error code. The
* RESOURCE_NOT_FOUND means the client explicitly requested a describe of that particular user but it could
* not be described because it does not exist; such a user will not appear as a key in the returned map.
*/
Optional<DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult> optionalFirstFailedDescribe =
data.results().stream().filter(result ->
result.errorCode() != Errors.NONE.code() && result.errorCode() != Errors.RESOURCE_NOT_FOUND.code()).findFirst();
if (optionalFirstFailedDescribe.isPresent()) {
retval.completeExceptionally(Errors.forCode(optionalFirstFailedDescribe.get().errorCode()).exception(optionalFirstFailedDescribe.get().errorMessage()));
} else {
Map<String, UserScramCredentialsDescription> retvalMap = new HashMap<>();
data.results().stream().forEach(userResult ->
retvalMap.put(userResult.user(), new UserScramCredentialsDescription(userResult.user(),
getScramCredentialInfosFor(userResult))));
retval.complete(retvalMap);
}
}
});
return retval;
}
/**
*
* @return a future indicating the distinct users that meet the request criteria and that have at least one
* credential. The future will not complete successfully if the user is not authorized to perform the describe
* operation; otherwise, it will complete successfully as long as the list of users with credentials can be
* successfully determined within some hard-coded timeout period. Note that the returned list will not include users
* that do not exist/have no credentials: a request to describe an explicit list of users, none of which existed/had
* a credential, will result in a future that returns an empty list being returned here. A returned list will
* include users that have a credential but that could not be described.
*/
public KafkaFuture<List<String>> users() {
final KafkaFutureImpl<List<String>> retval = new KafkaFutureImpl<>();
dataFuture.whenComplete((data, throwable) -> {
if (throwable != null) {
retval.completeExceptionally(throwable);
} else {
retval.complete(data.results().stream()
.filter(result -> result.errorCode() != Errors.RESOURCE_NOT_FOUND.code())
.map(result -> result.user()).collect(Collectors.toList()));
}
});
return retval;
}
/**
*
* @param userName the name of the user description being requested
* @return a future indicating the description results for the given user. The future will complete exceptionally if
* the future returned by {@link #users()} completes exceptionally. Note that if the given user does not exist in
* the list of described users then the returned future will complete exceptionally with
* {@link org.apache.kafka.common.errors.ResourceNotFoundException}.
*/
public KafkaFuture<UserScramCredentialsDescription> description(String userName) {
final KafkaFutureImpl<UserScramCredentialsDescription> retval = new KafkaFutureImpl<>();
dataFuture.whenComplete((data, throwable) -> {
if (throwable != null) {
retval.completeExceptionally(throwable);
} else {
// it is possible that there is no future for this user (for example, the original describe request was
// for users 1, 2, and 3 but this is looking for user 4), so explicitly take care of that case
Optional<DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult> optionalUserResult =
data.results().stream().filter(result -> result.user().equals(userName)).findFirst();
if (!optionalUserResult.isPresent()) {
retval.completeExceptionally(new ResourceNotFoundException("No such user: " + userName));
} else {
DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult userResult = optionalUserResult.get();
if (userResult.errorCode() != Errors.NONE.code()) {
// RESOURCE_NOT_FOUND is included here
retval.completeExceptionally(Errors.forCode(userResult.errorCode()).exception(userResult.errorMessage()));
} else {
retval.complete(new UserScramCredentialsDescription(userResult.user(), getScramCredentialInfosFor(userResult)));
}
}
}
});
return retval;
}
private static List<ScramCredentialInfo> getScramCredentialInfosFor(
DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult userResult) {
return userResult.credentialInfos().stream().map(c ->
new ScramCredentialInfo(ScramMechanism.fromType(c.mechanism()), c.iterations()))
.collect(Collectors.toList());
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ElectLeadersOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Set;
/**
* Options for {@link Admin#electLeaders(ElectionType, Set, ElectLeadersOptions)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
final public class ElectLeadersOptions extends AbstractOptions<ElectLeadersOptions> {
    // Intentionally empty: no options beyond the timeout inherited from AbstractOptions.
    // The subclass exists so options can be added later without breaking the API.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ElectLeadersResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.internals.KafkaFutureImpl;
/**
* The result of {@link Admin#electLeaders(ElectionType, Set, ElectLeadersOptions)}
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
final public class ElectLeadersResult {
    // Per-partition election outcome: empty Optional = success, present = the failure cause.
    private final KafkaFuture<Map<TopicPartition, Optional<Throwable>>> electionFuture;

    ElectLeadersResult(KafkaFuture<Map<TopicPartition, Optional<Throwable>>> electionFuture) {
        this.electionFuture = electionFuture;
    }

    /**
     * <p>Get a future for the topic partitions for which a leader election was attempted.
     * If the election succeeded then the value for a topic partition will be the empty Optional.
     * Otherwise the election failed and the Optional will be set with the error.</p>
     */
    public KafkaFuture<Map<TopicPartition, Optional<Throwable>>> partitions() {
        return electionFuture;
    }

    /**
     * Return a future which succeeds if all the topic elections succeed.
     */
    public KafkaFuture<Void> all() {
        final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
        // Lambda instead of an anonymous BiConsumer class, matching the style used by
        // the other admin result classes in this package.
        partitions().whenComplete((topicPartitions, throwable) -> {
            if (throwable != null) {
                result.completeExceptionally(throwable);
            } else {
                // Fail fast on the first partition whose election reported an error.
                for (Optional<Throwable> exception : topicPartitions.values()) {
                    if (exception.isPresent()) {
                        result.completeExceptionally(exception.get());
                        return;
                    }
                }
                result.complete(null);
            }
        });
        return result;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ExpireDelegationTokenOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* Options for {@link Admin#expireDelegationToken(byte[], ExpireDelegationTokenOptions)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class ExpireDelegationTokenOptions extends AbstractOptions<ExpireDelegationTokenOptions> {
    // Defaults to -1 when the caller sets no value; the interpretation of -1 is
    // broker-side (NOTE(review): presumably "expire immediately" — confirm against
    // the delegation-token server documentation).
    private long expiryTimePeriodMillis = -1L;

    /**
     * Set the expiry time period, in milliseconds, to request for the token.
     *
     * @param expiryTimePeriodMs the requested expiry time period in milliseconds
     * @return this options instance, to allow call chaining
     */
    public ExpireDelegationTokenOptions expiryTimePeriodMs(long expiryTimePeriodMs) {
        this.expiryTimePeriodMillis = expiryTimePeriodMs;
        return this;
    }

    /**
     * @return the requested expiry time period in milliseconds, or -1 if unset
     */
    public long expiryTimePeriodMs() {
        return expiryTimePeriodMillis;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ExpireDelegationTokenResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* The result of the {@link KafkaAdminClient#expireDelegationToken(byte[], ExpireDelegationTokenOptions)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class ExpireDelegationTokenResult {
    // Future yielding the token's new expiry timestamp once the broker responds.
    private final KafkaFuture<Long> expiryTimestampFuture;

    ExpireDelegationTokenResult(KafkaFuture<Long> expiryTimestamp) {
        this.expiryTimestampFuture = expiryTimestamp;
    }

    /**
     * Returns a future which yields expiry timestamp
     */
    public KafkaFuture<Long> expiryTimestamp() {
        return expiryTimestampFuture;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/FeatureMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import static java.util.stream.Collectors.joining;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
* Encapsulates details about finalized as well as supported features. This is particularly useful
* to hold the result returned by the {@link Admin#describeFeatures(DescribeFeaturesOptions)} API.
*/
public class FeatureMetadata {
    // Defensive copies are taken on construction and on every accessor so callers can
    // never mutate this object's state through a returned or supplied map.
    private final Map<String, FinalizedVersionRange> finalizedFeatures;
    private final Optional<Long> finalizedFeaturesEpoch;
    private final Map<String, SupportedVersionRange> supportedFeatures;

    FeatureMetadata(final Map<String, FinalizedVersionRange> finalizedFeatures,
                    final Optional<Long> finalizedFeaturesEpoch,
                    final Map<String, SupportedVersionRange> supportedFeatures) {
        this.finalizedFeatures = new HashMap<>(finalizedFeatures);
        this.finalizedFeaturesEpoch = finalizedFeaturesEpoch;
        this.supportedFeatures = new HashMap<>(supportedFeatures);
    }

    /**
     * Returns a map of finalized feature versions. Each entry in the map contains a key being a
     * feature name and the value being a range of version levels supported by every broker in the
     * cluster.
     */
    public Map<String, FinalizedVersionRange> finalizedFeatures() {
        return new HashMap<>(finalizedFeatures);
    }

    /**
     * The epoch for the finalized features.
     * If the returned value is empty, it means the finalized features are absent/unavailable.
     */
    public Optional<Long> finalizedFeaturesEpoch() {
        return finalizedFeaturesEpoch;
    }

    /**
     * Returns a map of supported feature versions. Each entry in the map contains a key being a
     * feature name and the value being a range of versions supported by a particular broker in the
     * cluster.
     */
    public Map<String, SupportedVersionRange> supportedFeatures() {
        return new HashMap<>(supportedFeatures);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other)
            return true;
        if (!(other instanceof FeatureMetadata))
            return false;
        FeatureMetadata that = (FeatureMetadata) other;
        return Objects.equals(finalizedFeatures, that.finalizedFeatures)
            && Objects.equals(finalizedFeaturesEpoch, that.finalizedFeaturesEpoch)
            && Objects.equals(supportedFeatures, that.supportedFeatures);
    }

    @Override
    public int hashCode() {
        return Objects.hash(finalizedFeatures, finalizedFeaturesEpoch, supportedFeatures);
    }

    // Formats a feature map as "{(name -> value), (name -> value)}" for toString().
    private static <V> String mapToString(final Map<String, V> featureVersionsMap) {
        String entries = featureVersionsMap.entrySet().stream()
            .map(entry -> String.format("(%s -> %s)", entry.getKey(), entry.getValue()))
            .collect(joining(", "));
        return "{" + entries + "}";
    }

    @Override
    public String toString() {
        return String.format(
            "FeatureMetadata{finalizedFeatures:%s, finalizedFeaturesEpoch:%s, supportedFeatures:%s}",
            mapToString(finalizedFeatures),
            finalizedFeaturesEpoch.map(Object::toString).orElse("<none>"),
            mapToString(supportedFeatures));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/FeatureUpdate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
/**
* Encapsulates details about an update to a finalized feature.
*/
/**
 * Encapsulates details about an update to a finalized feature.
 */
public class FeatureUpdate {
    private final short maxVersionLevel;
    private final UpgradeType upgradeType;

    /**
     * The kind of version change this update requests.
     */
    public enum UpgradeType {
        UNKNOWN(0),
        UPGRADE(1),
        SAFE_DOWNGRADE(2),
        UNSAFE_DOWNGRADE(3);

        // Wire-protocol code for this upgrade type.
        private final byte code;

        UpgradeType(int code) {
            this.code = (byte) code;
        }

        public byte code() {
            return code;
        }

        /**
         * Maps a wire-protocol code back to an {@link UpgradeType}.
         * Unrecognized codes map to {@link #UNKNOWN}.
         */
        public static UpgradeType fromCode(int code) {
            switch (code) {
                case 1:
                    return UPGRADE;
                case 2:
                    return SAFE_DOWNGRADE;
                case 3:
                    return UNSAFE_DOWNGRADE;
                default:
                    return UNKNOWN;
            }
        }
    }

    /**
     * @param maxVersionLevel   the new maximum version level for the finalized feature.
     *                          a value of zero is special and indicates that the update is intended to
     *                          delete the finalized feature, and should be accompanied by setting
     *                          the allowDowngrade flag to true.
     * @param allowDowngrade    - true, if this feature update was meant to downgrade the existing
     *                            maximum version level of the finalized feature. Only "safe" downgrades are
     *                            enabled with this boolean. See {@link FeatureUpdate#FeatureUpdate(short, UpgradeType)}
     *                          - false, otherwise.
     */
    @Deprecated
    public FeatureUpdate(final short maxVersionLevel, final boolean allowDowngrade) {
        this(maxVersionLevel, allowDowngrade ? UpgradeType.SAFE_DOWNGRADE : UpgradeType.UPGRADE);
    }

    /**
     * @param maxVersionLevel   The new maximum version level for the finalized feature.
     *                          a value of zero is special and indicates that the update is intended to
     *                          delete the finalized feature, and should be accompanied by setting
     *                          the upgradeType to safe or unsafe.
     * @param upgradeType       Indicate what kind of upgrade should be performed in this operation.
     *                          - UPGRADE: upgrading the feature level
     *                          - SAFE_DOWNGRADE: only downgrades which do not result in metadata loss are permitted
     *                          - UNSAFE_DOWNGRADE: any downgrade, including those which may result in metadata loss, are permitted
     * @throws IllegalArgumentException if maxVersionLevel is negative, or is zero with upgradeType UPGRADE
     */
    public FeatureUpdate(final short maxVersionLevel, final UpgradeType upgradeType) {
        // Enum constants are singletons, so identity comparison is the idiomatic way to test them.
        if (maxVersionLevel == 0 && upgradeType == UpgradeType.UPGRADE) {
            throw new IllegalArgumentException(String.format(
                "The downgradeType flag should be set to SAFE or UNSAFE when the provided maxVersionLevel:%d is < 1.",
                maxVersionLevel));
        }
        if (maxVersionLevel < 0) {
            throw new IllegalArgumentException("Cannot specify a negative version level.");
        }
        this.maxVersionLevel = maxVersionLevel;
        this.upgradeType = upgradeType;
    }

    public short maxVersionLevel() {
        return maxVersionLevel;
    }

    @Deprecated
    public boolean allowDowngrade() {
        return upgradeType != UpgradeType.UPGRADE;
    }

    public UpgradeType upgradeType() {
        return upgradeType;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (!(other instanceof FeatureUpdate)) {
            return false;
        }
        final FeatureUpdate that = (FeatureUpdate) other;
        // Enum fields compared by identity rather than equals().
        return this.maxVersionLevel == that.maxVersionLevel && this.upgradeType == that.upgradeType;
    }

    @Override
    public int hashCode() {
        return Objects.hash(maxVersionLevel, upgradeType);
    }

    @Override
    public String toString() {
        return String.format("FeatureUpdate{maxVersionLevel:%d, downgradeType:%s}", maxVersionLevel, upgradeType);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/FenceProducersOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
* Options for {@link Admin#fenceProducers(Collection, FenceProducersOptions)}
*
* The API of this class is evolving. See {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class FenceProducersOptions extends AbstractOptions<FenceProducersOptions> {

    /**
     * Renders these options (currently only the inherited request timeout) for logging.
     */
    @Override
    public String toString() {
        return "FenceProducersOptions{timeoutMs=" + timeoutMs + '}';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/FenceProducersResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.admin.internals.CoordinatorKey;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import java.util.Collection;
import java.util.Map;
import java.util.stream.Collectors;
/**
* The result of the {@link Admin#fenceProducers(Collection)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class FenceProducersResult {

    // One pending fencing result per transactional ID, keyed by its coordinator lookup key.
    private final Map<CoordinatorKey, KafkaFuture<ProducerIdAndEpoch>> futures;

    FenceProducersResult(Map<CoordinatorKey, KafkaFuture<ProducerIdAndEpoch>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from transactional ID to futures which can be used to check the status of
     * individual fencings.
     */
    public Map<String, KafkaFuture<Void>> fencedProducers() {
        return futures.entrySet().stream().collect(Collectors.toMap(
            entry -> entry.getKey().idValue,
            entry -> entry.getValue().thenApply(idAndEpoch -> null)
        ));
    }

    /**
     * Returns a future that provides the producer ID generated while initializing the given transaction when the request completes.
     */
    public KafkaFuture<Long> producerId(String transactionalId) {
        return findAndApply(transactionalId, idAndEpoch -> idAndEpoch.producerId);
    }

    /**
     * Returns a future that provides the epoch ID generated while initializing the given transaction when the request completes.
     */
    public KafkaFuture<Short> epochId(String transactionalId) {
        return findAndApply(transactionalId, idAndEpoch -> idAndEpoch.epoch);
    }

    /**
     * Return a future which succeeds only if all the producer fencings succeed.
     */
    public KafkaFuture<Void> all() {
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
    }

    // Looks up the future for the given transactional ID and maps its result through `mapper`.
    private <T> KafkaFuture<T> findAndApply(String transactionalId, KafkaFuture.BaseFunction<ProducerIdAndEpoch, T> mapper) {
        KafkaFuture<ProducerIdAndEpoch> future =
            futures.get(CoordinatorKey.byTransactionalId(transactionalId));
        if (future == null) {
            throw new IllegalArgumentException("TransactionalId " +
                "`" + transactionalId + "` was not included in the request");
        }
        return future.thenApply(mapper);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/FinalizedVersionRange.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
/**
 * Represents a range of version levels supported by every broker in a cluster for some feature.
 * <p>
 * Instances are immutable and safe to share across threads.
 */
public class FinalizedVersionRange {
    private final short minVersionLevel;
    private final short maxVersionLevel;

    /**
     * Raises an exception unless the following condition is met:
     * minVersionLevel >= 0 and maxVersionLevel >= 0 and maxVersionLevel >= minVersionLevel.
     * (A level of 0 is permitted; it is used to represent a deleted/absent finalized feature.)
     *
     * @param minVersionLevel The minimum version level value.
     * @param maxVersionLevel The maximum version level value.
     *
     * @throws IllegalArgumentException Raised when the condition described above is not met.
     */
    FinalizedVersionRange(final short minVersionLevel, final short maxVersionLevel) {
        // The exception message mirrors the exact invariant checked here.
        if (minVersionLevel < 0 || maxVersionLevel < 0 || maxVersionLevel < minVersionLevel) {
            throw new IllegalArgumentException(
                String.format(
                    "Expected minVersionLevel >= 0, maxVersionLevel >= 0 and" +
                    " maxVersionLevel >= minVersionLevel, but received" +
                    " minVersionLevel: %d, maxVersionLevel: %d", minVersionLevel, maxVersionLevel));
        }
        this.minVersionLevel = minVersionLevel;
        this.maxVersionLevel = maxVersionLevel;
    }

    /**
     * @return the minimum version level (inclusive lower bound of the range)
     */
    public short minVersionLevel() {
        return minVersionLevel;
    }

    /**
     * @return the maximum version level (inclusive upper bound of the range)
     */
    public short maxVersionLevel() {
        return maxVersionLevel;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (!(other instanceof FinalizedVersionRange)) {
            return false;
        }
        final FinalizedVersionRange that = (FinalizedVersionRange) other;
        return this.minVersionLevel == that.minVersionLevel &&
            this.maxVersionLevel == that.maxVersionLevel;
    }

    @Override
    public int hashCode() {
        return Objects.hash(minVersionLevel, maxVersionLevel);
    }

    @Override
    public String toString() {
        return String.format(
            "FinalizedVersionRange[min_version_level:%d, max_version_level:%d]",
            minVersionLevel,
            maxVersionLevel);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ForwardingAdmin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
 * {@code ForwardingAdmin} is the default value of {@code forwarding.admin.class} in MirrorMaker.
 * Extending this class lets users customize how MirrorMaker creates topics and access control
 * lists without having to implement the entire {@code Admin} interface: every operation is
 * forwarded, unchanged, to a decorated {@link KafkaAdminClient}.
 * Subclasses must keep a constructor with signature {@code (Map<String, Object> config)}, which
 * configures the decorated client and any other clients needed for external resource management.
 */
public class ForwardingAdmin implements Admin {
    // The decorated client every call is forwarded to.
    private final Admin underlying;

    public ForwardingAdmin(Map<String, Object> configs) {
        this.underlying = Admin.create(configs);
    }

    @Override
    public void close(Duration timeout) {
        underlying.close(timeout);
    }

    @Override
    public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
        return underlying.createTopics(newTopics, options);
    }

    @Override
    public DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) {
        return underlying.deleteTopics(topics, options);
    }

    @Override
    public ListTopicsResult listTopics(ListTopicsOptions options) {
        return underlying.listTopics(options);
    }

    @Override
    public DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) {
        return underlying.describeTopics(topics, options);
    }

    @Override
    public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
        return underlying.describeCluster(options);
    }

    @Override
    public DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options) {
        return underlying.describeAcls(filter, options);
    }

    @Override
    public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
        return underlying.createAcls(acls, options);
    }

    @Override
    public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
        return underlying.deleteAcls(filters, options);
    }

    @Override
    public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
        return underlying.describeConfigs(resources, options);
    }

    @Deprecated
    @Override
    public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, AlterConfigsOptions options) {
        return underlying.alterConfigs(configs, options);
    }

    @Override
    public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs, AlterConfigsOptions options) {
        return underlying.incrementalAlterConfigs(configs, options);
    }

    @Override
    public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, AlterReplicaLogDirsOptions options) {
        return underlying.alterReplicaLogDirs(replicaAssignment, options);
    }

    @Override
    public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
        return underlying.describeLogDirs(brokers, options);
    }

    @Override
    public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
        return underlying.describeReplicaLogDirs(replicas, options);
    }

    @Override
    public CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions, CreatePartitionsOptions options) {
        return underlying.createPartitions(newPartitions, options);
    }

    @Override
    public DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete, DeleteRecordsOptions options) {
        return underlying.deleteRecords(recordsToDelete, options);
    }

    @Override
    public CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options) {
        return underlying.createDelegationToken(options);
    }

    @Override
    public RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options) {
        return underlying.renewDelegationToken(hmac, options);
    }

    @Override
    public ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options) {
        return underlying.expireDelegationToken(hmac, options);
    }

    @Override
    public DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options) {
        return underlying.describeDelegationToken(options);
    }

    @Override
    public DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, DescribeConsumerGroupsOptions options) {
        return underlying.describeConsumerGroups(groupIds, options);
    }

    @Override
    public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
        return underlying.listConsumerGroups(options);
    }

    @Override
    public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options) {
        return underlying.listConsumerGroupOffsets(groupSpecs, options);
    }

    @Override
    public DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options) {
        return underlying.deleteConsumerGroups(groupIds, options);
    }

    @Override
    public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options) {
        return underlying.deleteConsumerGroupOffsets(groupId, partitions, options);
    }

    @Override
    public ElectLeadersResult electLeaders(ElectionType electionType, Set<TopicPartition> partitions, ElectLeadersOptions options) {
        return underlying.electLeaders(electionType, partitions, options);
    }

    @Override
    public AlterPartitionReassignmentsResult alterPartitionReassignments(Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments, AlterPartitionReassignmentsOptions options) {
        return underlying.alterPartitionReassignments(reassignments, options);
    }

    @Override
    public ListPartitionReassignmentsResult listPartitionReassignments(Optional<Set<TopicPartition>> partitions, ListPartitionReassignmentsOptions options) {
        return underlying.listPartitionReassignments(partitions, options);
    }

    @Override
    public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options) {
        return underlying.removeMembersFromConsumerGroup(groupId, options);
    }

    @Override
    public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options) {
        return underlying.alterConsumerGroupOffsets(groupId, offsets, options);
    }

    @Override
    public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) {
        return underlying.listOffsets(topicPartitionOffsets, options);
    }

    @Override
    public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) {
        return underlying.describeClientQuotas(filter, options);
    }

    @Override
    public AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options) {
        return underlying.alterClientQuotas(entries, options);
    }

    @Override
    public DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, DescribeUserScramCredentialsOptions options) {
        return underlying.describeUserScramCredentials(users, options);
    }

    @Override
    public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, AlterUserScramCredentialsOptions options) {
        return underlying.alterUserScramCredentials(alterations, options);
    }

    @Override
    public DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options) {
        return underlying.describeFeatures(options);
    }

    @Override
    public UpdateFeaturesResult updateFeatures(Map<String, FeatureUpdate> featureUpdates, UpdateFeaturesOptions options) {
        return underlying.updateFeatures(featureUpdates, options);
    }

    @Override
    public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options) {
        return underlying.describeMetadataQuorum(options);
    }

    @Override
    public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
        return underlying.unregisterBroker(brokerId, options);
    }

    @Override
    public DescribeProducersResult describeProducers(Collection<TopicPartition> partitions, DescribeProducersOptions options) {
        return underlying.describeProducers(partitions, options);
    }

    @Override
    public DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options) {
        return underlying.describeTransactions(transactionalIds, options);
    }

    @Override
    public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options) {
        return underlying.abortTransaction(spec, options);
    }

    @Override
    public ListTransactionsResult listTransactions(ListTransactionsOptions options) {
        return underlying.listTransactions(options);
    }

    @Override
    public FenceProducersResult fenceProducers(Collection<String> transactionalIds, FenceProducersOptions options) {
        return underlying.fenceProducers(transactionalIds, options);
    }

    @Override
    public Map<MetricName, ? extends Metric> metrics() {
        return underlying.metrics();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/KafkaAdminClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.ClientUtils;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.DefaultHostResolver;
import org.apache.kafka.clients.HostResolver;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.clients.NetworkClient;
import org.apache.kafka.clients.StaleMetadataException;
import org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig;
import org.apache.kafka.clients.admin.DeleteAclsResult.FilterResult;
import org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec.TimestampSpec;
import org.apache.kafka.clients.admin.internals.AbortTransactionHandler;
import org.apache.kafka.clients.admin.internals.AdminApiDriver;
import org.apache.kafka.clients.admin.internals.AdminApiHandler;
import org.apache.kafka.clients.admin.internals.AdminApiFuture;
import org.apache.kafka.clients.admin.internals.AdminApiFuture.SimpleAdminApiFuture;
import org.apache.kafka.clients.admin.internals.AdminMetadataManager;
import org.apache.kafka.clients.admin.internals.AllBrokersStrategy;
import org.apache.kafka.clients.admin.internals.AlterConsumerGroupOffsetsHandler;
import org.apache.kafka.clients.admin.internals.CoordinatorKey;
import org.apache.kafka.clients.admin.internals.DeleteConsumerGroupOffsetsHandler;
import org.apache.kafka.clients.admin.internals.DeleteConsumerGroupsHandler;
import org.apache.kafka.clients.admin.internals.DescribeConsumerGroupsHandler;
import org.apache.kafka.clients.admin.internals.DescribeProducersHandler;
import org.apache.kafka.clients.admin.internals.DescribeTransactionsHandler;
import org.apache.kafka.clients.admin.internals.FenceProducersHandler;
import org.apache.kafka.clients.admin.internals.ListConsumerGroupOffsetsHandler;
import org.apache.kafka.clients.admin.internals.ListTransactionsHandler;
import org.apache.kafka.clients.admin.internals.MetadataOperationContext;
import org.apache.kafka.clients.admin.internals.RemoveMembersFromConsumerGroupHandler;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.TopicCollection.TopicIdCollection;
import org.apache.kafka.common.TopicCollection.TopicNameCollection;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.KafkaStorageException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.UnacceptableCredentialException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.errors.UnsupportedSaslMechanismException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDir;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult;
import org.apache.kafka.common.message.AlterUserScramCredentialsRequestData;
import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey;
import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
import org.apache.kafka.common.message.CreateAclsRequestData;
import org.apache.kafka.common.message.CreateAclsRequestData.AclCreation;
import org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData.CreatableRenewers;
import org.apache.kafka.common.message.CreateDelegationTokenResponseData;
import org.apache.kafka.common.message.CreatePartitionsRequestData;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopicCollection;
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicConfigs;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult;
import org.apache.kafka.common.message.DeleteAclsRequestData;
import org.apache.kafka.common.message.DeleteAclsRequestData.DeleteAclsFilter;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl;
import org.apache.kafka.common.message.DeleteRecordsRequestData;
import org.apache.kafka.common.message.DeleteRecordsRequestData.DeleteRecordsPartition;
import org.apache.kafka.common.message.DeleteRecordsRequestData.DeleteRecordsTopic;
import org.apache.kafka.common.message.DeleteRecordsResponseData;
import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsTopicResult;
import org.apache.kafka.common.message.DeleteTopicsRequestData;
import org.apache.kafka.common.message.DeleteTopicsRequestData.DeleteTopicState;
import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult;
import org.apache.kafka.common.message.DescribeClusterRequestData;
import org.apache.kafka.common.message.DescribeConfigsRequestData;
import org.apache.kafka.common.message.DescribeConfigsResponseData;
import org.apache.kafka.common.message.DescribeLogDirsRequestData;
import org.apache.kafka.common.message.DescribeLogDirsRequestData.DescribableLogDirTopic;
import org.apache.kafka.common.message.DescribeLogDirsResponseData;
import org.apache.kafka.common.message.DescribeQuorumResponseData;
import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData;
import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData.UserName;
import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData;
import org.apache.kafka.common.message.ExpireDelegationTokenRequestData;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.ListGroupsRequestData;
import org.apache.kafka.common.message.ListGroupsResponseData;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.MetadataRequestData;
import org.apache.kafka.common.message.RenewDelegationTokenRequestData;
import org.apache.kafka.common.message.UnregisterBrokerRequestData;
import org.apache.kafka.common.message.UpdateFeaturesRequestData;
import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResult;
import org.apache.kafka.common.metrics.KafkaMetricsContext;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.MetricsContext;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.network.ChannelBuilder;
import org.apache.kafka.common.network.Selector;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.AlterClientQuotasRequest;
import org.apache.kafka.common.requests.AlterClientQuotasResponse;
import org.apache.kafka.common.requests.AlterConfigsRequest;
import org.apache.kafka.common.requests.AlterConfigsResponse;
import org.apache.kafka.common.requests.AlterPartitionReassignmentsRequest;
import org.apache.kafka.common.requests.AlterPartitionReassignmentsResponse;
import org.apache.kafka.common.requests.AlterReplicaLogDirsRequest;
import org.apache.kafka.common.requests.AlterReplicaLogDirsResponse;
import org.apache.kafka.common.requests.AlterUserScramCredentialsRequest;
import org.apache.kafka.common.requests.AlterUserScramCredentialsResponse;
import org.apache.kafka.common.requests.ApiError;
import org.apache.kafka.common.requests.ApiVersionsRequest;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.requests.CreateAclsRequest;
import org.apache.kafka.common.requests.CreateAclsResponse;
import org.apache.kafka.common.requests.CreateDelegationTokenRequest;
import org.apache.kafka.common.requests.CreateDelegationTokenResponse;
import org.apache.kafka.common.requests.CreatePartitionsRequest;
import org.apache.kafka.common.requests.CreatePartitionsResponse;
import org.apache.kafka.common.requests.CreateTopicsRequest;
import org.apache.kafka.common.requests.CreateTopicsResponse;
import org.apache.kafka.common.requests.DeleteAclsRequest;
import org.apache.kafka.common.requests.DeleteAclsResponse;
import org.apache.kafka.common.requests.DeleteRecordsRequest;
import org.apache.kafka.common.requests.DeleteRecordsResponse;
import org.apache.kafka.common.requests.DeleteTopicsRequest;
import org.apache.kafka.common.requests.DeleteTopicsResponse;
import org.apache.kafka.common.requests.DescribeAclsRequest;
import org.apache.kafka.common.requests.DescribeAclsResponse;
import org.apache.kafka.common.requests.DescribeClientQuotasRequest;
import org.apache.kafka.common.requests.DescribeClientQuotasResponse;
import org.apache.kafka.common.requests.DescribeClusterRequest;
import org.apache.kafka.common.requests.DescribeClusterResponse;
import org.apache.kafka.common.requests.DescribeConfigsRequest;
import org.apache.kafka.common.requests.DescribeConfigsResponse;
import org.apache.kafka.common.requests.DescribeDelegationTokenRequest;
import org.apache.kafka.common.requests.DescribeDelegationTokenResponse;
import org.apache.kafka.common.requests.DescribeLogDirsRequest;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import org.apache.kafka.common.requests.DescribeUserScramCredentialsRequest;
import org.apache.kafka.common.requests.DescribeUserScramCredentialsResponse;
import org.apache.kafka.common.requests.DescribeQuorumRequest;
import org.apache.kafka.common.requests.DescribeQuorumRequest.Builder;
import org.apache.kafka.common.requests.DescribeQuorumResponse;
import org.apache.kafka.common.requests.ElectLeadersRequest;
import org.apache.kafka.common.requests.ElectLeadersResponse;
import org.apache.kafka.common.requests.ExpireDelegationTokenRequest;
import org.apache.kafka.common.requests.ExpireDelegationTokenResponse;
import org.apache.kafka.common.requests.IncrementalAlterConfigsRequest;
import org.apache.kafka.common.requests.IncrementalAlterConfigsResponse;
import org.apache.kafka.common.requests.JoinGroupRequest;
import org.apache.kafka.common.requests.ListGroupsRequest;
import org.apache.kafka.common.requests.ListGroupsResponse;
import org.apache.kafka.common.requests.ListOffsetsRequest;
import org.apache.kafka.common.requests.ListOffsetsResponse;
import org.apache.kafka.common.requests.ListPartitionReassignmentsRequest;
import org.apache.kafka.common.requests.ListPartitionReassignmentsResponse;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RenewDelegationTokenRequest;
import org.apache.kafka.common.requests.RenewDelegationTokenResponse;
import org.apache.kafka.common.requests.UnregisterBrokerRequest;
import org.apache.kafka.common.requests.UnregisterBrokerResponse;
import org.apache.kafka.common.requests.UpdateFeaturesRequest;
import org.apache.kafka.common.requests.UpdateFeaturesResponse;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.scram.internals.ScramFormatter;
import org.apache.kafka.common.security.token.delegation.DelegationToken;
import org.apache.kafka.common.security.token.delegation.TokenInformation;
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.common.utils.KafkaThread;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
// ** Added by Superstream
import org.apache.kafka.common.superstream.Consts;
import org.apache.kafka.common.superstream.Superstream;
// Added by Superstream **
import java.net.InetSocketAddress;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.kafka.common.internals.Topic.CLUSTER_METADATA_TOPIC_NAME;
import static org.apache.kafka.common.internals.Topic.CLUSTER_METADATA_TOPIC_PARTITION;
import static org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignablePartition;
import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse;
import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse;
import static org.apache.kafka.common.message.ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics;
import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment;
import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment;
import static org.apache.kafka.common.requests.MetadataRequest.convertToMetadataRequestTopic;
import static org.apache.kafka.common.requests.MetadataRequest.convertTopicIdsToMetadataRequestTopic;
import static org.apache.kafka.common.utils.Utils.closeQuietly;
/**
* The default implementation of {@link Admin}. An instance of this class is created by invoking one of the
* {@code create()} methods in {@code AdminClient}. Users should not refer to this class directly.
*
* <p>
* This class is thread-safe.
* </p>
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class KafkaAdminClient extends AdminClient {
/**
* The next integer to use to name a KafkaAdminClient which the user hasn't specified an explicit name for.
*/
private static final AtomicInteger ADMIN_CLIENT_ID_SEQUENCE = new AtomicInteger(1);
/**
* The prefix to use for the JMX metrics for this class
*/
private static final String JMX_PREFIX = "kafka.admin.client";
/**
* An invalid shutdown time which indicates that a shutdown has not yet been performed.
*/
private static final long INVALID_SHUTDOWN_TIME = -1;
/**
* The default reason for a LeaveGroupRequest.
*/
static final String DEFAULT_LEAVE_GROUP_REASON = "member was removed by an admin";
/**
* Thread name prefix for admin client network thread
*/
static final String NETWORK_THREAD_PREFIX = "kafka-admin-client-thread";
/**
* Logger for this client instance; created from logContext so every line carries the client id.
*/
private final Logger log;
/**
* The log context used to prefix log messages with this client's id.
*/
private final LogContext logContext;
/**
* The default timeout to use for an operation.
*/
private final int defaultApiTimeoutMs;
/**
* The timeout to use for a single request.
*/
private final int requestTimeoutMs;
/**
* The name of this AdminClient instance.
*/
private final String clientId;
/**
* Provides the time.
*/
private final Time time;
/**
* The cluster metadata manager used by the KafkaClient.
*/
private final AdminMetadataManager metadataManager;
/**
* The metrics for this KafkaAdminClient.
*/
final Metrics metrics;
/**
* The network client to use.
*/
private final KafkaClient client;
/**
* The runnable used in the service thread for this admin client.
*/
private final AdminClientRunnable runnable;
/**
* The network service thread for this admin client.
*/
private final Thread thread;
/**
* During a close operation, this is the time at which we will time out all pending operations
* and force the RPC thread to exit. If the admin client is not closing, this will be 0.
*/
private final AtomicLong hardShutdownTimeMs = new AtomicLong(INVALID_SHUTDOWN_TIME);
/**
* A factory which creates TimeoutProcessors for the RPC thread.
*/
private final TimeoutProcessorFactory timeoutProcessorFactory;
/**
* The maximum number of times a retriable call is attempted (the "retries" config).
*/
private final int maxRetries;
/**
* The backoff in milliseconds between retries of a failed call (the "retry.backoff.ms" config).
*/
private final long retryBackoffMs;
// ** Added by Superstream
// Optional Superstream integration; remains null when no connection was supplied in the configs.
public Superstream superstreamConnection;
/**
 * Wire up the optional Superstream connection, if one was injected into the client configs.
 *
 * If a connection object is present under {@code Consts.superstreamConnectionKey}, this stores it,
 * forwards the full client configuration to it, and blocks until the Superstream configs arrive.
 *
 * @param configs          the raw client configs, possibly containing a Superstream connection
 * @param fullClientConfig the fully parsed admin client configuration
 */
public void configureSuperstream(Map<String, ?> configs, AdminClientConfig fullClientConfig) {
    Superstream superstreamConn = (Superstream) configs.get(Consts.superstreamConnectionKey);
    if (superstreamConn == null) {
        // Superstream is not enabled for this client.
        return;
    }
    this.superstreamConnection = superstreamConn;
    this.superstreamConnection.setFullClientConfigs(fullClientConfig.getValues());
    try {
        this.superstreamConnection.waitForSuperstreamConfigs(fullClientConfig);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers (and the constructor) can observe the interruption;
        // the original code swallowed it, which hides thread interruption from the owner.
        Thread.currentThread().interrupt();
        this.superstreamConnection.getSuperstreamPrintStream().println("Error waiting for admin client Superstream configs: " + e.getMessage());
    }
}
// Added by Superstream **
/**
* Get or create a list value from a map.
*
* @param map The map to get or create the element from.
* @param key The key.
* @param <K> The key type.
* @param <V> The value type.
* @return The list value.
*/
static <K, V> List<V> getOrCreateListValue(Map<K, List<V>> map, K key) {
    // Look the key up once; only allocate a new list when it is absent.
    List<V> values = map.get(key);
    if (values == null) {
        values = new LinkedList<>();
        map.put(key, values);
    }
    return values;
}
/**
* Send an exception to every element in a collection of KafkaFutureImpls.
*
* @param futures The collection of KafkaFutureImpl objects.
* @param exc The exception
* @param <T> The KafkaFutureImpl result type.
*/
private static <T> void completeAllExceptionally(Collection<KafkaFutureImpl<T>> futures, Throwable exc) {
    // Fail each future in iteration order with the same exception.
    for (KafkaFutureImpl<T> future : futures) {
        future.completeExceptionally(exc);
    }
}
/**
* Send an exception to all futures in the provided stream
*
* @param futures The stream of KafkaFutureImpl objects.
* @param exc The exception
* @param <T> The KafkaFutureImpl result type.
*/
private static <T> void completeAllExceptionally(Stream<KafkaFutureImpl<T>> futures, Throwable exc) {
    // Terminal operation: consumes the stream, failing every future with the same exception.
    futures.forEachOrdered(future -> future.completeExceptionally(exc));
}
/**
* Get the current time remaining before a deadline as an integer.
*
* @param now The current time in milliseconds.
* @param deadlineMs The deadline time in milliseconds.
* @return The time delta in milliseconds.
*/
static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) {
    // Clamp the 64-bit delta into the int range before narrowing, so a far-future
    // deadline saturates at Integer.MAX_VALUE instead of overflowing.
    long clamped = Math.min(Integer.MAX_VALUE, Math.max(Integer.MIN_VALUE, deadlineMs - now));
    return (int) clamped;
}
/**
* Generate the client id based on the configuration.
*
* @param config The configuration
*
* @return The client id
*/
static String generateClientId(AdminClientConfig config) {
    String configuredId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG);
    // When no explicit client.id was configured, synthesize a unique one.
    if (configuredId.isEmpty()) {
        return "adminclient-" + ADMIN_CLIENT_ID_SEQUENCE.getAndIncrement();
    }
    return configuredId;
}
// Package-private accessor for this client's (possibly auto-generated) client id.
String getClientId() {
return clientId;
}
/**
* Get the deadline for a particular call.
*
* @param now The current time in milliseconds.
* @param optionTimeoutMs The timeout option given by the user.
*
* @return The deadline in milliseconds.
*/
private long calcDeadlineMs(long now, Integer optionTimeoutMs) {
    // Fall back to the default API timeout when the caller did not set one;
    // a negative user-supplied timeout is treated as zero.
    return (optionTimeoutMs == null)
        ? now + defaultApiTimeoutMs
        : now + Math.max(0, optionTimeoutMs);
}
/**
* Pretty-print an exception.
*
* @param throwable The exception.
*
* @return A compact human-readable string.
*/
static String prettyPrintException(Throwable throwable) {
    if (throwable == null) {
        return "Null exception.";
    }
    // Include the message only when there is one.
    String name = throwable.getClass().getSimpleName();
    String message = throwable.getMessage();
    return (message == null) ? name : name + ": " + message;
}
// Convenience overload: create a client using the default host resolver.
static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory) {
return createInternal(config, timeoutProcessorFactory, null);
}
/**
* Build a fully wired KafkaAdminClient: metadata manager, metrics, selector and network client.
* On any failure, every resource constructed so far is closed before rethrowing as KafkaException.
*
* @param config                  the admin client configuration
* @param timeoutProcessorFactory factory for timeout processors; null selects the default
* @param hostResolver            DNS resolver override; null selects DefaultHostResolver
*/
static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory,
HostResolver hostResolver) {
Metrics metrics = null;
NetworkClient networkClient = null;
Time time = Time.SYSTEM;
String clientId = generateClientId(config);
ChannelBuilder channelBuilder = null;
Selector selector = null;
ApiVersions apiVersions = new ApiVersions();
LogContext logContext = createLogContext(clientId);
try {
// Since we only request node information, it's safe to pass true for allowAutoTopicCreation (and it
// simplifies communication with older brokers)
AdminMetadataManager metadataManager = new AdminMetadataManager(logContext,
config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG),
config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG));
List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
config.getList(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG),
config.getString(AdminClientConfig.CLIENT_DNS_LOOKUP_CONFIG));
// Seed the metadata manager with the bootstrap servers until real metadata arrives.
metadataManager.update(Cluster.bootstrap(addresses), time.milliseconds());
List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config);
Map<String, String> metricTags = Collections.singletonMap("client-id", clientId);
MetricConfig metricConfig = new MetricConfig().samples(config.getInt(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG))
.timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
.recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG)))
.tags(metricTags);
MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX,
config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
metrics = new Metrics(metricConfig, reporters, time, metricsContext);
String metricGrpPrefix = "admin-client";
channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext);
selector = new Selector(config.getLong(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
metrics, time, metricGrpPrefix, channelBuilder, logContext);
// Admin clients allow only one in-flight request per connection (the "1" below).
networkClient = new NetworkClient(
metadataManager.updater(),
null,
selector,
clientId,
1,
config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG),
config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
config.getInt(AdminClientConfig.SEND_BUFFER_CONFIG),
config.getInt(AdminClientConfig.RECEIVE_BUFFER_CONFIG),
(int) TimeUnit.HOURS.toMillis(1),
config.getLong(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG),
config.getLong(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG),
time,
true,
apiVersions,
null,
logContext,
(hostResolver == null) ? new DefaultHostResolver() : hostResolver);
return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient,
timeoutProcessorFactory, logContext);
} catch (Throwable exc) {
// Close in reverse-ish dependency order; closeQuietly tolerates nulls for
// resources that were never constructed.
closeQuietly(metrics, "Metrics");
closeQuietly(networkClient, "NetworkClient");
closeQuietly(selector, "Selector");
closeQuietly(channelBuilder, "ChannelBuilder");
throw new KafkaException("Failed to create new KafkaAdminClient", exc);
}
}
/**
* Create a KafkaAdminClient around a pre-built metadata manager and KafkaClient.
* Used when the network layer is supplied externally (e.g. by tests); only the
* Metrics instance is created here and cleaned up on failure.
*/
static KafkaAdminClient createInternal(AdminClientConfig config,
AdminMetadataManager metadataManager,
KafkaClient client,
Time time) {
Metrics metrics = null;
String clientId = generateClientId(config);
try {
metrics = new Metrics(new MetricConfig(), new LinkedList<>(), time);
LogContext logContext = createLogContext(clientId);
return new KafkaAdminClient(config, clientId, time, metadataManager, metrics,
client, null, logContext);
} catch (Throwable exc) {
// Metrics is the only resource this overload owns.
closeQuietly(metrics, "Metrics");
throw new KafkaException("Failed to create new KafkaAdminClient", exc);
}
}
static LogContext createLogContext(String clientId) {
    // Every log line from this client is prefixed with its id.
    return new LogContext(String.format("[AdminClient clientId=%s] ", clientId));
}
/**
* Private constructor; instances are built via the createInternal factories.
* Field assignment order matters: log must exist before configureDefaultApiTimeoutMs
* is NOT called (it runs right after requestTimeoutMs), and the network thread is
* started last, once every field it reads has been assigned.
*/
private KafkaAdminClient(AdminClientConfig config,
String clientId,
Time time,
AdminMetadataManager metadataManager,
Metrics metrics,
KafkaClient client,
TimeoutProcessorFactory timeoutProcessorFactory,
LogContext logContext) {
// ** Added by Superstream
configureSuperstream(config.originals(), config);
// Added by Superstream **
this.clientId = clientId;
this.log = logContext.logger(KafkaAdminClient.class);
this.logContext = logContext;
this.requestTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG);
this.defaultApiTimeoutMs = configureDefaultApiTimeoutMs(config);
this.time = time;
this.metadataManager = metadataManager;
this.metrics = metrics;
this.client = client;
this.runnable = new AdminClientRunnable();
String threadName = NETWORK_THREAD_PREFIX + " | " + clientId;
// Daemon thread so a forgotten close() does not keep the JVM alive.
this.thread = new KafkaThread(threadName, runnable, true);
this.timeoutProcessorFactory = (timeoutProcessorFactory == null) ?
new TimeoutProcessorFactory() : timeoutProcessorFactory;
this.maxRetries = config.getInt(AdminClientConfig.RETRIES_CONFIG);
this.retryBackoffMs = config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG);
config.logUnused();
AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());
log.debug("Kafka admin client initialized");
// Start the I/O thread only after all fields it reads are assigned.
thread.start();
}
/**
 * Determine the effective default.api.timeout.ms value.
 *
 * If default.api.timeout.ms was explicitly configured to a value smaller than request.timeout.ms,
 * raise a ConfigException, since a single request would then be allowed to outlive the whole
 * operation. If it was not explicitly configured but its default is smaller than the configured
 * request.timeout.ms, log a warning and use request.timeout.ms instead. Otherwise, use the
 * configured value.
 *
 * @param config The configuration
 * @return the effective default API timeout in milliseconds
 * @throws ConfigException if an explicit default.api.timeout.ms is smaller than request.timeout.ms
 */
private int configureDefaultApiTimeoutMs(AdminClientConfig config) {
    int requestTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG);
    int defaultApiTimeoutMs = config.getInt(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
    if (defaultApiTimeoutMs < requestTimeoutMs) {
        if (config.originals().containsKey(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)) {
            throw new ConfigException("The specified value of " + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG +
                " must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + ".");
        } else {
            // Log the local value read from the config, NOT this.defaultApiTimeoutMs: this method
            // runs during construction before the field is assigned, so the field would always
            // print 0 here.
            log.warn("Overriding the default value for {} ({}) with the explicitly configured request timeout {}",
                AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, defaultApiTimeoutMs,
                requestTimeoutMs);
            return requestTimeoutMs;
        }
    }
    return defaultApiTimeoutMs;
}
@Override
public void close(Duration timeout) {
long waitTimeMs = timeout.toMillis();
if (waitTimeMs < 0)
throw new IllegalArgumentException("The timeout cannot be negative.");
waitTimeMs = Math.min(TimeUnit.DAYS.toMillis(365), waitTimeMs); // Limit the timeout to a year.
long now = time.milliseconds();
long newHardShutdownTimeMs = now + waitTimeMs;
long prev = INVALID_SHUTDOWN_TIME;
// CAS loop: install our hard-shutdown deadline unless a concurrent close()
// already installed an earlier one, in which case we keep the earlier deadline.
while (true) {
if (hardShutdownTimeMs.compareAndSet(prev, newHardShutdownTimeMs)) {
if (prev == INVALID_SHUTDOWN_TIME) {
log.debug("Initiating close operation.");
} else {
log.debug("Moving hard shutdown time forward.");
}
client.wakeup(); // Wake the thread, if it is blocked inside poll().
break;
}
prev = hardShutdownTimeMs.get();
if (prev < newHardShutdownTimeMs) {
log.debug("Hard shutdown time is already earlier than requested.");
newHardShutdownTimeMs = prev;
break;
}
}
if (log.isDebugEnabled()) {
long deltaMs = Math.max(0, newHardShutdownTimeMs - time.milliseconds());
log.debug("Waiting for the I/O thread to exit. Hard shutdown in {} ms.", deltaMs);
}
try {
// close() can be called by AdminClient thread when it invokes callback. That will
// cause deadlock, so check for that condition.
if (Thread.currentThread() != thread) {
// Wait for the thread to be joined.
thread.join(waitTimeMs);
}
log.debug("Kafka admin client closed.");
} catch (InterruptedException e) {
log.debug("Interrupted while joining I/O thread", e);
// Preserve the interrupt status for the caller.
Thread.currentThread().interrupt();
}
}
/**
* An interface for providing a node for a call.
*/
private interface NodeProvider {
// Returns the node the call should be sent to, or null if none can be chosen yet.
Node provide();
}
// Node provider used for metadata fetches: any node works, so pick the least loaded one.
private class MetadataUpdateNodeIdProvider implements NodeProvider {
@Override
public Node provide() {
return client.leastLoadedNode(time.milliseconds());
}
}
/**
 * Provides the node with a fixed id, or schedules a metadata update when that node is unknown.
 */
private class ConstantNodeIdProvider implements NodeProvider {
    private final int nodeId;
    ConstantNodeIdProvider(int nodeId) {
        this.nodeId = nodeId;
    }
    @Override
    public Node provide() {
        if (metadataManager.isReady()) {
            // Look the node up once instead of twice (the original called nodeById twice).
            Node node = metadataManager.nodeById(nodeId);
            if (node != null) {
                return node;
            }
        }
        // If we can't find the node with the given constant ID, we schedule a
        // metadata update and hope it appears. This behavior is useful for avoiding
        // flaky behavior in tests when the cluster is starting up and not all nodes
        // have appeared.
        metadataManager.requestUpdate();
        return null;
    }
}
/**
 * Provides the controller node, or schedules a metadata update when it is unknown.
 */
private class ControllerNodeProvider implements NodeProvider {
    @Override
    public Node provide() {
        if (metadataManager.isReady()) {
            // Read the controller once instead of calling the accessor twice.
            Node controller = metadataManager.controller();
            if (controller != null) {
                return controller;
            }
        }
        // Controller unknown or metadata stale: request a refresh and postpone the call.
        metadataManager.requestUpdate();
        return null;
    }
}
/**
 * Provides the least loaded node, or schedules a metadata refresh when metadata is stale.
 */
private class LeastLoadedNodeProvider implements NodeProvider {
    @Override
    public Node provide() {
        if (!metadataManager.isReady()) {
            metadataManager.requestUpdate();
            return null;
        }
        // May return null if all nodes are busy; node assignment is then postponed.
        return client.leastLoadedNode(time.milliseconds());
    }
}
/**
* A single admin RPC: carries its deadline, retry state, and the node-selection
* strategy, plus abstract hooks for building the request and handling the outcome.
*/
abstract class Call {
// True for client-internal calls (e.g. metadata fetches) that do not block shutdown.
private final boolean internal;
// Human-readable name used in logs and timeout messages.
private final String callName;
// Absolute wall-clock deadline in milliseconds for the whole operation.
private final long deadlineMs;
// Strategy for choosing which node to send this call to.
private final NodeProvider nodeProvider;
// Number of attempts made so far; incremented on each failure.
protected int tries;
// The node this call is currently assigned to, or null if unassigned.
private Node curNode = null;
// Earliest time the next retry attempt is allowed (for retry backoff).
private long nextAllowedTryMs;
Call(boolean internal,
String callName,
long nextAllowedTryMs,
int tries,
long deadlineMs,
NodeProvider nodeProvider
) {
this.internal = internal;
this.callName = callName;
this.nextAllowedTryMs = nextAllowedTryMs;
this.tries = tries;
this.deadlineMs = deadlineMs;
this.nodeProvider = nodeProvider;
}
Call(boolean internal, String callName, long deadlineMs, NodeProvider nodeProvider) {
this(internal, callName, 0, 0, deadlineMs, nodeProvider);
}
Call(String callName, long deadlineMs, NodeProvider nodeProvider) {
this(false, callName, 0, 0, deadlineMs, nodeProvider);
}
Call(String callName, long nextAllowedTryMs, int tries, long deadlineMs, NodeProvider nodeProvider) {
this(false, callName, nextAllowedTryMs, tries, deadlineMs, nodeProvider);
}
// The node this call is currently assigned to, or null.
protected Node curNode() {
return curNode;
}
/**
* Handle a failure.
*
* Depending on what the exception is and how many times we have already tried, we may choose to
* fail the Call, or retry it. It is important to print the stack traces here in some cases,
* since they are not necessarily preserved in ApiVersionException objects.
*
* @param now The current time in milliseconds.
* @param throwable The failure exception.
*/
final void fail(long now, Throwable throwable) {
if (curNode != null) {
// Drop any readiness deadline tracked for the node we were assigned to.
runnable.nodeReadyDeadlines.remove(curNode);
curNode = null;
}
// If the admin client is closing, we can't retry.
if (runnable.closing) {
handleFailure(throwable);
return;
}
// If this is an UnsupportedVersionException that we can retry, do so. Note that a
// protocol downgrade will not count against the total number of retries we get for
// this RPC. That is why 'tries' is not incremented.
if ((throwable instanceof UnsupportedVersionException) &&
handleUnsupportedVersionException((UnsupportedVersionException) throwable)) {
log.debug("{} attempting protocol downgrade and then retry.", this);
runnable.pendingCalls.add(this);
return;
}
tries++;
nextAllowedTryMs = now + retryBackoffMs;
// If the call has timed out, fail.
if (calcTimeoutMsRemainingAsInt(now, deadlineMs) <= 0) {
handleTimeoutFailure(now, throwable);
return;
}
// If the exception is not retriable, fail.
if (!(throwable instanceof RetriableException)) {
if (log.isDebugEnabled()) {
log.debug("{} failed with non-retriable exception after {} attempt(s)", this, tries,
new Exception(prettyPrintException(throwable)));
}
handleFailure(throwable);
return;
}
// If we are out of retries, fail.
if (tries > maxRetries) {
handleTimeoutFailure(now, throwable);
return;
}
if (log.isDebugEnabled()) {
log.debug("{} failed: {}. Beginning retry #{}",
this, prettyPrintException(throwable), tries);
}
maybeRetry(now, throwable);
}
// Requeue this call for another attempt; subclasses may override to customize retry routing.
void maybeRetry(long now, Throwable throwable) {
runnable.pendingCalls.add(this);
}
// Fail the call with a TimeoutException, preserving the original cause unless it
// already is a TimeoutException.
private void handleTimeoutFailure(long now, Throwable cause) {
if (log.isDebugEnabled()) {
log.debug("{} timed out at {} after {} attempt(s)", this, now, tries,
new Exception(prettyPrintException(cause)));
}
if (cause instanceof TimeoutException) {
handleFailure(cause);
} else {
handleFailure(new TimeoutException(this + " timed out at " + now
+ " after " + tries + " attempt(s)", cause));
}
}
/**
* Create an AbstractRequest.Builder for this Call.
*
* @param timeoutMs The timeout in milliseconds.
*
* @return The AbstractRequest builder.
*/
abstract AbstractRequest.Builder<?> createRequest(int timeoutMs);
/**
* Process the call response.
*
* @param abstractResponse The AbstractResponse.
*
*/
abstract void handleResponse(AbstractResponse abstractResponse);
/**
* Handle a failure. This will only be called if the failure exception was not
* retriable, or if we hit a timeout.
*
* @param throwable The exception.
*/
abstract void handleFailure(Throwable throwable);
/**
* Handle an UnsupportedVersionException.
*
* @param exception The exception.
*
* @return True if the exception can be handled; false otherwise.
*/
boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
return false;
}
@Override
public String toString() {
return "Call(callName=" + callName + ", deadlineMs=" + deadlineMs +
", tries=" + tries + ", nextAllowedTryMs=" + nextAllowedTryMs + ")";
}
public boolean isInternal() {
return internal;
}
}
// Factory seam for TimeoutProcessor creation; an alternate factory can be injected
// via the createInternal(...) timeoutProcessorFactory parameter.
static class TimeoutProcessorFactory {
TimeoutProcessor create(long now) {
return new TimeoutProcessor(now);
}
}
/**
* Checks collections of calls against their deadlines at a fixed point in time,
* tracking the minimum remaining time until the next deadline.
*/
static class TimeoutProcessor {
/**
* The current time in milliseconds.
*/
private final long now;
/**
* The number of milliseconds until the next timeout.
*/
private int nextTimeoutMs;
/**
* Create a new timeout processor.
*
* @param now The current time in milliseconds since the epoch.
*/
TimeoutProcessor(long now) {
this.now = now;
this.nextTimeoutMs = Integer.MAX_VALUE;
}
/**
* Check for calls which have timed out.
* Timed out calls will be removed and failed.
* The remaining milliseconds until the next timeout will be updated.
*
* @param calls The collection of calls.
* @param msg The message prefix used in the TimeoutException given to failed calls.
*
* @return The number of calls which were timed out.
*/
int handleTimeouts(Collection<Call> calls, String msg) {
int numTimedOut = 0;
for (Iterator<Call> iter = calls.iterator(); iter.hasNext(); ) {
Call call = iter.next();
int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs);
if (remainingMs < 0) {
call.fail(now, new TimeoutException(msg + " Call: " + call.callName));
iter.remove();
numTimedOut++;
} else {
nextTimeoutMs = Math.min(nextTimeoutMs, remainingMs);
}
}
return numTimedOut;
}
/**
* Check whether a call should be timed out.
* The remaining milliseconds until the next timeout will be updated.
*
* @param call The call.
*
* @return True if the call should be timed out.
*/
boolean callHasExpired(Call call) {
int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs);
if (remainingMs < 0)
return true;
nextTimeoutMs = Math.min(nextTimeoutMs, remainingMs);
return false;
}
// Milliseconds until the nearest deadline seen so far (Integer.MAX_VALUE if none).
int nextTimeoutMs() {
return nextTimeoutMs;
}
}
private final class AdminClientRunnable implements Runnable {
/**
* Calls which have not yet been assigned to a node.
* Only accessed from this thread.
*/
private final ArrayList<Call> pendingCalls = new ArrayList<>();
/**
* Maps nodes to calls that we want to send.
* Only accessed from this thread.
*/
private final Map<Node, List<Call>> callsToSend = new HashMap<>();
/**
* Maps node ID strings to calls that have been sent.
* Only accessed from this thread.
*/
private final Map<String, Call> callsInFlight = new HashMap<>();
/**
* Maps correlation IDs to calls that have been sent.
* Only accessed from this thread.
*/
private final Map<Integer, Call> correlationIdToCalls = new HashMap<>();
/**
* Pending calls. Protected by the object monitor.
*/
private final List<Call> newCalls = new LinkedList<>();
/**
* Maps nodes to their readiness deadlines. A node will appear in this
* map if there are callsToSend which are waiting for it to be ready, and there
* are no calls in flight using the node.
* (Note: keyed by Node, not by node ID string.)
*/
private final Map<Node, Long> nodeReadyDeadlines = new HashMap<>();
/**
* Whether the admin client is closing.
*/
private volatile boolean closing = false;
/**
 * Fail and remove the calls in pendingCalls whose deadlines have passed.
 *
 * @param processor The timeout processor.
 */
private void timeoutPendingCalls(TimeoutProcessor processor) {
    int expired = processor.handleTimeouts(pendingCalls, "Timed out waiting for a node assignment.");
    if (expired > 0) {
        log.debug("Timed out {} pending calls.", expired);
    }
}
/**
 * Fail and remove expired calls that have already been assigned to nodes.
 *
 * @param processor The timeout processor.
 * @return the number of calls that were timed out.
 */
private int timeoutCallsToSend(TimeoutProcessor processor) {
    int numTimedOut = callsToSend.values().stream()
        .mapToInt(callList -> processor.handleTimeouts(callList,
            "Timed out waiting to send the call."))
        .sum();
    if (numTimedOut > 0) {
        log.debug("Timed out {} call(s) with assigned nodes.", numTimedOut);
    }
    return numTimedOut;
}
/**
* Drain all the calls from newCalls into pendingCalls.
*
* This function holds the lock for the minimum amount of time, to avoid blocking
* users of AdminClient who will also take the lock to add new calls.
*/
private synchronized void drainNewCalls() {
// newCalls is the only collection shared with user threads; everything else is
// touched solely by this I/O thread.
transitionToPendingAndClearList(newCalls);
}
/**
 * Move every call in the given list into pendingCalls, clearing each call's
 * node assignment, then empty the input list.
 *
 * @param calls The calls to add.
 */
private void transitionToPendingAndClearList(List<Call> calls) {
    calls.forEach(call -> {
        call.curNode = null;
        pendingCalls.add(call);
    });
    calls.clear();
}
/**
* Choose nodes for the calls in the pendingCalls list.
*
* @param now The current time in milliseconds.
* @return The minimum time until a call is ready to be retried if any of the pending
* calls are backing off after a failure
*/
private long maybeDrainPendingCalls(long now) {
long pollTimeout = Long.MAX_VALUE;
log.trace("Trying to choose nodes for {} at {}", pendingCalls, now);
Iterator<Call> pendingIter = pendingCalls.iterator();
while (pendingIter.hasNext()) {
Call call = pendingIter.next();
// If the call is being retried, await the proper backoff before finding the node
if (now < call.nextAllowedTryMs) {
pollTimeout = Math.min(pollTimeout, call.nextAllowedTryMs - now);
} else if (maybeDrainPendingCall(call, now)) {
// The call was assigned to a node (or failed); remove it from pendingCalls.
pendingIter.remove();
}
}
return pollTimeout;
}
/**
 * Try to assign a node to a single pending call. Returns true when the call either
 * moved into the callsToSend collection or was failed, and false when it must stay
 * pending because no node is available yet.
 */
private boolean maybeDrainPendingCall(Call call, long now) {
    try {
        Node node = call.nodeProvider.provide();
        if (node == null) {
            log.trace("Unable to assign {} to a node.", call);
            return false;
        }
        log.trace("Assigned {} to node {}", call, node);
        call.curNode = node;
        getOrCreateListValue(callsToSend, node).add(call);
        return true;
    } catch (Throwable t) {
        // Handle authentication errors while choosing nodes.
        log.debug("Unable to choose node for {}", call, t);
        call.fail(now, t);
        return true;
    }
}
/**
* Send the calls which are ready.
*
* @param now The current time in milliseconds.
* @return The minimum timeout we need for poll().
*/
private long sendEligibleCalls(long now) {
long pollTimeout = Long.MAX_VALUE;
for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Node, List<Call>> entry = iter.next();
List<Call> calls = entry.getValue();
if (calls.isEmpty()) {
iter.remove();
continue;
}
Node node = entry.getKey();
// At most one request may be in flight per node at a time.
if (callsInFlight.containsKey(node.idString())) {
log.trace("Still waiting for other calls to finish on node {}.", node);
nodeReadyDeadlines.remove(node);
continue;
}
if (!client.ready(node, now)) {
// The node's connection is not ready yet. Track (and enforce) a deadline for
// how long we are willing to wait for it to become ready.
Long deadline = nodeReadyDeadlines.get(node);
if (deadline != null) {
if (now >= deadline) {
log.info("Disconnecting from {} and revoking {} node assignment(s) " +
"because the node is taking too long to become ready.",
node.idString(), calls.size());
transitionToPendingAndClearList(calls);
client.disconnect(node.idString());
nodeReadyDeadlines.remove(node);
iter.remove();
continue;
}
pollTimeout = Math.min(pollTimeout, deadline - now);
} else {
nodeReadyDeadlines.put(node, now + requestTimeoutMs);
}
long nodeTimeout = client.pollDelayMs(node, now);
pollTimeout = Math.min(pollTimeout, nodeTimeout);
log.trace("Client is not ready to send to {}. Must delay {} ms", node, nodeTimeout);
continue;
}
// Subtract the time we spent waiting for the node to become ready from
// the total request time.
int remainingRequestTime;
Long deadlineMs = nodeReadyDeadlines.remove(node);
if (deadlineMs == null) {
remainingRequestTime = requestTimeoutMs;
} else {
remainingRequestTime = calcTimeoutMsRemainingAsInt(now, deadlineMs);
}
// Send the first call whose request can be built; calls that fail during
// request construction are failed and skipped.
while (!calls.isEmpty()) {
Call call = calls.remove(0);
int timeoutMs = Math.min(remainingRequestTime,
calcTimeoutMsRemainingAsInt(now, call.deadlineMs));
AbstractRequest.Builder<?> requestBuilder;
try {
requestBuilder = call.createRequest(timeoutMs);
} catch (Throwable t) {
call.fail(now, new KafkaException(String.format(
"Internal error sending %s to %s.", call.callName, node), t));
continue;
}
ClientRequest clientRequest = client.newClientRequest(node.idString(),
requestBuilder, now, true, timeoutMs, null);
log.debug("Sending {} to {}. correlationId={}, timeoutMs={}",
requestBuilder, node, clientRequest.correlationId(), timeoutMs);
client.send(clientRequest, now);
callsInFlight.put(node.idString(), call);
correlationIdToCalls.put(clientRequest.correlationId(), call);
// Only one call per node may be in flight; stop after sending one.
break;
}
}
return pollTimeout;
}
/**
* Time out expired calls that are in flight.
*
* Calls that are in flight may have been partially or completely sent over the wire. They may
* even be in the process of being processed by the remote server. At the moment, our only option
* to time them out is to close the entire connection.
*
* @param processor The timeout processor.
*/
private void timeoutCallsInFlight(TimeoutProcessor processor) {
int numTimedOut = 0;
for (Map.Entry<String, Call> entry : callsInFlight.entrySet()) {
Call call = entry.getValue();
String nodeId = entry.getKey();
if (processor.callHasExpired(call)) {
log.info("Disconnecting from {} due to timeout while awaiting {}", nodeId, call);
client.disconnect(nodeId);
numTimedOut++;
// We don't remove anything from the callsInFlight data structure. Because the connection
// has been closed, the calls should be returned by the next client#poll(),
// and handled at that point.
}
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) in flight.", numTimedOut);
}
/**
* Handle responses from the server.
*
* @param now The current time in milliseconds.
* @param responses The latest responses from KafkaClient.
**/
private void handleResponses(long now, List<ClientResponse> responses) {
for (ClientResponse response : responses) {
int correlationId = response.requestHeader().correlationId();
Call call = correlationIdToCalls.get(correlationId);
if (call == null) {
// If the server returns information about a correlation ID we didn't use yet,
// an internal server error has occurred. Close the connection and log an error message.
log.error("Internal server error on {}: server returned information about unknown " +
"correlation ID {}, requestHeader = {}", response.destination(), correlationId,
response.requestHeader());
client.disconnect(response.destination());
continue;
}
// Stop tracking this call.
correlationIdToCalls.remove(correlationId);
// Two-arg remove: only removes when the mapped call matches; a mismatch
// indicates inconsistent internal bookkeeping.
if (!callsInFlight.remove(response.destination(), call)) {
log.error("Internal server error on {}: ignoring call {} in correlationIdToCall " +
"that did not exist in callsInFlight", response.destination(), call);
continue;
}
// Handle the result of the call. This may involve retrying the call, if we got a
// retriable exception.
if (response.versionMismatch() != null) {
call.fail(now, response.versionMismatch());
} else if (response.wasDisconnected()) {
// Prefer surfacing an authentication error over a generic disconnect.
AuthenticationException authException = client.authenticationException(call.curNode());
if (authException != null) {
call.fail(now, authException);
} else {
call.fail(now, new DisconnectException(String.format(
"Cancelled %s request with correlation id %s due to node %s being disconnected",
call.callName, correlationId, response.destination())));
}
} else {
try {
call.handleResponse(response.responseBody());
if (log.isTraceEnabled())
log.trace("{} got response {}", call, response.responseBody());
} catch (Throwable t) {
if (log.isTraceEnabled())
log.trace("{} handleResponse failed with {}", call, prettyPrintException(t));
call.fail(now, t);
}
}
}
}
/**
* Unassign calls that have not yet been sent based on some predicate. For example, this
* is used to reassign the calls that have been assigned to a disconnected node.
*
* @param shouldUnassign Condition for reassignment. If the predicate is true, then the calls will
* be put back in the pendingCalls collection and they will be reassigned
*/
private void unassignUnsentCalls(Predicate<Node> shouldUnassign) {
for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Node, List<Call>> entry = iter.next();
Node node = entry.getKey();
List<Call> awaitingCalls = entry.getValue();
if (awaitingCalls.isEmpty()) {
// Garbage-collect entries whose call lists have drained.
iter.remove();
} else if (shouldUnassign.test(node)) {
nodeReadyDeadlines.remove(node);
transitionToPendingAndClearList(awaitingCalls);
iter.remove();
}
}
}
// True when the collection contains at least one user-initiated (non-internal) call.
private boolean hasActiveExternalCalls(Collection<Call> calls) {
    return calls.stream().anyMatch(call -> !call.isInternal());
}
/**
 * Return true if there are currently active external calls in any of the
 * pending, awaiting-send, or in-flight collections.
 */
private boolean hasActiveExternalCalls() {
    // Short-circuits in the same order as the original checks.
    return hasActiveExternalCalls(pendingCalls)
        || callsToSend.values().stream().anyMatch(this::hasActiveExternalCalls)
        || hasActiveExternalCalls(correlationIdToCalls.values());
}
// Decide whether the I/O thread may stop: either all external work is done,
// or the hard-shutdown deadline has been reached.
private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
    if (!hasActiveExternalCalls()) {
        log.trace("All work has been completed, and the I/O thread is now exiting.");
        return true;
    }
    if (now < curHardShutdownTimeMs) {
        log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now);
        return false;
    }
    log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
    return true;
}
@Override
public void run() {
log.debug("Thread starting");
try {
processRequests();
} finally {
// Shutdown path: mark closing so Call#fail stops retrying, then time out
// everything still queued with an "already Long.MAX_VALUE" processor so all
// remaining calls are treated as expired.
closing = true;
AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
int numTimedOut = 0;
TimeoutProcessor timeoutProcessor = new TimeoutProcessor(Long.MAX_VALUE);
synchronized (this) {
// newCalls is shared with user threads, so drain it under the monitor.
numTimedOut += timeoutProcessor.handleTimeouts(newCalls, "The AdminClient thread has exited.");
}
numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited.");
numTimedOut += timeoutCallsToSend(timeoutProcessor);
numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(),
"The AdminClient thread has exited.");
if (numTimedOut > 0) {
log.info("Timed out {} remaining operation(s) during close.", numTimedOut);
}
closeQuietly(client, "KafkaClient");
closeQuietly(metrics, "Metrics");
log.debug("Exiting AdminClientRunnable thread.");
}
}
/**
 * The event loop of the I/O thread. Each iteration: drains newly enqueued calls,
 * checks for shutdown, expires timed-out calls, assigns pending calls to nodes,
 * schedules metadata refreshes, sends eligible requests, polls the network, and
 * dispatches responses. Loops until threadShouldExit() says to stop.
 */
private void processRequests() {
    long now = time.milliseconds();
    while (true) {
        // Copy newCalls into pendingCalls.
        drainNewCalls();
        // Check if the AdminClient thread should shut down.
        long curHardShutdownTimeMs = hardShutdownTimeMs.get();
        if ((curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) && threadShouldExit(now, curHardShutdownTimeMs))
            break;
        // Handle timeouts.
        TimeoutProcessor timeoutProcessor = timeoutProcessorFactory.create(now);
        timeoutPendingCalls(timeoutProcessor);
        timeoutCallsToSend(timeoutProcessor);
        timeoutCallsInFlight(timeoutProcessor);
        // Cap the poll timeout (1200000 ms = 20 minutes) so the thread never blocks
        // indefinitely even when no call has a nearer deadline.
        long pollTimeout = Math.min(1200000, timeoutProcessor.nextTimeoutMs());
        if (curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) {
            // Wake up in time to honor the hard-shutdown deadline.
            pollTimeout = Math.min(pollTimeout, curHardShutdownTimeMs - now);
        }
        // Choose nodes for our pending calls.
        pollTimeout = Math.min(pollTimeout, maybeDrainPendingCalls(now));
        long metadataFetchDelayMs = metadataManager.metadataFetchDelayMs(now);
        if (metadataFetchDelayMs == 0) {
            metadataManager.transitionToUpdatePending(now);
            Call metadataCall = makeMetadataCall(now);
            // Create a new metadata fetch call and add it to the end of pendingCalls.
            // Assign a node for just the new call (we handled the other pending nodes above).
            if (!maybeDrainPendingCall(metadataCall, now))
                pendingCalls.add(metadataCall);
        }
        pollTimeout = Math.min(pollTimeout, sendEligibleCalls(now));
        if (metadataFetchDelayMs > 0) {
            // Wake up when the next metadata refresh becomes due.
            pollTimeout = Math.min(pollTimeout, metadataFetchDelayMs);
        }
        // Ensure that we use a small poll timeout if there are pending calls which need to be sent
        if (!pendingCalls.isEmpty())
            pollTimeout = Math.min(pollTimeout, retryBackoffMs);
        // Wait for network responses.
        log.trace("Entering KafkaClient#poll(timeout={})", pollTimeout);
        List<ClientResponse> responses = client.poll(Math.max(0L, pollTimeout), now);
        log.trace("KafkaClient#poll retrieved {} response(s)", responses.size());
        // unassign calls to disconnected nodes
        unassignUnsentCalls(client::connectionFailed);
        // Update the current time and handle the latest responses.
        now = time.milliseconds();
        handleResponses(now, responses);
    }
}
/**
 * Queue a call for sending.
 *
 * If the AdminClient thread has exited, this will fail. Otherwise, it will succeed (even
 * if the AdminClient is shutting down). This function should called when retrying an
 * existing call.
 *
 * @param call The new call object.
 * @param now The current time in milliseconds.
 */
void enqueue(Call call, long now) {
    // Refuse calls that have already exhausted their retry budget.
    if (call.tries > maxRetries) {
        log.debug("Max retries {} for {} reached", maxRetries, call);
        call.handleTimeoutFailure(time.milliseconds(), new TimeoutException(
            "Exceeded maxRetries after " + call.tries + " tries."));
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("Queueing {} with a timeout {} ms from now.", call,
            Math.min(requestTimeoutMs, call.deadlineMs - now));
    }
    boolean queued = false;
    synchronized (this) {
        if (!closing) {
            newCalls.add(call);
            queued = true;
        }
    }
    if (!queued) {
        // The I/O thread is gone; fail the call immediately.
        log.debug("The AdminClient thread has exited. Timing out {}.", call);
        call.handleTimeoutFailure(time.milliseconds(),
            new TimeoutException("The AdminClient thread has exited."));
    } else {
        client.wakeup(); // wake the thread if it is in poll()
    }
}
/**
 * Initiate a new call.
 *
 * This will fail if the AdminClient is scheduled to shut down.
 *
 * @param call The new call object.
 * @param now The current time in milliseconds.
 */
void call(Call call, long now) {
    if (hardShutdownTimeMs.get() == INVALID_SHUTDOWN_TIME) {
        enqueue(call, now);
        return;
    }
    // A shutdown has been scheduled: new calls are rejected immediately.
    log.debug("The AdminClient is not accepting new calls. Timing out {}.", call);
    call.handleTimeoutFailure(time.milliseconds(),
        new TimeoutException("The AdminClient thread is not accepting new calls."));
}
/**
 * Create a new metadata call.
 *
 * NOTE(review): the leading {@code true} constructor argument appears to mark the call
 * as internal (so it is ignored by hasActiveExternalCalls) — confirm against the Call
 * constructor. The node is chosen by {@link MetadataUpdateNodeIdProvider}.
 */
private Call makeMetadataCall(long now) {
    return new Call(true, "fetchMetadata", calcDeadlineMs(now, requestTimeoutMs),
        new MetadataUpdateNodeIdProvider()) {
        @Override
        public MetadataRequest.Builder createRequest(int timeoutMs) {
            // Since this only requests node information, it's safe to pass true
            // for allowAutoTopicCreation (and it simplifies communication with
            // older brokers)
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(Collections.emptyList())
                .setAllowAutoTopicCreation(true));
        }
        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            long now = time.milliseconds();
            // Publish the refreshed cluster view to the metadata manager.
            metadataManager.update(response.buildCluster(), now);
            // Unassign all unsent requests after a metadata refresh to allow for a new
            // destination to be selected from the new metadata
            unassignUnsentCalls(node -> true);
        }
        @Override
        public void handleFailure(Throwable e) {
            // Record the failure so the metadata manager can back off and retry later.
            metadataManager.updateFailed(e);
        }
    };
}
}
/**
 * Returns true if a topic name cannot be represented in an RPC. This function does NOT check
 * whether the name is too long, contains invalid characters, etc. It is better to enforce
 * those policies on the server, so that they can be changed in the future if needed.
 */
private static boolean topicNameIsUnrepresentable(String topicName) {
    if (topicName == null)
        return true;
    return topicName.isEmpty();
}
/**
 * Returns true if a topic ID cannot be represented in an RPC: it is null or equal to the
 * reserved all-zero sentinel {@code Uuid.ZERO_UUID}.
 */
private static boolean topicIdIsUnrepresentable(Uuid topicId) {
    // Use value equality rather than reference equality (==): callers may hold a distinct
    // Uuid instance carrying the zero value, which would slip past an identity check.
    // This also matches handleDeleteTopicsUsingIds, which compares with
    // topicId.equals(Uuid.ZERO_UUID).
    return topicId == null || Uuid.ZERO_UUID.equals(topicId);
}
// for testing
/**
 * Returns the number of calls currently awaiting node assignment on the I/O thread.
 * Package-private; visible for testing only.
 */
int numPendingCalls() {
    return runnable.pendingCalls.size();
}
/**
 * Fail futures in the given stream which are not done.
 * Used when a response handler expected a result for some entity but no result was present.
 */
private static <K, V> void completeUnrealizedFutures(
    Stream<Map.Entry<K, KafkaFutureImpl<V>>> futures,
    Function<K, String> messageFormatter) {
    futures.forEach(entry -> {
        KafkaFutureImpl<V> future = entry.getValue();
        // Only fail futures the server response never completed.
        if (!future.isDone()) {
            future.completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey())));
        }
    });
}
/**
 * Fail futures in the given Map which were retried due to exceeding quota. We propagate
 * the initial error back to the caller if the request timed out.
 */
private static <K, V> void maybeCompleteQuotaExceededException(
    boolean shouldRetryOnQuotaViolation,
    Throwable throwable,
    Map<K, KafkaFutureImpl<V>> futures,
    Map<K, ThrottlingQuotaExceededException> quotaExceededExceptions,
    int throttleTimeDelta) {
    // Only relevant when retries were enabled and the overall request timed out.
    if (!shouldRetryOnQuotaViolation || !(throwable instanceof TimeoutException))
        return;
    for (Map.Entry<K, ThrottlingQuotaExceededException> entry : quotaExceededExceptions.entrySet()) {
        ThrottlingQuotaExceededException original = entry.getValue();
        // Report the throttle time remaining after the time already spent waiting.
        int remainingThrottleTimeMs = Math.max(0, original.throttleTimeMs() - throttleTimeDelta);
        futures.get(entry.getKey()).completeExceptionally(
            new ThrottlingQuotaExceededException(remainingThrottleTimeMs, original.getMessage()));
    }
}
/**
 * Create the given topics. Topic names that cannot be represented in a request fail
 * immediately with InvalidTopicException; each remaining distinct name gets one future,
 * and all valid topics are sent to the controller in a single CreateTopics request.
 */
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
                                       final CreateTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
    final CreatableTopicCollection topics = new CreatableTopicCollection();
    for (NewTopic newTopic : newTopics) {
        if (topicNameIsUnrepresentable(newTopic.name())) {
            // Fail unrepresentable names locally without contacting the broker.
            KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                newTopic.name() + "' cannot be represented in a request."));
            topicFutures.put(newTopic.name(), future);
        } else if (!topicFutures.containsKey(newTopic.name())) {
            // First occurrence of this name: register a pending future and include it
            // in the request; duplicates share the first future.
            topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
            topics.add(newTopic.convertToCreatableTopic());
        }
    }
    if (!topics.isEmpty()) {
        final long now = time.milliseconds();
        final long deadline = calcDeadlineMs(now, options.timeoutMs());
        final Call call = getCreateTopicsCall(options, topicFutures, topics,
            Collections.emptyMap(), now, deadline);
        runnable.call(call, now);
    }
    // Return a snapshot so later internal mutation cannot affect the result.
    return new CreateTopicsResult(new HashMap<>(topicFutures));
}
/**
 * Build the CreateTopics call sent to the controller.
 *
 * @param options                 request options (validateOnly, quota-retry behavior)
 * @param futures                 per-topic futures to complete from the response
 * @param topics                  topics to include in this request
 * @param quotaExceededExceptions quota errors from a previous attempt, used to restore
 *                                the original error if a retry later times out
 * @param now                     time when the original request was first issued
 * @param deadline                absolute deadline shared by the original call and retries
 */
private Call getCreateTopicsCall(final CreateTopicsOptions options,
                                 final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> futures,
                                 final CreatableTopicCollection topics,
                                 final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                 final long now,
                                 final long deadline) {
    return new Call("createTopics", deadline, new ControllerNodeProvider()) {
        @Override
        public CreateTopicsRequest.Builder createRequest(int timeoutMs) {
            return new CreateTopicsRequest.Builder(
                new CreateTopicsRequestData()
                    .setTopics(topics)
                    .setTimeoutMs(timeoutMs)
                    .setValidateOnly(options.shouldValidateOnly()));
        }
        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse;
            final CreatableTopicCollection retryTopics = new CreatableTopicCollection();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (CreatableTopicResult result : response.data().topics()) {
                KafkaFutureImpl<TopicMetadataAndConfig> future = futures.get(result.name());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                // Queue this topic for another attempt, remembering the quota
                                // error in case the retry eventually times out.
                                retryTopics.add(topics.find(result.name()).duplicate());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        // Success: build the metadata/config payload, degrading gracefully
                        // when the broker could not supply configs or is too old.
                        TopicMetadataAndConfig topicMetadataAndConfig;
                        if (result.topicConfigErrorCode() != Errors.NONE.code()) {
                            topicMetadataAndConfig = new TopicMetadataAndConfig(
                                Errors.forCode(result.topicConfigErrorCode()).exception());
                        } else if (result.numPartitions() == CreateTopicsResult.UNKNOWN) {
                            topicMetadataAndConfig = new TopicMetadataAndConfig(new UnsupportedVersionException(
                                "Topic metadata and configs in CreateTopics response not supported"));
                        } else {
                            List<CreatableTopicConfigs> configs = result.configs();
                            Config topicConfig = new Config(configs.stream()
                                .map(this::configEntry)
                                .collect(Collectors.toSet()));
                            topicMetadataAndConfig = new TopicMetadataAndConfig(result.topicId(), result.numPartitions(),
                                result.replicationFactor(),
                                topicConfig);
                        }
                        future.complete(topicMetadataAndConfig);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(),
                    topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getCreateTopicsCall(options, futures, retryTopics,
                    retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }
        // Convert a wire-format topic config into the public ConfigEntry type.
        private ConfigEntry configEntry(CreatableTopicConfigs config) {
            return new ConfigEntry(
                config.name(),
                config.value(),
                configSource(DescribeConfigsResponse.ConfigSource.forId(config.configSource())),
                config.isSensitive(),
                config.readOnly(),
                Collections.emptyList(),
                null,
                null);
        }
        @Override
        void handleFailure(Throwable throwable) {
            // If there were any topics retries due to a quota exceeded exception, we propagate
            // the initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
/**
 * Delete topics identified either by name or by topic ID, dispatching to the
 * matching handler based on the concrete TopicCollection subtype.
 */
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics,
                                       final DeleteTopicsOptions options) {
    if (topics instanceof TopicNameCollection)
        return DeleteTopicsResult.ofTopicNames(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
    if (topics instanceof TopicIdCollection)
        return DeleteTopicsResult.ofTopicIds(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
    throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for deleteTopics.");
}
/**
 * Delete topics by name. Unrepresentable names fail immediately; each remaining distinct
 * name gets one future, and the valid names are sent to the controller in one request.
 */
private Map<String, KafkaFuture<Void>> handleDeleteTopicsUsingNames(final Collection<String> topicNames,
                                                                    final DeleteTopicsOptions options) {
    final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicNames.size());
    final List<String> validTopicNames = new ArrayList<>(topicNames.size());
    for (String name : topicNames) {
        if (topicNameIsUnrepresentable(name)) {
            // Fail this name locally; the broker never sees it.
            KafkaFutureImpl<Void> failed = new KafkaFutureImpl<>();
            failed.completeExceptionally(new InvalidTopicException("The given topic name '" +
                name + "' cannot be represented in a request."));
            topicFutures.put(name, failed);
        } else if (!topicFutures.containsKey(name)) {
            topicFutures.put(name, new KafkaFutureImpl<>());
            validTopicNames.add(name);
        }
    }
    if (!validTopicNames.isEmpty()) {
        final long now = time.milliseconds();
        final long deadline = calcDeadlineMs(now, options.timeoutMs());
        runnable.call(getDeleteTopicsCall(options, topicFutures, validTopicNames,
            Collections.emptyMap(), now, deadline), now);
    }
    return new HashMap<>(topicFutures);
}
/**
 * Delete topics by ID. A zero UUID fails immediately; each remaining distinct ID gets
 * one future, and the valid IDs are sent to the controller in one request.
 */
private Map<Uuid, KafkaFuture<Void>> handleDeleteTopicsUsingIds(final Collection<Uuid> topicIds,
                                                                final DeleteTopicsOptions options) {
    final Map<Uuid, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicIds.size());
    final List<Uuid> validTopicIds = new ArrayList<>(topicIds.size());
    for (Uuid id : topicIds) {
        if (id.equals(Uuid.ZERO_UUID)) {
            // The all-zero sentinel cannot identify a topic; fail it locally.
            KafkaFutureImpl<Void> failed = new KafkaFutureImpl<>();
            failed.completeExceptionally(new InvalidTopicException("The given topic ID '" +
                id + "' cannot be represented in a request."));
            topicFutures.put(id, failed);
        } else if (!topicFutures.containsKey(id)) {
            topicFutures.put(id, new KafkaFutureImpl<>());
            validTopicIds.add(id);
        }
    }
    if (!validTopicIds.isEmpty()) {
        final long now = time.milliseconds();
        final long deadline = calcDeadlineMs(now, options.timeoutMs());
        runnable.call(getDeleteTopicsWithIdsCall(options, topicFutures, validTopicIds,
            Collections.emptyMap(), now, deadline), now);
    }
    return new HashMap<>(topicFutures);
}
/**
 * Build the DeleteTopics (by name) call sent to the controller.
 *
 * @param options                 request options (quota-retry behavior)
 * @param futures                 per-topic futures to complete from the response
 * @param topics                  topic names to delete in this request
 * @param quotaExceededExceptions quota errors from a previous attempt, used to restore
 *                                the original error if a retry later times out
 * @param now                     time when the original request was first issued
 * @param deadline                absolute deadline shared by the original call and retries
 */
private Call getDeleteTopicsCall(final DeleteTopicsOptions options,
                                 final Map<String, KafkaFutureImpl<Void>> futures,
                                 final List<String> topics,
                                 final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                 final long now,
                                 final long deadline) {
    return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {
        @Override
        DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
            return new DeleteTopicsRequest.Builder(
                new DeleteTopicsRequestData()
                    .setTopicNames(topics)
                    .setTimeoutMs(timeoutMs));
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
            final List<String> retryTopics = new ArrayList<>();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (DeletableTopicResult result : response.data().responses()) {
                KafkaFutureImpl<Void> future = futures.get(result.name());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                // Queue this topic for another attempt, remembering the quota
                                // error in case the retry eventually times out.
                                retryTopics.add(result.name());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(),
                    topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getDeleteTopicsCall(options, futures, retryTopics,
                    retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // If there were any topics retries due to a quota exceeded exception, we propagate
            // the initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
/**
 * Build the DeleteTopics (by topic ID) call sent to the controller.
 * Mirrors getDeleteTopicsCall but keys results by topic ID instead of name.
 *
 * @param options                 request options (quota-retry behavior)
 * @param futures                 per-topic futures to complete from the response
 * @param topicIds                topic IDs to delete in this request
 * @param quotaExceededExceptions quota errors from a previous attempt, used to restore
 *                                the original error if a retry later times out
 * @param now                     time when the original request was first issued
 * @param deadline                absolute deadline shared by the original call and retries
 */
private Call getDeleteTopicsWithIdsCall(final DeleteTopicsOptions options,
                                        final Map<Uuid, KafkaFutureImpl<Void>> futures,
                                        final List<Uuid> topicIds,
                                        final Map<Uuid, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                        final long now,
                                        final long deadline) {
    return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {
        @Override
        DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
            return new DeleteTopicsRequest.Builder(
                new DeleteTopicsRequestData()
                    .setTopics(topicIds.stream().map(
                        topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList()))
                    .setTimeoutMs(timeoutMs));
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
            final List<Uuid> retryTopics = new ArrayList<>();
            final Map<Uuid, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (DeletableTopicResult result : response.data().responses()) {
                KafkaFutureImpl<Void> future = futures.get(result.topicId());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic ID {}", result.topicId());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                // Queue this topic for another attempt, remembering the quota
                                // error in case the retry eventually times out.
                                retryTopics.add(result.topicId());
                                retryTopicQuotaExceededExceptions.put(result.topicId(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(),
                    topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getDeleteTopicsWithIdsCall(options, futures, retryTopics,
                    retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // If there were any topics retries due to a quota exceeded exception, we propagate
            // the initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
/**
 * List the topics in the cluster by fetching metadata for all topics from the
 * least-loaded node, filtering internal topics unless the options request them.
 */
@Override
public ListTopicsResult listTopics(final ListTopicsOptions options) {
    final KafkaFutureImpl<Map<String, TopicListing>> topicListingFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    runnable.call(new Call("listTopics", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            // Fetch metadata for every topic; filtering happens client-side below.
            return MetadataRequest.Builder.allTopics();
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, TopicListing> topicListing = new HashMap<>();
            for (MetadataResponse.TopicMetadata topicMetadata : response.topicMetadata()) {
                String topicName = topicMetadata.topic();
                boolean isInternal = topicMetadata.isInternal();
                // Reuse the captured flag instead of calling isInternal() a second time.
                // Skip internal topics unless the caller explicitly asked for them.
                if (!isInternal || options.shouldListInternal())
                    topicListing.put(topicName, new TopicListing(topicName, topicMetadata.topicId(), isInternal));
            }
            topicListingFuture.complete(topicListing);
        }
        @Override
        void handleFailure(Throwable throwable) {
            topicListingFuture.completeExceptionally(throwable);
        }
    }, now);
    return new ListTopicsResult(topicListingFuture);
}
/**
 * Describe topics identified either by name or by topic ID, dispatching to the
 * matching handler based on the concrete TopicCollection subtype.
 */
@Override
public DescribeTopicsResult describeTopics(final TopicCollection topics, DescribeTopicsOptions options) {
    if (topics instanceof TopicNameCollection)
        return DescribeTopicsResult.ofTopicNames(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames(), options));
    if (topics instanceof TopicIdCollection)
        return DescribeTopicsResult.ofTopicIds(handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options));
    throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics.");
}
/**
 * Describe topics by name. Unrepresentable names fail immediately; the rest are resolved
 * via a Metadata request to the least-loaded node. If the broker is too old to support
 * disabling auto topic creation, the call transparently retries with an all-topics
 * metadata request.
 */
private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNames(final Collection<String> topicNames, DescribeTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
    final ArrayList<String> topicNamesList = new ArrayList<>();
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            // Fail unrepresentable names locally without contacting the broker.
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<>());
            topicNamesList.add(topicName);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        // Flipped to false on UnsupportedVersionException so the retry uses the
        // legacy all-topics metadata request.
        private boolean supportsDisablingTopicCreation = true;
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            if (supportsDisablingTopicCreation)
                return new MetadataRequest.Builder(new MetadataRequestData()
                    .setTopics(convertToMetadataRequestTopic(topicNamesList))
                    .setAllowAutoTopicCreation(false)
                    .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
            else
                return MetadataRequest.Builder.allTopics();
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<String, Errors> errors = response.errors();
            for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                String topicName = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                Errors topicError = errors.get(topicName);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                if (!cluster.topics().contains(topicName)) {
                    future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found."));
                    continue;
                }
                Uuid topicId = cluster.topicId(topicName);
                // NOTE(review): unchecked Optional.get() — presumably always present once the
                // topic is known to be in the cluster; confirm topicAuthorizedOperations contract.
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }
        @Override
        boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
            // Retry once with the legacy request shape; give up on a second failure.
            if (supportsDisablingTopicCreation) {
                supportsDisablingTopicCreation = false;
                return true;
            }
            return false;
        }
        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNamesList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
/**
 * Describe topics by topic ID. Unrepresentable IDs (null or zero UUID) fail immediately;
 * the rest are resolved via a Metadata request keyed by topic ID.
 */
private Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsByIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {
    final Map<Uuid, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicIds.size());
    final List<Uuid> topicIdsList = new ArrayList<>();
    for (Uuid topicId : topicIds) {
        if (topicIdIsUnrepresentable(topicId)) {
            // Fail unrepresentable IDs locally without contacting the broker.
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic id '" +
                topicId + "' cannot be represented in a request."));
            topicFutures.put(topicId, future);
        } else if (!topicFutures.containsKey(topicId)) {
            topicFutures.put(topicId, new KafkaFutureImpl<>());
            topicIdsList.add(topicId);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList))
                .setAllowAutoTopicCreation(false)
                .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<Uuid, Errors> errors = response.errorsByTopicId();
            for (Map.Entry<Uuid, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                Uuid topicId = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                // Map the ID back to a name; an unknown ID fails this future only.
                String topicName = cluster.topicName(topicId);
                if (topicName == null) {
                    future.completeExceptionally(new InvalidTopicException("TopicId " + topicId + " not found."));
                    continue;
                }
                Errors topicError = errors.get(topicId);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                // NOTE(review): unchecked Optional.get() — presumably always present once the
                // topic is known to be in the cluster; confirm topicAuthorizedOperations contract.
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicIdsList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
/**
 * Assemble a TopicDescription from the cluster metadata: per-partition leader/replica/ISR
 * info sorted by partition number, plus the internal-topic flag and ACL operations.
 */
private TopicDescription getTopicDescriptionFromCluster(Cluster cluster, String topicName, Uuid topicId,
                                                        Integer authorizedOperations) {
    boolean isInternal = cluster.internalTopics().contains(topicName);
    List<TopicPartitionInfo> partitions = cluster.partitionsForTopic(topicName).stream()
        .map(info -> new TopicPartitionInfo(
            info.partition(), leader(info), Arrays.asList(info.replicas()),
            Arrays.asList(info.inSyncReplicas())))
        .sorted(Comparator.comparingInt(TopicPartitionInfo::partition))
        .collect(Collectors.toList());
    return new TopicDescription(topicName, isInternal, partitions, validAclOperations(authorizedOperations), topicId);
}
/**
 * Return the partition's leader node, or null when there is no leader
 * (missing or the no-node sentinel).
 */
private Node leader(PartitionInfo partitionInfo) {
    Node candidate = partitionInfo.leader();
    return (candidate == null || candidate.id() == Node.noNode().id()) ? null : candidate;
}
/**
 * Describe the cluster: broker nodes, controller, cluster ID, and (optionally) the
 * authorized ACL operations. Prefers the DescribeCluster RPC and falls back to a
 * Metadata request when the broker does not support it.
 */
@Override
public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
    final KafkaFutureImpl<Collection<Node>> describeClusterFuture = new KafkaFutureImpl<>();
    final KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
    final KafkaFutureImpl<String> clusterIdFuture = new KafkaFutureImpl<>();
    final KafkaFutureImpl<Set<AclOperation>> authorizedOperationsFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    runnable.call(new Call("listNodes", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        // Flipped to true on UnsupportedVersionException so the retry falls back
        // to the older Metadata request.
        private boolean useMetadataRequest = false;
        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            if (!useMetadataRequest) {
                return new DescribeClusterRequest.Builder(new DescribeClusterRequestData()
                    .setIncludeClusterAuthorizedOperations(
                        options.includeAuthorizedOperations()));
            } else {
                // Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it
                // simplifies communication with older brokers)
                return new MetadataRequest.Builder(new MetadataRequestData()
                    .setTopics(Collections.emptyList())
                    .setAllowAutoTopicCreation(true)
                    .setIncludeClusterAuthorizedOperations(
                        options.includeAuthorizedOperations()));
            }
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            if (!useMetadataRequest) {
                DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse;
                Errors error = Errors.forCode(response.data().errorCode());
                if (error != Errors.NONE) {
                    // Top-level error: fail all four futures through handleFailure.
                    ApiError apiError = new ApiError(error, response.data().errorMessage());
                    handleFailure(apiError.exception());
                    return;
                }
                Map<Integer, Node> nodes = response.nodes();
                describeClusterFuture.complete(nodes.values());
                // Controller is null if controller id is equal to NO_CONTROLLER_ID
                controllerFuture.complete(nodes.get(response.data().controllerId()));
                clusterIdFuture.complete(response.data().clusterId());
                authorizedOperationsFuture.complete(
                    validAclOperations(response.data().clusterAuthorizedOperations()));
            } else {
                // Fallback path: derive the same answers from a Metadata response.
                MetadataResponse response = (MetadataResponse) abstractResponse;
                describeClusterFuture.complete(response.brokers());
                controllerFuture.complete(controller(response));
                clusterIdFuture.complete(response.clusterId());
                authorizedOperationsFuture.complete(
                    validAclOperations(response.clusterAuthorizedOperations()));
            }
        }
        // Map the metadata response's controller to null when absent or sentinel.
        private Node controller(MetadataResponse response) {
            if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID)
                return null;
            return response.controller();
        }
        @Override
        void handleFailure(Throwable throwable) {
            describeClusterFuture.completeExceptionally(throwable);
            controllerFuture.completeExceptionally(throwable);
            clusterIdFuture.completeExceptionally(throwable);
            authorizedOperationsFuture.completeExceptionally(throwable);
        }
        @Override
        boolean handleUnsupportedVersionException(final UnsupportedVersionException exception) {
            // Retry once via the Metadata request; give up on a second version failure.
            if (useMetadataRequest) {
                return false;
            }
            useMetadataRequest = true;
            return true;
        }
    }, now);
    return new DescribeClusterResult(describeClusterFuture, controllerFuture, clusterIdFuture,
        authorizedOperationsFuture);
}
/**
 * Describe ACL bindings matching the given filter. A filter containing UNKNOWN
 * elements fails immediately without contacting the broker.
 */
@Override
public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options) {
    if (filter.isUnknown()) {
        KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>();
        future.completeExceptionally(new InvalidRequestException("The AclBindingFilter " +
            "must not contain UNKNOWN elements."));
        return new DescribeAclsResult(future);
    }
    final long now = time.milliseconds();
    final KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>();
    runnable.call(new Call("describeAcls", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        DescribeAclsRequest.Builder createRequest(int timeoutMs) {
            return new DescribeAclsRequest.Builder(filter);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            DescribeAclsResponse response = (DescribeAclsResponse) abstractResponse;
            if (response.error().isFailure()) {
                future.completeExceptionally(response.error().exception());
            } else {
                // Convert the wire-format ACL resources into public AclBinding objects.
                future.complete(DescribeAclsResponse.aclBindings(response.acls()));
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    }, now);
    return new DescribeAclsResult(future);
}
// Creates the given ACL bindings. Duplicate bindings in the input share a single future;
// bindings whose filter form contains an indefinite (wildcard-like) field are failed
// locally and never sent to the broker.
@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
final long now = time.milliseconds();
// One future per distinct binding; also tracks which bindings were actually sent,
// in order, so responses can be matched back positionally.
final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
final List<AclCreation> aclCreations = new ArrayList<>();
final List<AclBinding> aclBindingsSent = new ArrayList<>();
for (AclBinding acl : acls) {
if (futures.get(acl) == null) {
KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
futures.put(acl, future);
// An indefinite field (e.g. an ANY/MATCH component) makes the binding invalid
// for creation; fail that future immediately without a round trip.
String indefinite = acl.toFilter().findIndefiniteField();
if (indefinite == null) {
aclCreations.add(CreateAclsRequest.aclCreation(acl));
aclBindingsSent.add(acl);
} else {
future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " +
indefinite));
}
}
}
final CreateAclsRequestData data = new CreateAclsRequestData().setCreations(aclCreations);
runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
CreateAclsRequest.Builder createRequest(int timeoutMs) {
return new CreateAclsRequest.Builder(data);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
List<AclCreationResult> responses = response.results();
// Results are matched to the sent bindings by position: the broker is expected
// to answer in request order. A short response fails the unmatched tail.
Iterator<AclCreationResult> iter = responses.iterator();
for (AclBinding aclBinding : aclBindingsSent) {
KafkaFutureImpl<Void> future = futures.get(aclBinding);
if (!iter.hasNext()) {
future.completeExceptionally(new UnknownServerException(
"The broker reported no creation result for the given ACL: " + aclBinding));
} else {
AclCreationResult creation = iter.next();
Errors error = Errors.forCode(creation.errorCode());
ApiError apiError = new ApiError(error, creation.errorMessage());
if (apiError.isFailure())
future.completeExceptionally(apiError.exception());
else
future.complete(null);
}
}
}
@Override
void handleFailure(Throwable throwable) {
// Request-level failure (disconnect, timeout, ...) fails every future,
// including ones already completed (completeExceptionally is then a no-op).
completeAllExceptionally(futures.values(), throwable);
}
}, now);
return new CreateAclsResult(new HashMap<>(futures));
}
// Deletes all ACLs matching each of the given filters. Duplicate filters in the input
// share a single future. Each filter's result lists the individual bindings that
// matched, together with any per-binding deletion error.
@Override
public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
final long now = time.milliseconds();
final Map<AclBindingFilter, KafkaFutureImpl<FilterResults>> futures = new HashMap<>();
// The order of aclBindingFiltersSent mirrors the order of deleteAclsFilters in the
// request, so responses can be matched back positionally.
final List<AclBindingFilter> aclBindingFiltersSent = new ArrayList<>();
final List<DeleteAclsFilter> deleteAclsFilters = new ArrayList<>();
for (AclBindingFilter filter : filters) {
if (futures.get(filter) == null) {
aclBindingFiltersSent.add(filter);
deleteAclsFilters.add(DeleteAclsRequest.deleteAclsFilter(filter));
futures.put(filter, new KafkaFutureImpl<>());
}
}
final DeleteAclsRequestData data = new DeleteAclsRequestData().setFilters(deleteAclsFilters);
runnable.call(new Call("deleteAcls", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
DeleteAclsRequest.Builder createRequest(int timeoutMs) {
return new DeleteAclsRequest.Builder(data);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
DeleteAclsResponse response = (DeleteAclsResponse) abstractResponse;
List<DeleteAclsResponseData.DeleteAclsFilterResult> results = response.filterResults();
// One result per sent filter, in request order; a short response fails the tail.
Iterator<DeleteAclsResponseData.DeleteAclsFilterResult> iter = results.iterator();
for (AclBindingFilter bindingFilter : aclBindingFiltersSent) {
KafkaFutureImpl<FilterResults> future = futures.get(bindingFilter);
if (!iter.hasNext()) {
future.completeExceptionally(new UnknownServerException(
"The broker reported no deletion result for the given filter."));
} else {
DeleteAclsFilterResult filterResult = iter.next();
ApiError error = new ApiError(Errors.forCode(filterResult.errorCode()), filterResult.errorMessage());
if (error.isFailure()) {
// Filter-level error fails the whole filter's future.
future.completeExceptionally(error.exception());
} else {
// Success at the filter level: surface each matched binding along with
// its own (possibly null) per-binding error.
List<FilterResult> filterResults = new ArrayList<>();
for (DeleteAclsMatchingAcl matchingAcl : filterResult.matchingAcls()) {
ApiError aclError = new ApiError(Errors.forCode(matchingAcl.errorCode()),
matchingAcl.errorMessage());
AclBinding aclBinding = DeleteAclsResponse.aclBinding(matchingAcl);
filterResults.add(new FilterResult(aclBinding, aclError.exception()));
}
future.complete(new FilterResults(filterResults));
}
}
}
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
}, now);
return new DeleteAclsResult(new HashMap<>(futures));
}
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
// Partition the requested config resources based on which broker they must be sent to with the
// null broker being used for config resources which can be obtained from any broker
final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> brokerFutures = new HashMap<>(configResources.size());
for (ConfigResource resource : configResources) {
// nodeFor returns the specific broker id the resource is pinned to, or null
// when any broker can answer (see the null-key convention above).
Integer broker = nodeFor(resource);
brokerFutures.compute(broker, (key, value) -> {
if (value == null) {
value = new HashMap<>();
}
value.put(resource, new KafkaFutureImpl<>());
return value;
});
}
final long now = time.milliseconds();
// One DescribeConfigs call per target broker (plus one least-loaded call for the
// null-key group). Each anonymous Call captures its own group as `unified`.
for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : brokerFutures.entrySet()) {
Integer broker = entry.getKey();
Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
broker != null ? new ConstantNodeIdProvider(broker) : new LeastLoadedNodeProvider()) {
@Override
DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
// ConfigurationKeys == null requests all config keys for each resource.
return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
.setResources(unified.keySet().stream()
.map(config ->
new DescribeConfigsRequestData.DescribeConfigsResource()
.setResourceName(config.name())
.setResourceType(config.type().id())
.setConfigurationKeys(null))
.collect(Collectors.toList()))
.setIncludeSynonyms(options.includeSynonyms())
.setIncludeDocumentation(options.includeDocumentation()));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
ConfigResource configResource = entry.getKey();
DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
KafkaFutureImpl<Config> future = unified.get(configResource);
if (future == null) {
// Response mentions a resource we never asked this broker for: log and ignore.
if (broker != null) {
log.warn("The config {} in the response from broker {} is not in the request",
configResource, broker);
} else {
log.warn("The config {} in the response from the least loaded broker is not in the request",
configResource);
}
} else {
if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
.exception(describeConfigsResult.errorMessage()));
} else {
future.complete(describeConfigResult(describeConfigsResult));
}
}
}
// Sanity check: fail any requested resource the broker did not answer for.
completeUnrealizedFutures(
unified.entrySet().stream(),
configResource -> "The broker response did not contain a result for config resource " + configResource);
}
@Override
void handleFailure(Throwable throwable) {
// Only fail the futures belonging to this broker's group.
completeAllExceptionally(unified.values(), throwable);
}
}, now);
}
// Flatten the per-broker groups back into a single resource -> future map.
return new DescribeConfigsResult(new HashMap<>(brokerFutures.entrySet().stream()
.flatMap(x -> x.getValue().entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
}
/**
 * Converts a wire-level DescribeConfigs result for a single resource into the
 * public {@code Config} object, translating each entry's source, type and synonyms.
 */
private Config describeConfigResult(DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult) {
    final List<ConfigEntry> entries = new ArrayList<>(describeConfigsResult.configs().size());
    describeConfigsResult.configs().forEach(config -> {
        // Translate each synonym's numeric source id into the public source enum.
        List<ConfigEntry.ConfigSynonym> synonyms = config.synonyms().stream()
            .map(synonym -> new ConfigEntry.ConfigSynonym(synonym.name(), synonym.value(),
                DescribeConfigsResponse.ConfigSource.forId(synonym.source()).source()))
            .collect(Collectors.toList());
        entries.add(new ConfigEntry(
            config.name(),
            config.value(),
            DescribeConfigsResponse.ConfigSource.forId(config.configSource()).source(),
            config.isSensitive(),
            config.readOnly(),
            synonyms,
            DescribeConfigsResponse.ConfigType.forId(config.configType()).type(),
            config.documentation()));
    });
    return new Config(entries);
}
/**
 * Maps a response-level config source to its public {@link ConfigEntry.ConfigSource}
 * counterpart.
 *
 * @throws IllegalArgumentException if the source has no public equivalent
 */
private ConfigEntry.ConfigSource configSource(DescribeConfigsResponse.ConfigSource source) {
    switch (source) {
        case TOPIC_CONFIG:
            return ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG;
        case DYNAMIC_BROKER_CONFIG:
            return ConfigEntry.ConfigSource.DYNAMIC_BROKER_CONFIG;
        case DYNAMIC_DEFAULT_BROKER_CONFIG:
            return ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG;
        case STATIC_BROKER_CONFIG:
            return ConfigEntry.ConfigSource.STATIC_BROKER_CONFIG;
        case DYNAMIC_BROKER_LOGGER_CONFIG:
            return ConfigEntry.ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG;
        case DEFAULT_CONFIG:
            return ConfigEntry.ConfigSource.DEFAULT_CONFIG;
        default:
            throw new IllegalArgumentException("Unexpected config source " + source);
    }
}
/**
 * Legacy (non-incremental) alterConfigs. Each BROKER resource must be altered by a
 * request sent to that specific broker; all remaining resources are batched into a
 * single request that any broker can serve.
 *
 * @deprecated see {@code incrementalAlterConfigs}
 */
@Override
@Deprecated
public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>();
    final Collection<ConfigResource> anyBrokerResources = new ArrayList<>();
    for (ConfigResource resource : configs.keySet()) {
        Integer node = nodeFor(resource);
        if (node == null) {
            // Not pinned to a broker — collect for the single batched request below.
            anyBrokerResources.add(resource);
        } else {
            // Pinned resource: issue its own request directly to that broker.
            allFutures.putAll(alterConfigs(configs, options, Collections.singleton(resource),
                new ConstantNodeIdProvider(node)));
        }
    }
    if (!anyBrokerResources.isEmpty())
        allFutures.putAll(alterConfigs(configs, options, anyBrokerResources, new LeastLoadedNodeProvider()));
    return new AlterConfigsResult(new HashMap<>(allFutures));
}
// Sends one legacy AlterConfigs request covering the given resources to the node
// chosen by nodeProvider. Returns one future per resource.
private Map<ConfigResource, KafkaFutureImpl<Void>> alterConfigs(Map<ConfigResource, Config> configs,
final AlterConfigsOptions options,
Collection<ConfigResource> resources,
NodeProvider nodeProvider) {
final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
final Map<ConfigResource, AlterConfigsRequest.Config> requestMap = new HashMap<>(resources.size());
for (ConfigResource resource : resources) {
// Convert each public ConfigEntry into the request's wire representation.
List<AlterConfigsRequest.ConfigEntry> configEntries = new ArrayList<>();
for (ConfigEntry configEntry: configs.get(resource).entries())
configEntries.add(new AlterConfigsRequest.ConfigEntry(configEntry.name(), configEntry.value()));
requestMap.put(resource, new AlterConfigsRequest.Config(configEntries));
futures.put(resource, new KafkaFutureImpl<>());
}
final long now = time.milliseconds();
runnable.call(new Call("alterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {
@Override
public AlterConfigsRequest.Builder createRequest(int timeoutMs) {
return new AlterConfigsRequest.Builder(requestMap, options.shouldValidateOnly());
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
AlterConfigsResponse response = (AlterConfigsResponse) abstractResponse;
for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> entry : futures.entrySet()) {
KafkaFutureImpl<Void> future = entry.getValue();
// NOTE(review): this assumes response.errors() contains an entry for every
// requested resource; a missing entry would NPE here — confirm against the
// broker's response guarantees.
ApiException exception = response.errors().get(entry.getKey()).exception();
if (exception != null) {
future.completeExceptionally(exception);
} else {
future.complete(null);
}
}
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
}, now);
return futures;
}
/**
 * Incrementally alters the given configs. BROKER resources are each sent to their
 * own broker; everything else is batched into one request served by any broker.
 */
@Override
public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs,
                                                  final AlterConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>();
    final Collection<ConfigResource> anyBrokerResources = new ArrayList<>();
    for (ConfigResource resource : configs.keySet()) {
        Integer node = nodeFor(resource);
        if (node == null) {
            // Not pinned to a broker — collect for the single batched request below.
            anyBrokerResources.add(resource);
        } else {
            // Pinned resource: issue its own request directly to that broker.
            allFutures.putAll(incrementalAlterConfigs(configs, options,
                Collections.singleton(resource), new ConstantNodeIdProvider(node)));
        }
    }
    if (!anyBrokerResources.isEmpty())
        allFutures.putAll(incrementalAlterConfigs(configs, options, anyBrokerResources,
            new LeastLoadedNodeProvider()));
    return new AlterConfigsResult(new HashMap<>(allFutures));
}
// Sends one IncrementalAlterConfigs request covering the given resources to the node
// chosen by nodeProvider. Returns one future per resource.
private Map<ConfigResource, KafkaFutureImpl<Void>> incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs,
final AlterConfigsOptions options,
Collection<ConfigResource> resources,
NodeProvider nodeProvider) {
final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
for (ConfigResource resource : resources)
futures.put(resource, new KafkaFutureImpl<>());
final long now = time.milliseconds();
runnable.call(new Call("incrementalAlterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {
@Override
public IncrementalAlterConfigsRequest.Builder createRequest(int timeoutMs) {
return new IncrementalAlterConfigsRequest.Builder(resources, configs, options.shouldValidateOnly());
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
IncrementalAlterConfigsResponse response = (IncrementalAlterConfigsResponse) abstractResponse;
Map<ConfigResource, ApiError> errors = IncrementalAlterConfigsResponse.fromResponseData(response.data());
for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> entry : futures.entrySet()) {
KafkaFutureImpl<Void> future = entry.getValue();
// NOTE(review): assumes the response contains an error entry for every
// requested resource; a missing entry would NPE here — confirm against the
// broker's response guarantees.
ApiException exception = errors.get(entry.getKey()).exception();
if (exception != null) {
future.completeExceptionally(exception);
} else {
future.complete(null);
}
}
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
}, now);
return futures;
}
// Moves each replica to the requested log directory on its broker. Requests are
// grouped per broker; a failure of one broker's request only fails that broker's
// replicas' futures.
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, final AlterReplicaLogDirsOptions options) {
final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
for (TopicPartitionReplica replica : replicaAssignment.keySet())
futures.put(replica, new KafkaFutureImpl<>());
// Build one request per broker with the nested dir -> topic -> partition structure
// the wire format expects, reusing existing dir/topic nodes via find().
Map<Integer, AlterReplicaLogDirsRequestData> replicaAssignmentByBroker = new HashMap<>();
for (Map.Entry<TopicPartitionReplica, String> entry: replicaAssignment.entrySet()) {
TopicPartitionReplica replica = entry.getKey();
String logDir = entry.getValue();
int brokerId = replica.brokerId();
AlterReplicaLogDirsRequestData value = replicaAssignmentByBroker.computeIfAbsent(brokerId,
key -> new AlterReplicaLogDirsRequestData());
AlterReplicaLogDir alterReplicaLogDir = value.dirs().find(logDir);
if (alterReplicaLogDir == null) {
alterReplicaLogDir = new AlterReplicaLogDir();
alterReplicaLogDir.setPath(logDir);
value.dirs().add(alterReplicaLogDir);
}
AlterReplicaLogDirTopic alterReplicaLogDirTopic = alterReplicaLogDir.topics().find(replica.topic());
if (alterReplicaLogDirTopic == null) {
alterReplicaLogDirTopic = new AlterReplicaLogDirTopic().setName(replica.topic());
alterReplicaLogDir.topics().add(alterReplicaLogDirTopic);
}
alterReplicaLogDirTopic.partitions().add(replica.partition());
}
final long now = time.milliseconds();
// One Call per broker; each captures its own brokerId and assignment payload.
for (Map.Entry<Integer, AlterReplicaLogDirsRequestData> entry: replicaAssignmentByBroker.entrySet()) {
final int brokerId = entry.getKey();
final AlterReplicaLogDirsRequestData assignment = entry.getValue();
runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
new ConstantNodeIdProvider(brokerId)) {
@Override
public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) {
return new AlterReplicaLogDirsRequest.Builder(assignment);
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
for (AlterReplicaLogDirTopicResult topicResult: response.data().results()) {
for (AlterReplicaLogDirPartitionResult partitionResult: topicResult.partitions()) {
// Reconstruct the replica key to look up its future; unknown replicas
// in the response are logged and ignored.
TopicPartitionReplica replica = new TopicPartitionReplica(
topicResult.topicName(), partitionResult.partitionIndex(), brokerId);
KafkaFutureImpl<Void> future = futures.get(replica);
if (future == null) {
log.warn("The partition {} in the response from broker {} is not in the request",
new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()),
brokerId);
} else if (partitionResult.errorCode() == Errors.NONE.code()) {
future.complete(null);
} else {
future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
}
}
}
// The server should send back a response for every replica. But do a sanity check anyway.
completeUnrealizedFutures(
futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId),
replica -> "The response from broker " + brokerId +
" did not contain a result for replica " + replica);
}
@Override
void handleFailure(Throwable throwable) {
// Only completes the futures of brokerId
completeAllExceptionally(
futures.entrySet().stream()
.filter(entry -> entry.getKey().brokerId() == brokerId)
.map(Map.Entry::getValue),
throwable);
}
}, now);
}
return new AlterReplicaLogDirsResult(new HashMap<>(futures));
}
// Describes all log directories on each of the given brokers, issuing one request
// per broker. Each broker gets its own future with a map of logDir -> description.
@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
final long now = time.milliseconds();
for (final Integer brokerId : brokers) {
KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
futures.put(brokerId, future);
runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()),
new ConstantNodeIdProvider(brokerId)) {
@Override
public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
// Query selected partitions in all log directories
// (topics == null means "all topics/partitions" on the wire).
return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
if (descriptions.size() > 0) {
future.complete(descriptions);
} else {
// Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
// — an empty result with NONE is therefore treated as an authorization failure.
Errors error = response.data().errorCode() == Errors.NONE.code()
? Errors.CLUSTER_AUTHORIZATION_FAILED
: Errors.forCode(response.data().errorCode());
future.completeExceptionally(error.exception());
}
}
@Override
void handleFailure(Throwable throwable) {
future.completeExceptionally(throwable);
}
}, now);
}
return new DescribeLogDirsResult(new HashMap<>(futures));
}
/**
 * Converts a DescribeLogDirs response into a map from log-directory path to its
 * {@link LogDirDescription}, including per-partition replica info, error, and the
 * directory's total/usable byte counts.
 */
private static Map<String, LogDirDescription> logDirDescriptions(DescribeLogDirsResponse response) {
    Map<String, LogDirDescription> descriptions = new HashMap<>(response.data().results().size());
    for (DescribeLogDirsResponseData.DescribeLogDirsResult dirResult : response.data().results()) {
        // Flatten the nested topic/partition structure into a per-partition map.
        Map<TopicPartition, ReplicaInfo> replicas = new HashMap<>();
        for (DescribeLogDirsResponseData.DescribeLogDirsTopic topic : dirResult.topics()) {
            for (DescribeLogDirsResponseData.DescribeLogDirsPartition partition : topic.partitions()) {
                TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                ReplicaInfo info = new ReplicaInfo(
                    partition.partitionSize(), partition.offsetLag(), partition.isFutureKey());
                replicas.put(tp, info);
            }
        }
        descriptions.put(dirResult.logDir(), new LogDirDescription(
            Errors.forCode(dirResult.errorCode()).exception(),
            replicas,
            dirResult.totalBytes(),
            dirResult.usableBytes()));
    }
    return descriptions;
}
// Describes, for each replica, which log directory currently hosts it and which
// (if any) hosts its "future" copy during a directory move. One request per broker.
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
for (TopicPartitionReplica replica : replicas) {
futures.put(replica, new KafkaFutureImpl<>());
}
// Group the requested partitions by broker into the request's topic/partition structure.
Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();
for (TopicPartitionReplica replica: replicas) {
DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(),
brokerId -> new DescribeLogDirsRequestData());
DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
if (describableLogDirTopic == null) {
List<Integer> partitions = new ArrayList<>();
partitions.add(replica.partition());
describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic())
.setPartitions(partitions);
requestData.topics().add(describableLogDirTopic);
} else {
describableLogDirTopic.partitions().add(replica.partition());
}
}
final long now = time.milliseconds();
for (Map.Entry<Integer, DescribeLogDirsRequestData> entry: partitionsByBroker.entrySet()) {
final int brokerId = entry.getKey();
final DescribeLogDirsRequestData topicPartitions = entry.getValue();
// Pre-populate an empty info record per requested partition; the response
// handler merges current- and future-replica data into these.
final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
for (DescribableLogDirTopic topicPartition: topicPartitions.topics()) {
for (Integer partitionId : topicPartition.partitions()) {
replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
}
}
runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
new ConstantNodeIdProvider(brokerId)) {
@Override
public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
// Query selected partitions in all log directories
return new DescribeLogDirsRequest.Builder(topicPartitions);
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
for (Map.Entry<String, LogDirDescription> responseEntry: logDirDescriptions(response).entrySet()) {
String logDir = responseEntry.getKey();
LogDirDescription logDirInfo = responseEntry.getValue();
// No replica info will be provided if the log directory is offline
if (logDirInfo.error() instanceof KafkaStorageException)
continue;
if (logDirInfo.error() != null)
// NOTE(review): handleFailure fails ALL futures here, yet the loop keeps
// running and the completion loop below still executes (complete() on an
// already-failed future is then a no-op) — confirm this is intentional.
handleFailure(new IllegalStateException(
"The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry: logDirInfo.replicaInfos().entrySet()) {
TopicPartition tp = replicaInfoEntry.getKey();
ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
if (replicaLogDirInfo == null) {
log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
} else if (replicaInfo.isFuture()) {
// Future replica: keep the current-replica fields, fill in the future ones.
replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(),
replicaLogDirInfo.getCurrentReplicaOffsetLag(),
logDir,
replicaInfo.offsetLag()));
} else {
// Current replica: keep the future-replica fields, fill in the current ones.
replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir,
replicaInfo.offsetLag(),
replicaLogDirInfo.getFutureReplicaLogDir(),
replicaLogDirInfo.getFutureReplicaOffsetLag()));
}
}
}
// Complete every requested partition with whatever was merged (possibly empty).
for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry: replicaDirInfoByPartition.entrySet()) {
TopicPartition tp = entry.getKey();
KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
future.complete(entry.getValue());
}
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
}, now);
}
return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
}
/**
 * Increases the partition count of the given topics, optionally with explicit broker
 * assignments for the new partitions. The actual request is issued (and retried on
 * quota violation) by {@code getCreatePartitionsCall}.
 */
@Override
public CreatePartitionsResult createPartitions(final Map<String, NewPartitions> newPartitions,
                                               final CreatePartitionsOptions options) {
    final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>(newPartitions.size());
    final CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection(newPartitions.size());
    newPartitions.forEach((topic, newPartition) -> {
        // A null assignment list means "let the controller pick brokers".
        List<CreatePartitionsAssignment> assignments = null;
        if (newPartition.assignments() != null) {
            assignments = new ArrayList<>();
            for (List<Integer> brokerIds : newPartition.assignments())
                assignments.add(new CreatePartitionsAssignment().setBrokerIds(brokerIds));
        }
        topics.add(new CreatePartitionsTopic()
            .setName(topic)
            .setCount(newPartition.totalCount())
            .setAssignments(assignments));
        futures.put(topic, new KafkaFutureImpl<>());
    });
    if (!topics.isEmpty()) {
        final long now = time.milliseconds();
        final long deadline = calcDeadlineMs(now, options.timeoutMs());
        // No quota-exceeded exceptions recorded yet on the first attempt.
        runnable.call(getCreatePartitionsCall(options, futures, topics,
            Collections.emptyMap(), now, deadline), now);
    }
    return new CreatePartitionsResult(new HashMap<>(futures));
}
// Builds the (possibly re-entrant) CreatePartitions call sent to the controller.
// Topics rejected with THROTTLING_QUOTA_EXCEEDED are retried via a new call with
// the same overall deadline when the options allow it; quotaExceededExceptions
// carries the original quota errors so a later timeout can surface them.
private Call getCreatePartitionsCall(final CreatePartitionsOptions options,
final Map<String, KafkaFutureImpl<Void>> futures,
final CreatePartitionsTopicCollection topics,
final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
final long now,
final long deadline) {
return new Call("createPartitions", deadline, new ControllerNodeProvider()) {
@Override
public CreatePartitionsRequest.Builder createRequest(int timeoutMs) {
return new CreatePartitionsRequest.Builder(
new CreatePartitionsRequestData()
.setTopics(topics)
.setValidateOnly(options.validateOnly())
.setTimeoutMs(timeoutMs));
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
// Check for controller change
handleNotControllerError(abstractResponse);
// Handle server responses for particular topics.
final CreatePartitionsResponse response = (CreatePartitionsResponse) abstractResponse;
final CreatePartitionsTopicCollection retryTopics = new CreatePartitionsTopicCollection();
final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
for (CreatePartitionsTopicResult result : response.data().results()) {
KafkaFutureImpl<Void> future = futures.get(result.name());
if (future == null) {
log.warn("Server response mentioned unknown topic {}", result.name());
} else {
ApiError error = new ApiError(result.errorCode(), result.errorMessage());
if (error.isFailure()) {
if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
response.throttleTimeMs(), error.messageWithFallback());
if (options.shouldRetryOnQuotaViolation()) {
// Queue this topic for the retry call; duplicate() because the
// topic object cannot belong to two collections at once.
retryTopics.add(topics.find(result.name()).duplicate());
retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
} else {
future.completeExceptionally(quotaExceededException);
}
} else {
future.completeExceptionally(error.exception());
}
} else {
future.complete(null);
}
}
}
// If there are topics to retry, retry them; complete unrealized futures otherwise.
if (retryTopics.isEmpty()) {
// The server should send back a response for every topic. But do a sanity check anyway.
completeUnrealizedFutures(futures.entrySet().stream(),
topic -> "The controller response did not contain a result for topic " + topic);
} else {
// Recurse with the retry subset, keeping the original deadline so the total
// wait never exceeds the caller's timeout.
final long now = time.milliseconds();
final Call call = getCreatePartitionsCall(options, futures, retryTopics,
retryTopicQuotaExceededExceptions, now, deadline);
runnable.call(call, now);
}
}
@Override
void handleFailure(Throwable throwable) {
// If there were any topics retries due to a quota exceeded exception, we propagate
// the initial error back to the caller if the request timed out.
maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
// Fail all the other remaining futures
completeAllExceptionally(futures.values(), throwable);
}
};
}
/**
 * Deletes records before the given offsets. DeleteRecords requests must go to each
 * partition's leader, so a metadata request is issued first to discover leaders,
 * then one DeleteRecords request is sent per leader.
 *
 * Fix: the topic-error check previously did {@code errors.get(topic)} followed by a
 * second {@code errors.containsKey(topic)} lookup — redundant, and it would NPE on
 * {@code topicError.exception()} if the map ever held a null value for a present
 * key. A single null-checked lookup is used instead.
 */
@Override
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete,
                                         final DeleteRecordsOptions options) {
    // requests need to be sent to partitions leader nodes so ...
    // ... from the provided map it's needed to create more maps grouping topic/partition per leader
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    for (TopicPartition topicPartition: recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<>());
    }
    // preparing topics list for asking metadata about them
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition: recordsToDelete.keySet()) {
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // asking for topics metadata for getting partitions leaders
    runnable.call(new Call("topicsMetadata", deadline,
        new LeastLoadedNodeProvider()) {
        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(convertToMetadataRequestTopic(topics))
                .setAllowAutoTopicCreation(false));
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.buildCluster();
            // Group topic partitions by leader
            Map<Node, Map<String, DeleteRecordsTopic>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry: recordsToDelete.entrySet()) {
                TopicPartition topicPartition = entry.getKey();
                KafkaFutureImpl<DeletedRecords> future = futures.get(topicPartition);
                // Fail partitions with topic errors. A single null-checked lookup
                // replaces the previous get + containsKey pair.
                Errors topicError = errors.get(topicPartition.topic());
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                } else {
                    Node node = cluster.leaderFor(topicPartition);
                    if (node != null) {
                        // Accumulate per-leader, per-topic partition/offset pairs.
                        Map<String, DeleteRecordsTopic> deletionsForLeader = leaders.computeIfAbsent(
                            node, key -> new HashMap<>());
                        DeleteRecordsTopic deleteRecords = deletionsForLeader.get(topicPartition.topic());
                        if (deleteRecords == null) {
                            deleteRecords = new DeleteRecordsTopic()
                                .setName(topicPartition.topic());
                            deletionsForLeader.put(topicPartition.topic(), deleteRecords);
                        }
                        deleteRecords.partitions().add(new DeleteRecordsPartition()
                            .setPartitionIndex(topicPartition.partition())
                            .setOffset(entry.getValue().beforeOffset()));
                    } else {
                        // No leader known for the partition: cannot route the request.
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            // One DeleteRecords request per leader, all sharing the original deadline.
            final long deleteRecordsCallTimeMs = time.milliseconds();
            for (final Map.Entry<Node, Map<String, DeleteRecordsTopic>> entry : leaders.entrySet()) {
                final Map<String, DeleteRecordsTopic> partitionDeleteOffsets = entry.getValue();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline,
                    new ConstantNodeIdProvider(brokerId)) {
                    @Override
                    DeleteRecordsRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(new DeleteRecordsRequestData()
                            .setTimeoutMs(timeoutMs)
                            .setTopics(new ArrayList<>(partitionDeleteOffsets.values())));
                    }
                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (DeleteRecordsTopicResult topicResult: response.data().topics()) {
                            for (DeleteRecordsResponseData.DeleteRecordsPartitionResult partitionResult : topicResult.partitions()) {
                                KafkaFutureImpl<DeletedRecords> future = futures.get(new TopicPartition(topicResult.name(), partitionResult.partitionIndex()));
                                if (partitionResult.errorCode() == Errors.NONE.code()) {
                                    future.complete(new DeletedRecords(partitionResult.lowWatermark()));
                                } else {
                                    future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
                                }
                            }
                        }
                    }
                    @Override
                    void handleFailure(Throwable throwable) {
                        // Only fail the futures belonging to this leader's request.
                        Stream<KafkaFutureImpl<DeletedRecords>> callFutures =
                            partitionDeleteOffsets.values().stream().flatMap(
                                recordsToDelete ->
                                    recordsToDelete.partitions().stream().map(partitionsToDelete ->
                                        new TopicPartition(recordsToDelete.name(), partitionsToDelete.partitionIndex()))
                            ).map(futures::get);
                        completeAllExceptionally(callFutures, throwable);
                    }
                }, deleteRecordsCallTimeMs);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // Metadata failure: nothing was routed, fail everything.
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    return new DeleteRecordsResult(new HashMap<>(futures));
}
/**
 * Requests a new delegation token from the least-loaded broker.
 * The returned result's future completes with the created token, or
 * exceptionally on a broker-side error or transport failure.
 */
@Override
public CreateDelegationTokenResult createDelegationToken(final CreateDelegationTokenOptions options) {
// Completed exactly once: with the new token, or with the first error seen.
final KafkaFutureImpl<DelegationToken> delegationTokenFuture = new KafkaFutureImpl<>();
final long now = time.milliseconds();
// Convert the requested renewer principals into their wire-format representation.
List<CreatableRenewers> renewers = new ArrayList<>();
for (KafkaPrincipal principal : options.renewers()) {
renewers.add(new CreatableRenewers()
.setPrincipalName(principal.getName())
.setPrincipalType(principal.getPrincipalType()));
}
runnable.call(new Call("createDelegationToken", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
CreateDelegationTokenRequest.Builder createRequest(int timeoutMs) {
CreateDelegationTokenRequestData data = new CreateDelegationTokenRequestData()
.setRenewers(renewers)
.setMaxLifetimeMs(options.maxlifeTimeMs());
// An explicit owner is only set when requested; otherwise the owner fields
// are left at their defaults (presumably the authenticated principal —
// NOTE(review): confirm against CreateDelegationTokenRequest semantics).
if (options.owner().isPresent()) {
data.setOwnerPrincipalName(options.owner().get().getName());
data.setOwnerPrincipalType(options.owner().get().getPrincipalType());
}
return new CreateDelegationTokenRequest.Builder(data);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
CreateDelegationTokenResponse response = (CreateDelegationTokenResponse) abstractResponse;
if (response.hasError()) {
delegationTokenFuture.completeExceptionally(response.error().exception());
} else {
// Assemble the client-side token view from the response fields.
CreateDelegationTokenResponseData data = response.data();
TokenInformation tokenInfo = new TokenInformation(data.tokenId(), new KafkaPrincipal(data.principalType(), data.principalName()),
new KafkaPrincipal(data.tokenRequesterPrincipalType(), data.tokenRequesterPrincipalName()),
options.renewers(), data.issueTimestampMs(), data.maxTimestampMs(), data.expiryTimestampMs());
DelegationToken token = new DelegationToken(tokenInfo, data.hmac());
delegationTokenFuture.complete(token);
}
}
@Override
void handleFailure(Throwable throwable) {
// Transport-level failure (e.g. timeout, disconnect): fail the caller's future.
delegationTokenFuture.completeExceptionally(throwable);
}
}, now);
return new CreateDelegationTokenResult(delegationTokenFuture);
}
/**
 * Asks a broker to renew the delegation token identified by {@code hmac}.
 * The result's future completes with the new expiry timestamp on success.
 */
@Override
public RenewDelegationTokenResult renewDelegationToken(final byte[] hmac, final RenewDelegationTokenOptions options) {
    // Completed with the renewed token's expiry timestamp, or exceptionally.
    final KafkaFutureImpl<Long> expiryTimeFuture = new KafkaFutureImpl<>();
    final long nowMs = time.milliseconds();
    final long deadlineMs = calcDeadlineMs(nowMs, options.timeoutMs());
    final Call renewCall = new Call("renewDelegationToken", deadlineMs, new LeastLoadedNodeProvider()) {
        @Override
        RenewDelegationTokenRequest.Builder createRequest(int timeoutMs) {
            final RenewDelegationTokenRequestData requestData = new RenewDelegationTokenRequestData()
                .setHmac(hmac)
                .setRenewPeriodMs(options.renewTimePeriodMs());
            return new RenewDelegationTokenRequest.Builder(requestData);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final RenewDelegationTokenResponse response = (RenewDelegationTokenResponse) abstractResponse;
            if (!response.hasError()) {
                expiryTimeFuture.complete(response.expiryTimestamp());
            } else {
                expiryTimeFuture.completeExceptionally(response.error().exception());
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // Transport-level failure: propagate directly to the caller.
            expiryTimeFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(renewCall, nowMs);
    return new RenewDelegationTokenResult(expiryTimeFuture);
}
/**
 * Asks a broker to change the expiry of the delegation token identified by
 * {@code hmac}. The result's future completes with the new expiry timestamp.
 */
@Override
public ExpireDelegationTokenResult expireDelegationToken(final byte[] hmac, final ExpireDelegationTokenOptions options) {
    // Completed with the token's updated expiry timestamp, or exceptionally.
    final KafkaFutureImpl<Long> expiryTimeFuture = new KafkaFutureImpl<>();
    final long nowMs = time.milliseconds();
    final long deadlineMs = calcDeadlineMs(nowMs, options.timeoutMs());
    final Call expireCall = new Call("expireDelegationToken", deadlineMs, new LeastLoadedNodeProvider()) {
        @Override
        ExpireDelegationTokenRequest.Builder createRequest(int timeoutMs) {
            final ExpireDelegationTokenRequestData requestData = new ExpireDelegationTokenRequestData()
                .setHmac(hmac)
                .setExpiryTimePeriodMs(options.expiryTimePeriodMs());
            return new ExpireDelegationTokenRequest.Builder(requestData);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final ExpireDelegationTokenResponse response = (ExpireDelegationTokenResponse) abstractResponse;
            if (!response.hasError()) {
                expiryTimeFuture.complete(response.expiryTimestamp());
            } else {
                expiryTimeFuture.completeExceptionally(response.error().exception());
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // Transport-level failure: propagate directly to the caller.
            expiryTimeFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(expireCall, nowMs);
    return new ExpireDelegationTokenResult(expiryTimeFuture);
}
/**
 * Fetches delegation tokens from a broker, narrowed by the owners configured
 * on {@code options}. The result's future completes with the token list.
 */
@Override
public DescribeDelegationTokenResult describeDelegationToken(final DescribeDelegationTokenOptions options) {
final KafkaFutureImpl<List<DelegationToken>> tokensFuture = new KafkaFutureImpl<>();
final long now = time.milliseconds();
runnable.call(new Call("describeDelegationToken", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
DescribeDelegationTokenRequest.Builder createRequest(int timeoutMs) {
// owners() filters which principals' tokens are returned — see
// DescribeDelegationTokenOptions for the exact semantics of an absent filter.
return new DescribeDelegationTokenRequest.Builder(options.owners());
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
DescribeDelegationTokenResponse response = (DescribeDelegationTokenResponse) abstractResponse;
if (response.hasError()) {
tokensFuture.completeExceptionally(response.error().exception());
} else {
tokensFuture.complete(response.tokens());
}
}
@Override
void handleFailure(Throwable throwable) {
// Transport-level failure: propagate directly to the caller.
tokensFuture.completeExceptionally(throwable);
}
}, now);
return new DescribeDelegationTokenResult(tokensFuture);
}
/**
 * Re-queues a metadata fetch for the given operation context, clearing the
 * previously cached response so the follow-up calls see fresh metadata.
 */
private void rescheduleMetadataTask(MetadataOperationContext<?, ?> context, Supplier<List<Call>> nextCalls) {
log.info("Retrying to fetch metadata.");
// Requeue the task so that we can re-attempt fetching metadata
context.setResponse(Optional.empty());
Call metadataCall = getMetadataCall(context, nextCalls);
runnable.call(metadataCall, time.milliseconds());
}
/**
 * Describes the given consumer groups via the coordinator-aware admin driver.
 * The result maps each group id to a future for its description.
 */
@Override
public DescribeConsumerGroupsResult describeConsumerGroups(final Collection<String> groupIds,
                                                           final DescribeConsumerGroupsOptions options) {
    // One coordinator-keyed future per requested group id.
    final SimpleAdminApiFuture<CoordinatorKey, ConsumerGroupDescription> describeFuture =
        DescribeConsumerGroupsHandler.newFuture(groupIds);
    final DescribeConsumerGroupsHandler describeHandler =
        new DescribeConsumerGroupsHandler(options.includeAuthorizedOperations(), logContext);
    invokeDriver(describeHandler, describeFuture, options.timeoutMs);
    // Re-key the per-coordinator futures by plain group id for the public result.
    final Map<String, KafkaFuture<ConsumerGroupDescription>> futuresByGroupId = new HashMap<>();
    for (Map.Entry<CoordinatorKey, KafkaFutureImpl<ConsumerGroupDescription>> entry : describeFuture.all().entrySet()) {
        futuresByGroupId.put(entry.getKey().idValue, entry.getValue());
    }
    return new DescribeConsumerGroupsResult(futuresByGroupId);
}
/**
* Returns a {@code Call} object to fetch the cluster metadata. Takes a List of Calls
* parameter to schedule actions that need to be taken using the metadata. The param is a Supplier
* so that it can be lazily created, so that it can use the results of the metadata call in its
* construction.
*
* @param <T> The type of return value of the KafkaFuture, like ListOffsetsResultInfo, etc.
* @param <O> The type of configuration option, like ListOffsetsOptions, etc
*/
private <T, O extends AbstractOptions<O>> Call getMetadataCall(MetadataOperationContext<T, O> context,
Supplier<List<Call>> nextCalls) {
return new Call("metadata", context.deadline(), new LeastLoadedNodeProvider()) {
@Override
MetadataRequest.Builder createRequest(int timeoutMs) {
// Request only the topics relevant to the pending operation, with
// auto-creation disabled so the lookup has no side effects.
return new MetadataRequest.Builder(new MetadataRequestData()
.setTopics(convertToMetadataRequestTopic(context.topics()))
.setAllowAutoTopicCreation(false));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
MetadataResponse response = (MetadataResponse) abstractResponse;
// Surface metadata-level errors before acting on the response.
MetadataOperationContext.handleMetadataErrors(response);
// Stash the response so the lazily-constructed follow-up calls can read it.
context.setResponse(Optional.of(response));
// Only now build and schedule the dependent calls (the Supplier defers
// their construction until metadata is actually available).
for (Call call : nextCalls.get()) {
runnable.call(call, time.milliseconds());
}
}
@Override
void handleFailure(Throwable throwable) {
// Metadata fetch failed: fail every future tracked by the operation context.
for (KafkaFutureImpl<T> future : context.futures().values()) {
future.completeExceptionally(throwable);
}
}
};
}
/**
 * Decodes a 32-bit authorized-operations field into a set of concrete ACL
 * operations, dropping the UNKNOWN/ALL/ANY meta-values. Returns {@code null}
 * when the broker omitted authorized operations from the response.
 */
private Set<AclOperation> validAclOperations(final int authorizedOperations) {
    if (authorizedOperations == MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED) {
        return null;
    }
    final Set<AclOperation> operations = new HashSet<>();
    for (final byte code : Utils.from32BitField(authorizedOperations)) {
        final AclOperation operation = AclOperation.fromCode(code);
        // Skip the meta-values that do not denote a single concrete operation.
        final boolean concrete = operation != AclOperation.UNKNOWN
            && operation != AclOperation.ALL
            && operation != AclOperation.ANY;
        if (concrete) {
            operations.add(operation);
        }
    }
    return operations;
}
/**
 * Mutable accumulator for a fanned-out ListGroups operation: gathers listings
 * and per-node errors from every contacted broker, and completes the shared
 * future once all brokers have been accounted for. All methods are
 * synchronized because one instance is shared across broker response handlers.
 */
private final static class ListConsumerGroupsResults {
// Per-node failures, reported to the caller alongside successful listings.
private final List<Throwable> errors;
// Listings keyed by group id; a later listing for the same id replaces the earlier one.
private final HashMap<String, ConsumerGroupListing> listings;
// Brokers we are still waiting to hear from.
private final HashSet<Node> remaining;
private final KafkaFutureImpl<Collection<Object>> future;
ListConsumerGroupsResults(Collection<Node> leaders,
KafkaFutureImpl<Collection<Object>> future) {
this.errors = new ArrayList<>();
this.listings = new HashMap<>();
this.remaining = new HashSet<>(leaders);
this.future = future;
// Completes immediately when there are no brokers to contact.
tryComplete();
}
synchronized void addError(Throwable throwable, Node node) {
ApiError error = ApiError.fromThrowable(throwable);
// Record a node-qualified copy of the error, including its message when present.
if (error.message() == null || error.message().isEmpty()) {
errors.add(error.error().exception("Error listing groups on " + node));
} else {
errors.add(error.error().exception("Error listing groups on " + node + ": " + error.message()));
}
}
synchronized void addListing(ConsumerGroupListing listing) {
listings.put(listing.groupId(), listing);
}
// Marks the given node as done; completes the future if it was the last one.
synchronized void tryComplete(Node leader) {
remaining.remove(leader);
tryComplete();
}
private synchronized void tryComplete() {
if (remaining.isEmpty()) {
// The completed collection mixes ConsumerGroupListing and Throwable
// entries; the result wrapper separates them by type.
ArrayList<Object> results = new ArrayList<>(listings.values());
results.addAll(errors);
future.complete(results);
}
}
}
/**
 * Lists consumer groups by first discovering all brokers via a metadata
 * request, then sending a ListGroups request to every broker and merging the
 * per-broker results (and errors) into a single collection.
 */
@Override
public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
// Completed with a mix of ConsumerGroupListing and Throwable entries.
final KafkaFutureImpl<Collection<Object>> all = new KafkaFutureImpl<>();
final long nowMetadata = time.milliseconds();
final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
runnable.call(new Call("findAllBrokers", deadline, new LeastLoadedNodeProvider()) {
@Override
MetadataRequest.Builder createRequest(int timeoutMs) {
// Topic list is empty: we only need the broker list, not topic metadata.
return new MetadataRequest.Builder(new MetadataRequestData()
.setTopics(Collections.emptyList())
.setAllowAutoTopicCreation(true))
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
MetadataResponse metadataResponse = (MetadataResponse) abstractResponse;
Collection<Node> nodes = metadataResponse.brokers();
if (nodes.isEmpty())
throw new StaleMetadataException("Metadata fetch failed due to missing broker list");
HashSet<Node> allNodes = new HashSet<>(nodes);
// Shared accumulator; completes `all` after every broker responds or fails.
final ListConsumerGroupsResults results = new ListConsumerGroupsResults(allNodes, all);
for (final Node node : allNodes) {
final long nowList = time.milliseconds();
runnable.call(new Call("listConsumerGroups", deadline, new ConstantNodeIdProvider(node.id())) {
@Override
ListGroupsRequest.Builder createRequest(int timeoutMs) {
List<String> states = options.states()
.stream()
.map(ConsumerGroupState::toString)
.collect(Collectors.toList());
return new ListGroupsRequest.Builder(new ListGroupsRequestData().setStatesFilter(states));
}
// Adds the group to the results if its protocol type marks it as a
// consumer group (or is empty, i.e. a "simple" group).
private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) {
String protocolType = group.protocolType();
if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) {
final String groupId = group.groupId();
final Optional<ConsumerGroupState> state = group.groupState().equals("")
? Optional.empty()
: Optional.of(ConsumerGroupState.parse(group.groupState()));
final ConsumerGroupListing groupListing = new ConsumerGroupListing(groupId, protocolType.isEmpty(), state);
results.addListing(groupListing);
}
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
final ListGroupsResponse response = (ListGroupsResponse) abstractResponse;
synchronized (results) {
Errors error = Errors.forCode(response.data().errorCode());
if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.COORDINATOR_NOT_AVAILABLE) {
// Retriable coordinator states: rethrow so the Call machinery retries this node.
throw error.exception();
} else if (error != Errors.NONE) {
// Non-retriable: record and move on; other brokers may still succeed.
results.addError(error.exception(), node);
} else {
for (ListGroupsResponseData.ListedGroup group : response.data().groups()) {
maybeAddConsumerGroup(group);
}
}
results.tryComplete(node);
}
}
@Override
void handleFailure(Throwable throwable) {
synchronized (results) {
results.addError(throwable, node);
results.tryComplete(node);
}
}
}, nowList);
}
}
@Override
void handleFailure(Throwable throwable) {
// Broker discovery itself failed: the whole operation yields a single error entry.
KafkaException exception = new KafkaException("Failed to find brokers to send ListGroups", throwable);
all.complete(Collections.singletonList(exception));
}
}, nowMetadata);
return new ListConsumerGroupsResult(all);
}
/**
 * Lists committed offsets for the given groups via the coordinator-aware
 * admin driver, one spec per group id.
 */
@Override
public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs,
                                                               ListConsumerGroupOffsetsOptions options) {
    // One coordinator-keyed future per requested group id.
    final SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> offsetsFuture =
        ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet());
    final ListConsumerGroupOffsetsHandler offsetsHandler =
        new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext);
    invokeDriver(offsetsHandler, offsetsFuture, options.timeoutMs);
    return new ListConsumerGroupOffsetsResult(offsetsFuture.all());
}
/**
 * Deletes the given consumer groups via the coordinator-aware admin driver.
 * The result maps each group id to a future that completes when that group
 * has been deleted.
 */
@Override
public DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options) {
    final SimpleAdminApiFuture<CoordinatorKey, Void> deleteFuture =
        DeleteConsumerGroupsHandler.newFuture(groupIds);
    final DeleteConsumerGroupsHandler deleteHandler = new DeleteConsumerGroupsHandler(logContext);
    invokeDriver(deleteHandler, deleteFuture, options.timeoutMs);
    // Re-key the per-coordinator futures by plain group id for the public result.
    final Map<String, KafkaFuture<Void>> futuresByGroupId = new HashMap<>();
    for (Map.Entry<CoordinatorKey, KafkaFutureImpl<Void>> entry : deleteFuture.all().entrySet()) {
        futuresByGroupId.put(entry.getKey().idValue, entry.getValue());
    }
    return new DeleteConsumerGroupsResult(futuresByGroupId);
}
/**
 * Deletes committed offsets for the given partitions of a single consumer
 * group via the coordinator-aware admin driver.
 */
@Override
public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(
    String groupId,
    Set<TopicPartition> partitions,
    DeleteConsumerGroupOffsetsOptions options) {
    // Single-group operation: one coordinator-keyed future covers the whole request.
    final SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> offsetDeletionFuture =
        DeleteConsumerGroupOffsetsHandler.newFuture(groupId);
    final DeleteConsumerGroupOffsetsHandler offsetDeletionHandler =
        new DeleteConsumerGroupOffsetsHandler(groupId, partitions, logContext);
    invokeDriver(offsetDeletionHandler, offsetDeletionFuture, options.timeoutMs);
    final KafkaFutureImpl<Map<TopicPartition, Errors>> groupFuture =
        offsetDeletionFuture.get(CoordinatorKey.byGroupId(groupId));
    return new DeleteConsumerGroupOffsetsResult(groupFuture, partitions);
}
/**
 * Returns an unmodifiable view of this client's metrics, keyed by metric name.
 */
@Override
public Map<MetricName, ? extends Metric> metrics() {
return Collections.unmodifiableMap(this.metrics.metrics());
}
/**
 * Triggers a leader election of the given type for the given partitions,
 * routed to the controller. The result's future completes with a map from
 * partition to an optional per-partition error.
 */
@Override
public ElectLeadersResult electLeaders(
final ElectionType electionType,
final Set<TopicPartition> topicPartitions,
ElectLeadersOptions options) {
final KafkaFutureImpl<Map<TopicPartition, Optional<Throwable>>> electionFuture = new KafkaFutureImpl<>();
final long now = time.milliseconds();
runnable.call(new Call("electLeaders", calcDeadlineMs(now, options.timeoutMs()),
new ControllerNodeProvider()) {
@Override
public ElectLeadersRequest.Builder createRequest(int timeoutMs) {
return new ElectLeadersRequest.Builder(electionType, topicPartitions, timeoutMs);
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
ElectLeadersResponse response = (ElectLeadersResponse) abstractResponse;
Map<TopicPartition, Optional<Throwable>> result = ElectLeadersResponse.electLeadersResult(response.data());
// For version == 0 then errorCode would be 0 which maps to Errors.NONE
Errors error = Errors.forCode(response.data().errorCode());
// A top-level error fails the whole operation, superseding per-partition results.
if (error != Errors.NONE) {
electionFuture.completeExceptionally(error.exception());
return;
}
electionFuture.complete(result);
}
@Override
void handleFailure(Throwable throwable) {
// Transport-level failure: propagate directly to the caller.
electionFuture.completeExceptionally(throwable);
}
}, now);
return new ElectLeadersResult(electionFuture);
}
/**
 * Submits partition reassignments to the controller. An empty
 * {@link Optional} value cancels an in-progress reassignment for that
 * partition. Entries that fail client-side validation have their futures
 * completed exceptionally and are excluded from the request; if no entry
 * survives validation, no request is sent at all.
 */
@Override
public AlterPartitionReassignmentsResult alterPartitionReassignments(
        Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments,
        AlterPartitionReassignmentsOptions options) {
    final Map<TopicPartition, KafkaFutureImpl<Void>> futures = new HashMap<>();
    // Valid reassignments grouped by topic; TreeMaps give deterministic request ordering.
    final Map<String, Map<Integer, Optional<NewPartitionReassignment>>> topicsToReassignments = new TreeMap<>();
    for (Map.Entry<TopicPartition, Optional<NewPartitionReassignment>> entry : reassignments.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        String topic = topicPartition.topic();
        int partition = topicPartition.partition();
        Optional<NewPartitionReassignment> reassignment = entry.getValue();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(topicPartition, future);
        if (topicNameIsUnrepresentable(topic)) {
            future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                topic + "' cannot be represented in a request."));
        } else if (partition < 0) {
            future.completeExceptionally(new InvalidTopicException("The given partition index " +
                partition + " is not valid."));
        } else {
            topicsToReassignments.computeIfAbsent(topic, t -> new TreeMap<>())
                .put(partition, reassignment);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("alterPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()),
        new ControllerNodeProvider()) {
        @Override
        public AlterPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) {
            AlterPartitionReassignmentsRequestData data =
                new AlterPartitionReassignmentsRequestData();
            // Translate the per-topic grouping into the wire format; an empty
            // Optional becomes null replicas, which signals a cancellation.
            for (Map.Entry<String, Map<Integer, Optional<NewPartitionReassignment>>> entry :
                topicsToReassignments.entrySet()) {
                String topicName = entry.getKey();
                Map<Integer, Optional<NewPartitionReassignment>> partitionsToReassignments = entry.getValue();
                List<ReassignablePartition> reassignablePartitions = new ArrayList<>();
                for (Map.Entry<Integer, Optional<NewPartitionReassignment>> partitionEntry :
                    partitionsToReassignments.entrySet()) {
                    int partitionIndex = partitionEntry.getKey();
                    Optional<NewPartitionReassignment> reassignment = partitionEntry.getValue();
                    ReassignablePartition reassignablePartition = new ReassignablePartition()
                        .setPartitionIndex(partitionIndex)
                        .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null));
                    reassignablePartitions.add(reassignablePartition);
                }
                ReassignableTopic reassignableTopic = new ReassignableTopic()
                    .setName(topicName)
                    .setPartitions(reassignablePartitions);
                data.topics().add(reassignableTopic);
            }
            data.setTimeoutMs(timeoutMs);
            return new AlterPartitionReassignmentsRequest.Builder(data);
        }
        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterPartitionReassignmentsResponse response = (AlterPartitionReassignmentsResponse) abstractResponse;
            // Per-partition outcome: null value means success.
            Map<TopicPartition, ApiException> errors = new HashMap<>();
            int receivedResponsesCount = 0;
            Errors topLevelError = Errors.forCode(response.data().errorCode());
            switch (topLevelError) {
                case NONE:
                    receivedResponsesCount += validateTopicResponses(response.data().responses(), errors);
                    break;
                case NOT_CONTROLLER:
                    // Refresh controller metadata and rethrow so the call is retried.
                    handleNotControllerError(topLevelError);
                    break;
                default:
                    // Any other top-level error applies uniformly to every partition.
                    for (ReassignableTopicResponse topicResponse : response.data().responses()) {
                        String topicName = topicResponse.name();
                        for (ReassignablePartitionResponse partition : topicResponse.partitions()) {
                            errors.put(
                                new TopicPartition(topicName, partition.partitionIndex()),
                                new ApiError(topLevelError, response.data().errorMessage()).exception()
                            );
                            receivedResponsesCount += 1;
                        }
                    }
                    break;
            }
            assertResponseCountMatch(errors, receivedResponsesCount);
            for (Map.Entry<TopicPartition, ApiException> entry : errors.entrySet()) {
                ApiException exception = entry.getValue();
                if (exception == null)
                    futures.get(entry.getKey()).complete(null);
                else
                    futures.get(entry.getKey()).completeExceptionally(exception);
            }
        }
        // Sanity check: an error-free response must cover exactly the partitions we sent.
        private void assertResponseCountMatch(Map<TopicPartition, ApiException> errors, int receivedResponsesCount) {
            int expectedResponsesCount = topicsToReassignments.values().stream().mapToInt(Map::size).sum();
            if (errors.values().stream().noneMatch(Objects::nonNull) && receivedResponsesCount != expectedResponsesCount) {
                // Fixed message: "too less results.Expected" -> "too few results. Expected".
                String quantifier = receivedResponsesCount > expectedResponsesCount ? "many" : "few";
                throw new UnknownServerException("The server returned too " + quantifier + " results. " +
                    "Expected " + expectedResponsesCount + " but received " + receivedResponsesCount);
            }
        }
        // Records one entry per partition response (null on success); returns the count seen.
        private int validateTopicResponses(List<ReassignableTopicResponse> topicResponses,
                                           Map<TopicPartition, ApiException> errors) {
            int receivedResponsesCount = 0;
            for (ReassignableTopicResponse topicResponse : topicResponses) {
                String topicName = topicResponse.name();
                for (ReassignablePartitionResponse partResponse : topicResponse.partitions()) {
                    Errors partitionError = Errors.forCode(partResponse.errorCode());
                    TopicPartition tp = new TopicPartition(topicName, partResponse.partitionIndex());
                    if (partitionError == Errors.NONE) {
                        errors.put(tp, null);
                    } else {
                        errors.put(tp, new ApiError(partitionError, partResponse.errorMessage()).exception());
                    }
                    receivedResponsesCount += 1;
                }
            }
            return receivedResponsesCount;
        }
        @Override
        void handleFailure(Throwable throwable) {
            // Transport-level failure: fail every surviving future.
            for (KafkaFutureImpl<Void> future : futures.values()) {
                future.completeExceptionally(throwable);
            }
        }
    };
    // Nothing to send if every entry failed validation.
    if (!topicsToReassignments.isEmpty()) {
        runnable.call(call, now);
    }
    return new AlterPartitionReassignmentsResult(new HashMap<>(futures));
}
/**
 * Lists in-progress partition reassignments from the controller, optionally
 * restricted to the given partitions. Client-side validation failures complete
 * the future exceptionally before any request is sent.
 */
@Override
public ListPartitionReassignmentsResult listPartitionReassignments(Optional<Set<TopicPartition>> partitions,
ListPartitionReassignmentsOptions options) {
final KafkaFutureImpl<Map<TopicPartition, PartitionReassignment>> partitionReassignmentsFuture = new KafkaFutureImpl<>();
// Validate the explicit partition filter up front; bail out on the first invalid entry.
if (partitions.isPresent()) {
for (TopicPartition tp : partitions.get()) {
String topic = tp.topic();
int partition = tp.partition();
if (topicNameIsUnrepresentable(topic)) {
partitionReassignmentsFuture.completeExceptionally(new InvalidTopicException("The given topic name '"
+ topic + "' cannot be represented in a request."));
} else if (partition < 0) {
partitionReassignmentsFuture.completeExceptionally(new InvalidTopicException("The given partition index " +
partition + " is not valid."));
}
if (partitionReassignmentsFuture.isCompletedExceptionally())
return new ListPartitionReassignmentsResult(partitionReassignmentsFuture);
}
}
final long now = time.milliseconds();
runnable.call(new Call("listPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()),
new ControllerNodeProvider()) {
@Override
ListPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) {
ListPartitionReassignmentsRequestData listData = new ListPartitionReassignmentsRequestData();
listData.setTimeoutMs(timeoutMs);
// An absent filter leaves topics unset, which requests all ongoing reassignments.
if (partitions.isPresent()) {
// Group the requested partitions by topic for the wire format.
Map<String, ListPartitionReassignmentsTopics> reassignmentTopicByTopicName = new HashMap<>();
for (TopicPartition tp : partitions.get()) {
if (!reassignmentTopicByTopicName.containsKey(tp.topic()))
reassignmentTopicByTopicName.put(tp.topic(), new ListPartitionReassignmentsTopics().setName(tp.topic()));
reassignmentTopicByTopicName.get(tp.topic()).partitionIndexes().add(tp.partition());
}
listData.setTopics(new ArrayList<>(reassignmentTopicByTopicName.values()));
}
return new ListPartitionReassignmentsRequest.Builder(listData);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
ListPartitionReassignmentsResponse response = (ListPartitionReassignmentsResponse) abstractResponse;
Errors error = Errors.forCode(response.data().errorCode());
switch (error) {
case NONE:
break;
case NOT_CONTROLLER:
// Refreshes controller metadata and rethrows, so the call is retried.
handleNotControllerError(error);
break;
default:
partitionReassignmentsFuture.completeExceptionally(new ApiError(error, response.data().errorMessage()).exception());
break;
}
// NOTE(review): on the default error path above, the future is already
// completed exceptionally, so the complete() below should be a no-op —
// verify against KafkaFutureImpl's completion semantics.
Map<TopicPartition, PartitionReassignment> reassignmentMap = new HashMap<>();
for (OngoingTopicReassignment topicReassignment : response.data().topics()) {
String topicName = topicReassignment.name();
for (OngoingPartitionReassignment partitionReassignment : topicReassignment.partitions()) {
reassignmentMap.put(
new TopicPartition(topicName, partitionReassignment.partitionIndex()),
new PartitionReassignment(partitionReassignment.replicas(), partitionReassignment.addingReplicas(), partitionReassignment.removingReplicas())
);
}
}
partitionReassignmentsFuture.complete(reassignmentMap);
}
@Override
void handleFailure(Throwable throwable) {
// Transport-level failure: propagate directly to the caller.
partitionReassignmentsFuture.completeExceptionally(throwable);
}
}, now);
return new ListPartitionReassignmentsResult(partitionReassignmentsFuture);
}
/**
 * Checks a response's error counts for NOT_CONTROLLER and, if present,
 * delegates to the {@link Errors}-based overload (which clears the cached
 * controller and throws).
 */
private void handleNotControllerError(AbstractResponse response) throws ApiException {
    final boolean sawNotController = response.errorCounts().containsKey(Errors.NOT_CONTROLLER);
    if (sawNotController) {
        handleNotControllerError(Errors.NOT_CONTROLLER);
    }
}
private void handleNotControllerError(Errors error) throws ApiException {
// The cached controller is stale: drop it and request a metadata refresh so
// the next attempt is routed to the current controller.
metadataManager.clearController();
metadataManager.requestUpdate();
// Rethrow so the caller's retry machinery re-runs the request.
throw error.exception();
}
/**
* Returns the broker id pertaining to the given resource, or null if the resource is not associated
* with a particular broker.
*/
/**
 * Returns the broker id pertaining to the given resource, or null if the
 * resource is not associated with a particular broker.
 */
private Integer nodeFor(ConfigResource resource) {
    final ConfigResource.Type type = resource.type();
    // Only non-default BROKER configs and BROKER_LOGGER configs target a specific broker.
    final boolean brokerSpecific =
        (type == ConfigResource.Type.BROKER && !resource.isDefault())
            || type == ConfigResource.Type.BROKER_LOGGER;
    if (!brokerSpecific) {
        return null;
    }
    // For broker-scoped resources the resource name is the broker id.
    return Integer.valueOf(resource.name());
}
/**
 * Looks up the current members of the given consumer group (via a blocking
 * DescribeConsumerGroups round trip) and converts them to
 * {@code MemberIdentity} entries carrying the supplied leave-group reason.
 *
 * @param groupId the consumer group to inspect
 * @param reason  the reason to record on each member identity
 * @return identities for all current members of the group
 * @throws KafkaException if the group description cannot be retrieved
 */
private List<MemberIdentity> getMembersFromGroup(String groupId, String reason) {
    Collection<MemberDescription> members;
    try {
        // Blocks until the DescribeConsumerGroups future completes.
        members = describeConsumerGroups(Collections.singleton(groupId)).describedGroups().get(groupId).get().members();
    } catch (Exception ex) {
        // Restore the interrupt status if the blocking get() was interrupted,
        // so callers further up the stack can still observe it.
        if (ex instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new KafkaException("Encounter exception when trying to get members from group: " + groupId, ex);
    }
    List<MemberIdentity> membersToRemove = new ArrayList<>();
    for (final MemberDescription member : members) {
        MemberIdentity memberIdentity = new MemberIdentity().setReason(reason);
        // Members with a group.instance.id (static members) are addressed by it;
        // dynamic members are addressed by their member id.
        if (member.groupInstanceId().isPresent()) {
            memberIdentity.setGroupInstanceId(member.groupInstanceId().get());
        } else {
            memberIdentity.setMemberId(member.consumerId());
        }
        membersToRemove.add(memberIdentity);
    }
    return membersToRemove;
}
/**
 * Removes members from a consumer group by issuing a LeaveGroup through the
 * coordinator-aware admin driver. With {@code removeAll()}, the current
 * membership is first fetched so every member can be removed.
 */
@Override
public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId,
RemoveMembersFromConsumerGroupOptions options) {
// Use the default reason when none is supplied; otherwise truncate it to the
// broker-accepted length.
String reason = options.reason() == null || options.reason().isEmpty() ?
DEFAULT_LEAVE_GROUP_REASON : JoinGroupRequest.maybeTruncateReason(options.reason());
List<MemberIdentity> members;
if (options.removeAll()) {
// Removing everyone requires a blocking lookup of the current membership.
members = getMembersFromGroup(groupId, reason);
} else {
members = options.members().stream()
.map(m -> m.toMemberIdentity().setReason(reason))
.collect(Collectors.toList());
}
SimpleAdminApiFuture<CoordinatorKey, Map<MemberIdentity, Errors>> future =
RemoveMembersFromConsumerGroupHandler.newFuture(groupId);
RemoveMembersFromConsumerGroupHandler handler = new RemoveMembersFromConsumerGroupHandler(groupId, members, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new RemoveMembersFromConsumerGroupResult(future.get(CoordinatorKey.byGroupId(groupId)), options.members());
}
/**
 * Commits the given offsets on behalf of a consumer group via the
 * coordinator-aware admin driver.
 */
@Override
public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(
    String groupId,
    Map<TopicPartition, OffsetAndMetadata> offsets,
    AlterConsumerGroupOffsetsOptions options
) {
    // Single-group operation: one coordinator-keyed future covers the whole request.
    final SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> commitFuture =
        AlterConsumerGroupOffsetsHandler.newFuture(groupId);
    final AlterConsumerGroupOffsetsHandler commitHandler =
        new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext);
    invokeDriver(commitHandler, commitFuture, options.timeoutMs);
    return new AlterConsumerGroupOffsetsResult(commitFuture.get(CoordinatorKey.byGroupId(groupId)));
}
/**
 * Lists offsets for the given partitions. First fetches metadata for the
 * involved topics, then (via the lazily-built follow-up calls) queries each
 * partition's leader.
 */
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
ListOffsetsOptions options) {
// preparing topics list for asking metadata about them
final Map<TopicPartition, KafkaFutureImpl<ListOffsetsResultInfo>> futures = new HashMap<>(topicPartitionOffsets.size());
final Set<String> topics = new HashSet<>();
for (TopicPartition topicPartition : topicPartitionOffsets.keySet()) {
topics.add(topicPartition.topic());
futures.put(topicPartition, new KafkaFutureImpl<>());
}
final long nowMetadata = time.milliseconds();
final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
// The context carries the topics, deadline and per-partition futures through
// the metadata fetch and into the per-broker listOffsets calls.
MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> context =
new MetadataOperationContext<>(topics, options, deadline, futures);
// The Supplier defers building the broker calls until metadata is available.
Call metadataCall = getMetadataCall(context,
() -> KafkaAdminClient.this.getListOffsetsCalls(context, topicPartitionOffsets, futures));
runnable.call(metadataCall, nowMetadata);
return new ListOffsetsResult(new HashMap<>(futures));
}
// visible for benchmark
List<Call> getListOffsetsCalls(MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> context,
Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
Map<TopicPartition, KafkaFutureImpl<ListOffsetsResultInfo>> futures) {
MetadataResponse mr = context.response().orElseThrow(() -> new IllegalStateException("No Metadata response"));
Cluster clusterSnapshot = mr.buildCluster();
List<Call> calls = new ArrayList<>();
// grouping topic partitions per leader
Map<Node, Map<String, ListOffsetsTopic>> leaders = new HashMap<>();
for (Map.Entry<TopicPartition, OffsetSpec> entry: topicPartitionOffsets.entrySet()) {
OffsetSpec offsetSpec = entry.getValue();
TopicPartition tp = entry.getKey();
KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
long offsetQuery = getOffsetFromOffsetSpec(offsetSpec);
// avoid sending listOffsets request for topics with errors
if (!mr.errors().containsKey(tp.topic())) {
Node node = clusterSnapshot.leaderFor(tp);
if (node != null) {
Map<String, ListOffsetsTopic> leadersOnNode = leaders.computeIfAbsent(node, k -> new HashMap<>());
ListOffsetsTopic topic = leadersOnNode.computeIfAbsent(tp.topic(), k -> new ListOffsetsTopic().setName(tp.topic()));
topic.partitions().add(new ListOffsetsPartition().setPartitionIndex(tp.partition()).setTimestamp(offsetQuery));
} else {
future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
}
} else {
future.completeExceptionally(mr.errors().get(tp.topic()).exception());
}
}
for (final Map.Entry<Node, Map<String, ListOffsetsTopic>> entry : leaders.entrySet()) {
final int brokerId = entry.getKey().id();
calls.add(new Call("listOffsets on broker " + brokerId, context.deadline(), new ConstantNodeIdProvider(brokerId)) {
final List<ListOffsetsTopic> partitionsToQuery = new ArrayList<>(entry.getValue().values());
private boolean supportsMaxTimestamp = partitionsToQuery.stream()
.flatMap(t -> t.partitions().stream())
.anyMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP);
@Override
ListOffsetsRequest.Builder createRequest(int timeoutMs) {
return ListOffsetsRequest.Builder
.forConsumer(true, context.options().isolationLevel(), supportsMaxTimestamp)
.setTargetTimes(partitionsToQuery);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse;
Map<TopicPartition, OffsetSpec> retryTopicPartitionOffsets = new HashMap<>();
for (ListOffsetsTopicResponse topic : response.topics()) {
for (ListOffsetsPartitionResponse partition : topic.partitions()) {
TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
Errors error = Errors.forCode(partition.errorCode());
OffsetSpec offsetRequestSpec = topicPartitionOffsets.get(tp);
if (offsetRequestSpec == null) {
log.warn("Server response mentioned unknown topic partition {}", tp);
} else if (MetadataOperationContext.shouldRefreshMetadata(error)) {
retryTopicPartitionOffsets.put(tp, offsetRequestSpec);
} else if (error == Errors.NONE) {
Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
? Optional.empty()
: Optional.of(partition.leaderEpoch());
future.complete(new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch));
} else {
future.completeExceptionally(error.exception());
}
}
}
if (retryTopicPartitionOffsets.isEmpty()) {
// The server should send back a response for every topic partition. But do a sanity check anyway.
for (ListOffsetsTopic topic : partitionsToQuery) {
for (ListOffsetsPartition partition : topic.partitions()) {
TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
ApiException error = new ApiException("The response from broker " + brokerId +
" did not contain a result for topic partition " + tp);
futures.get(tp).completeExceptionally(error);
}
}
} else {
Set<String> retryTopics = retryTopicPartitionOffsets.keySet().stream().map(
TopicPartition::topic).collect(Collectors.toSet());
MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> retryContext =
new MetadataOperationContext<>(retryTopics, context.options(), context.deadline(), futures);
rescheduleMetadataTask(retryContext, () -> getListOffsetsCalls(retryContext, retryTopicPartitionOffsets, futures));
}
}
@Override
void handleFailure(Throwable throwable) {
for (ListOffsetsTopic topic : entry.getValue().values()) {
for (ListOffsetsPartition partition : topic.partitions()) {
TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
future.completeExceptionally(throwable);
}
}
}
@Override
boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
if (supportsMaxTimestamp) {
supportsMaxTimestamp = false;
// fail any unsupported futures and remove partitions from the downgraded retry
Iterator<ListOffsetsTopic> topicIterator = partitionsToQuery.iterator();
while (topicIterator.hasNext()) {
ListOffsetsTopic topic = topicIterator.next();
Iterator<ListOffsetsPartition> partitionIterator = topic.partitions().iterator();
while (partitionIterator.hasNext()) {
ListOffsetsPartition partition = partitionIterator.next();
if (partition.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP) {
futures.get(new TopicPartition(topic.name(), partition.partitionIndex()))
.completeExceptionally(new UnsupportedVersionException(
"Broker " + brokerId + " does not support MAX_TIMESTAMP offset spec"));
partitionIterator.remove();
}
}
if (topic.partitions().isEmpty()) {
topicIterator.remove();
}
}
return !partitionsToQuery.isEmpty();
}
return false;
}
});
}
return calls;
}
/**
 * Describe client quotas matching the given {@code filter}.
 * The request is sent to the least-loaded node; the returned result completes when the
 * broker responds or the call fails.
 */
@Override
public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) {
    // One future for the whole request: quota entities mapped to their quota name/value pairs.
    KafkaFutureImpl<Map<ClientQuotaEntity, Map<String, Double>>> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    runnable.call(new Call("describeClientQuotas", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        DescribeClientQuotasRequest.Builder createRequest(int timeoutMs) {
            return new DescribeClientQuotasRequest.Builder(filter);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            DescribeClientQuotasResponse response = (DescribeClientQuotasResponse) abstractResponse;
            // The response knows how to translate its wire format into the map the future expects.
            response.complete(future);
        }
        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    }, now);
    return new DescribeClientQuotasResult(future);
}
/**
 * Alter client quotas for the given entities.
 * Each alteration gets its own future, keyed by quota entity; all futures fail together
 * if the request itself fails.
 */
@Override
public AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options) {
    // One future per quota entity; duplicates in 'entries' share a single future.
    Map<ClientQuotaEntity, KafkaFutureImpl<Void>> futures = new HashMap<>(entries.size());
    for (ClientQuotaAlteration entry : entries) {
        futures.put(entry.entity(), new KafkaFutureImpl<>());
    }
    final long now = time.milliseconds();
    runnable.call(new Call("alterClientQuotas", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        AlterClientQuotasRequest.Builder createRequest(int timeoutMs) {
            return new AlterClientQuotasRequest.Builder(entries, options.validateOnly());
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            AlterClientQuotasResponse response = (AlterClientQuotasResponse) abstractResponse;
            // The response completes each per-entity future with its own success/error.
            response.complete(futures);
        }
        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return new AlterClientQuotasResult(Collections.unmodifiableMap(futures));
}
/**
 * Describe SCRAM credentials for the given users.
 * A null or empty user list requests credentials for all users; null entries inside the
 * list are skipped when building the request.
 */
@Override
public DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, DescribeUserScramCredentialsOptions options) {
    final KafkaFutureImpl<DescribeUserScramCredentialsResponseData> dataFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    Call call = new Call("describeUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        public DescribeUserScramCredentialsRequest.Builder createRequest(final int timeoutMs) {
            final DescribeUserScramCredentialsRequestData requestData = new DescribeUserScramCredentialsRequestData();
            if (users != null && !users.isEmpty()) {
                final List<UserName> userNames = new ArrayList<>(users.size());
                for (final String user : users) {
                    // Silently drop null user names rather than send a malformed request.
                    if (user != null) {
                        userNames.add(new UserName().setName(user));
                    }
                }
                requestData.setUsers(userNames);
            }
            return new DescribeUserScramCredentialsRequest.Builder(requestData);
        }
        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            DescribeUserScramCredentialsResponse response = (DescribeUserScramCredentialsResponse) abstractResponse;
            DescribeUserScramCredentialsResponseData data = response.data();
            short messageLevelErrorCode = data.errorCode();
            // A message-level (top-level) error fails the whole future.
            if (messageLevelErrorCode != Errors.NONE.code()) {
                dataFuture.completeExceptionally(Errors.forCode(messageLevelErrorCode).exception(data.errorMessage()));
            } else {
                dataFuture.complete(data);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            dataFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new DescribeUserScramCredentialsResult(dataFuture);
}
/**
 * Alter SCRAM credentials for the given users.
 *
 * Alterations that are locally invalid (empty user name, empty password, unknown SCRAM
 * mechanism, or a salted-password computation failure) are never sent to the controller.
 * Their futures are completed exceptionally only after the broker response for the
 * remaining alterations has been processed, so that all alterations see a consistent
 * error in the NOT_CONTROLLER case.
 */
@Override
public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations,
                                                                 AlterUserScramCredentialsOptions options) {
    final long now = time.milliseconds();
    final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>();
    for (UserScramCredentialAlteration alteration: alterations) {
        futures.put(alteration.user(), new KafkaFutureImpl<>());
    }
    // Users with a client-side-illegal alteration, mapped to the exception that will
    // eventually complete their future.
    final Map<String, Exception> userIllegalAlterationExceptions = new HashMap<>();
    // We need to keep track of users with deletions of an unknown SCRAM mechanism
    final String usernameMustNotBeEmptyMsg = "Username must not be empty";
    final String passwordMustNotBeEmptyMsg = "Password must not be empty";
    final String unknownScramMechanismMsg = "Unknown SCRAM mechanism";
    // Validate deletions: non-empty user name and a known SCRAM mechanism.
    alterations.stream().filter(a -> a instanceof UserScramCredentialDeletion).forEach(alteration -> {
        final String user = alteration.user();
        if (user == null || user.isEmpty()) {
            userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
        } else {
            UserScramCredentialDeletion deletion = (UserScramCredentialDeletion) alteration;
            ScramMechanism mechanism = deletion.mechanism();
            if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
                userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
            }
        }
    });
    // Creating an upsertion may throw InvalidKeyException or NoSuchAlgorithmException,
    // so keep track of which users are affected by such a failure so we can fail all their alterations later
    final Map<String, Map<ScramMechanism, AlterUserScramCredentialsRequestData.ScramCredentialUpsertion>> userInsertions = new HashMap<>();
    alterations.stream().filter(a -> a instanceof UserScramCredentialUpsertion)
        .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user()))
        .forEach(alteration -> {
            final String user = alteration.user();
            if (user == null || user.isEmpty()) {
                userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
            } else {
                UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration;
                try {
                    byte[] password = upsertion.password();
                    if (password == null || password.length == 0) {
                        userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg));
                    } else {
                        ScramMechanism mechanism = upsertion.credentialInfo().mechanism();
                        if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
                            userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                        } else {
                            userInsertions.putIfAbsent(user, new HashMap<>());
                            userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion));
                        }
                    }
                } catch (NoSuchAlgorithmException e) {
                    // we might overwrite an exception from a previous alteration, but we don't really care
                    // since we just need to mark this user as having at least one illegal alteration
                    // and make an exception instance available for completing the corresponding future exceptionally
                    userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                } catch (InvalidKeyException e) {
                    // generally shouldn't happen since we deal with the empty password case above,
                    // but we still need to catch/handle it
                    userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e));
                }
            }
        });
    // submit alterations only for users that do not have an illegal alteration as identified above
    Call call = new Call("alterUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()),
        new ControllerNodeProvider()) {
        @Override
        public AlterUserScramCredentialsRequest.Builder createRequest(int timeoutMs) {
            return new AlterUserScramCredentialsRequest.Builder(
                new AlterUserScramCredentialsRequestData().setUpsertions(alterations.stream()
                    .filter(a -> a instanceof UserScramCredentialUpsertion)
                    .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
                    .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism()))
                    .collect(Collectors.toList()))
                .setDeletions(alterations.stream()
                    .filter(a -> a instanceof UserScramCredentialDeletion)
                    .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
                    .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d))
                    .collect(Collectors.toList())));
        }
        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterUserScramCredentialsResponse response = (AlterUserScramCredentialsResponse) abstractResponse;
            // Check for controller change
            for (Errors error : response.errorCounts().keySet()) {
                if (error == Errors.NOT_CONTROLLER) {
                    handleNotControllerError(error);
                }
            }
            /* Now that we have the results for the ones we sent,
             * fail any users that have an illegal alteration as identified above.
             * Be sure to do this after the NOT_CONTROLLER error check above
             * so that all errors are consistent in that case.
             */
            userIllegalAlterationExceptions.forEach((user, exception) ->
                futures.get(user).completeExceptionally(exception));
            response.data().results().forEach(result -> {
                KafkaFutureImpl<Void> future = futures.get(result.user());
                if (future == null) {
                    log.warn("Server response mentioned unknown user {}", result.user());
                } else {
                    Errors error = Errors.forCode(result.errorCode());
                    if (error != Errors.NONE) {
                        future.completeExceptionally(error.exception(result.errorMessage()));
                    } else {
                        future.complete(null);
                    }
                }
            });
            // Sanity check: fail any future the broker response did not mention.
            completeUnrealizedFutures(
                futures.entrySet().stream(),
                user -> "The broker response did not contain a result for user " + user);
        }
        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    };
    runnable.call(call, now);
    return new AlterUserScramCredentialsResult(new HashMap<>(futures));
}
/**
 * Convert a public {@link UserScramCredentialUpsertion} into the wire-format upsertion,
 * computing the salted password for the requested mechanism.
 *
 * @throws NoSuchAlgorithmException if the SCRAM mechanism's hash algorithm is unavailable
 * @throws InvalidKeyException if the salted-password computation rejects the key material
 */
private static AlterUserScramCredentialsRequestData.ScramCredentialUpsertion getScramCredentialUpsertion(UserScramCredentialUpsertion u) throws InvalidKeyException, NoSuchAlgorithmException {
    AlterUserScramCredentialsRequestData.ScramCredentialUpsertion retval = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion();
    return retval.setName(u.user())
        .setMechanism(u.credentialInfo().mechanism().type())
        .setIterations(u.credentialInfo().iterations())
        .setSalt(u.salt())
        .setSaltedPassword(getSaltedPassword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations()));
}

/** Convert a public {@link UserScramCredentialDeletion} into the wire-format deletion. */
private static AlterUserScramCredentialsRequestData.ScramCredentialDeletion getScramCredentialDeletion(UserScramCredentialDeletion d) {
    return new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(d.user()).setMechanism(d.mechanism().type());
}

/**
 * Compute the salted password (the SCRAM "Hi" function, i.e. PBKDF2) for the given
 * mechanism, raw password, salt and iteration count.
 */
private static byte[] getSaltedPassword(ScramMechanism publicScramMechanism, byte[] password, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeyException {
    return new ScramFormatter(org.apache.kafka.common.security.scram.internals.ScramMechanism.forMechanismName(publicScramMechanism.mechanismName()))
        .hi(password, salt, iterations);
}

/**
 * @deprecated Misspelled name retained as a delegate for any remaining callers;
 * use {@link #getSaltedPassword} instead.
 */
@Deprecated
private static byte[] getSaltedPasword(ScramMechanism publicScramMechanism, byte[] password, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeyException {
    return getSaltedPassword(publicScramMechanism, password, salt, iterations);
}
/**
 * Describe finalized and supported features using an ApiVersions request sent to the
 * least-loaded node.
 */
@Override
public DescribeFeaturesResult describeFeatures(final DescribeFeaturesOptions options) {
    final KafkaFutureImpl<FeatureMetadata> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call(
        "describeFeatures", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {
        // Translate the raw ApiVersions response into the public FeatureMetadata type.
        private FeatureMetadata createFeatureMetadata(final ApiVersionsResponse response) {
            final Map<String, FinalizedVersionRange> finalizedFeatures = new HashMap<>();
            for (final FinalizedFeatureKey key : response.data().finalizedFeatures().valuesSet()) {
                finalizedFeatures.put(key.name(), new FinalizedVersionRange(key.minVersionLevel(), key.maxVersionLevel()));
            }
            // A negative epoch means the broker reported no finalized-features epoch.
            Optional<Long> finalizedFeaturesEpoch;
            if (response.data().finalizedFeaturesEpoch() >= 0L) {
                finalizedFeaturesEpoch = Optional.of(response.data().finalizedFeaturesEpoch());
            } else {
                finalizedFeaturesEpoch = Optional.empty();
            }
            final Map<String, SupportedVersionRange> supportedFeatures = new HashMap<>();
            for (final SupportedFeatureKey key : response.data().supportedFeatures().valuesSet()) {
                supportedFeatures.put(key.name(), new SupportedVersionRange(key.minVersion(), key.maxVersion()));
            }
            return new FeatureMetadata(finalizedFeatures, finalizedFeaturesEpoch, supportedFeatures);
        }
        @Override
        ApiVersionsRequest.Builder createRequest(int timeoutMs) {
            return new ApiVersionsRequest.Builder();
        }
        @Override
        void handleResponse(AbstractResponse response) {
            final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) response;
            if (apiVersionsResponse.data().errorCode() == Errors.NONE.code()) {
                future.complete(createFeatureMetadata(apiVersionsResponse));
            } else {
                future.completeExceptionally(Errors.forCode(apiVersionsResponse.data().errorCode()).exception());
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // There is only one future, so fail it directly rather than going through
            // completeAllExceptionally on a singleton list.
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new DescribeFeaturesResult(future);
}
/**
 * Update finalized features via the controller.
 *
 * @throws IllegalArgumentException if {@code featureUpdates} is null, empty, or contains
 *         a blank feature name (the message already promised null/empty rejection, but
 *         the null case previously surfaced as a NullPointerException)
 */
@Override
public UpdateFeaturesResult updateFeatures(final Map<String, FeatureUpdate> featureUpdates,
                                           final UpdateFeaturesOptions options) {
    if (featureUpdates == null || featureUpdates.isEmpty()) {
        throw new IllegalArgumentException("Feature updates can not be null or empty.");
    }
    final Map<String, KafkaFutureImpl<Void>> updateFutures = new HashMap<>();
    for (final Map.Entry<String, FeatureUpdate> entry : featureUpdates.entrySet()) {
        final String feature = entry.getKey();
        if (Utils.isBlank(feature)) {
            throw new IllegalArgumentException("Provided feature can not be empty.");
        }
        updateFutures.put(entry.getKey(), new KafkaFutureImpl<>());
    }
    final long now = time.milliseconds();
    final Call call = new Call("updateFeatures", calcDeadlineMs(now, options.timeoutMs()),
        new ControllerNodeProvider()) {
        @Override
        UpdateFeaturesRequest.Builder createRequest(int timeoutMs) {
            final UpdateFeaturesRequestData.FeatureUpdateKeyCollection featureUpdatesRequestData
                = new UpdateFeaturesRequestData.FeatureUpdateKeyCollection();
            for (Map.Entry<String, FeatureUpdate> entry : featureUpdates.entrySet()) {
                final String feature = entry.getKey();
                final FeatureUpdate update = entry.getValue();
                final UpdateFeaturesRequestData.FeatureUpdateKey requestItem =
                    new UpdateFeaturesRequestData.FeatureUpdateKey();
                requestItem.setFeature(feature);
                requestItem.setMaxVersionLevel(update.maxVersionLevel());
                requestItem.setUpgradeType(update.upgradeType().code());
                featureUpdatesRequestData.add(requestItem);
            }
            return new UpdateFeaturesRequest.Builder(
                new UpdateFeaturesRequestData()
                    .setTimeoutMs(timeoutMs)
                    .setValidateOnly(options.validateOnly())
                    .setFeatureUpdates(featureUpdatesRequestData));
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final UpdateFeaturesResponse response =
                (UpdateFeaturesResponse) abstractResponse;
            ApiError topLevelError = response.topLevelError();
            switch (topLevelError.error()) {
                case NONE:
                    // Per-feature results; each completes its own future.
                    for (final UpdatableFeatureResult result : response.data().results()) {
                        final KafkaFutureImpl<Void> future = updateFutures.get(result.feature());
                        if (future == null) {
                            log.warn("Server response mentioned unknown feature {}", result.feature());
                        } else {
                            final Errors error = Errors.forCode(result.errorCode());
                            if (error == Errors.NONE) {
                                future.complete(null);
                            } else {
                                future.completeExceptionally(error.exception(result.errorMessage()));
                            }
                        }
                    }
                    // The server should send back a response for every feature, but we do a sanity check anyway.
                    completeUnrealizedFutures(updateFutures.entrySet().stream(),
                        feature -> "The controller response did not contain a result for feature " + feature);
                    break;
                case NOT_CONTROLLER:
                    // Refresh controller metadata and retry.
                    handleNotControllerError(topLevelError.error());
                    break;
                default:
                    // A top-level error fails every feature's future.
                    for (final Map.Entry<String, KafkaFutureImpl<Void>> entry : updateFutures.entrySet()) {
                        entry.getValue().completeExceptionally(topLevelError.exception());
                    }
                    break;
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(updateFutures.values(), throwable);
        }
    };
    runnable.call(call, now);
    return new UpdateFeaturesResult(new HashMap<>(updateFutures));
}
/**
 * Describe the KRaft metadata quorum by querying the singleton __cluster_metadata
 * topic-partition and validating that the response contains exactly that partition.
 */
@Override
public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options) {
    NodeProvider provider = new LeastLoadedNodeProvider();
    final KafkaFutureImpl<QuorumInfo> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call(
        "describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) {
        // -1 timestamps from the broker mean "unknown" and map to an empty OptionalLong.
        private QuorumInfo.ReplicaState translateReplicaState(DescribeQuorumResponseData.ReplicaState replica) {
            return new QuorumInfo.ReplicaState(
                replica.replicaId(),
                replica.logEndOffset(),
                replica.lastFetchTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()),
                replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp()));
        }
        // Build the public QuorumInfo from the (already validated) partition data.
        private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.PartitionData partition) {
            List<QuorumInfo.ReplicaState> voters = partition.currentVoters().stream()
                .map(this::translateReplicaState)
                .collect(Collectors.toList());
            List<QuorumInfo.ReplicaState> observers = partition.observers().stream()
                .map(this::translateReplicaState)
                .collect(Collectors.toList());
            return new QuorumInfo(
                partition.leaderId(),
                partition.leaderEpoch(),
                partition.highWatermark(),
                voters,
                observers
            );
        }
        @Override
        DescribeQuorumRequest.Builder createRequest(int timeoutMs) {
            return new Builder(DescribeQuorumRequest.singletonRequest(
                new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition())));
        }
        // NOTE(review): exceptions thrown below are assumed to be caught by the Call
        // machinery and routed to handleFailure — confirm against the Call implementation.
        @Override
        void handleResponse(AbstractResponse response) {
            final DescribeQuorumResponse quorumResponse = (DescribeQuorumResponse) response;
            if (quorumResponse.data().errorCode() != Errors.NONE.code()) {
                throw Errors.forCode(quorumResponse.data().errorCode()).exception();
            }
            // We asked for a single topic, so anything else is a malformed response.
            if (quorumResponse.data().topics().size() != 1) {
                String msg = String.format("DescribeMetadataQuorum received %d topics when 1 was expected",
                    quorumResponse.data().topics().size());
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            DescribeQuorumResponseData.TopicData topic = quorumResponse.data().topics().get(0);
            if (!topic.topicName().equals(CLUSTER_METADATA_TOPIC_NAME)) {
                String msg = String.format("DescribeMetadataQuorum received a topic with name %s when %s was expected",
                    topic.topicName(), CLUSTER_METADATA_TOPIC_NAME);
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            if (topic.partitions().size() != 1) {
                String msg = String.format("DescribeMetadataQuorum received a topic %s with %d partitions when 1 was expected",
                    topic.topicName(), topic.partitions().size());
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            DescribeQuorumResponseData.PartitionData partition = topic.partitions().get(0);
            if (partition.partitionIndex() != CLUSTER_METADATA_TOPIC_PARTITION.partition()) {
                String msg = String.format("DescribeMetadataQuorum received a single partition with index %d when %d was expected",
                    partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition());
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            if (partition.errorCode() != Errors.NONE.code()) {
                throw Errors.forCode(partition.errorCode()).exception();
            }
            future.complete(createQuorumResult(partition));
        }
        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new DescribeMetadataQuorumResult(future);
}
/**
 * Unregister a broker (KRaft mode) by broker ID.
 */
@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
    final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        UnregisterBrokerRequest.Builder createRequest(int timeoutMs) {
            UnregisterBrokerRequestData data =
                new UnregisterBrokerRequestData().setBrokerId(brokerId);
            return new UnregisterBrokerRequest.Builder(data);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final UnregisterBrokerResponse response =
                (UnregisterBrokerResponse) abstractResponse;
            Errors error = Errors.forCode(response.data().errorCode());
            switch (error) {
                case NONE:
                    future.complete(null);
                    break;
                case REQUEST_TIMED_OUT:
                    // Deliberately rethrown rather than completing the future —
                    // presumably so the Call machinery retries the request; confirm
                    // against the Call retry semantics.
                    throw error.exception();
                default:
                    log.error("Unregister broker request for broker ID {} failed: {}",
                        brokerId, error.message());
                    future.completeExceptionally(error.exception());
                    break;
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new UnregisterBrokerResult(future);
}
/**
 * Describe the active producer state for the given partitions via the admin API driver.
 */
@Override
public DescribeProducersResult describeProducers(Collection<TopicPartition> topicPartitions, DescribeProducersOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, DescribeProducersResult.PartitionProducerState> result =
        DescribeProducersHandler.newFuture(topicPartitions);
    invokeDriver(new DescribeProducersHandler(options, logContext), result, options.timeoutMs);
    return new DescribeProducersResult(result.all());
}
/**
 * Describe the given transactional IDs via the admin API driver.
 */
@Override
public DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, TransactionDescription> result =
        DescribeTransactionsHandler.newFuture(transactionalIds);
    invokeDriver(new DescribeTransactionsHandler(logContext), result, options.timeoutMs);
    return new DescribeTransactionsResult(result.all());
}
/**
 * Abort an open transaction on a single partition via the admin API driver.
 */
@Override
public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, Void> result =
        AbortTransactionHandler.newFuture(Collections.singleton(spec.topicPartition()));
    invokeDriver(new AbortTransactionHandler(spec, logContext), result, options.timeoutMs);
    return new AbortTransactionResult(result.all());
}
/**
 * List transactions on every broker in the cluster via the admin API driver.
 */
@Override
public ListTransactionsResult listTransactions(ListTransactionsOptions options) {
    AllBrokersStrategy.AllBrokersFuture<Collection<TransactionListing>> result =
        ListTransactionsHandler.newFuture();
    invokeDriver(new ListTransactionsHandler(options, logContext), result, options.timeoutMs);
    return new ListTransactionsResult(result.all());
}
/**
 * Fence out the producers for the given transactional IDs via the admin API driver.
 */
@Override
public FenceProducersResult fenceProducers(Collection<String> transactionalIds, FenceProducersOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, ProducerIdAndEpoch> result =
        FenceProducersHandler.newFuture(transactionalIds);
    invokeDriver(new FenceProducersHandler(logContext), result, options.timeoutMs);
    return new FenceProducersResult(result.all());
}
/**
 * Create an {@link AdminApiDriver} for the given handler/future pair and kick off its
 * first round of requests. A null {@code timeoutMs} falls back to the default timeout
 * inside {@code calcDeadlineMs}.
 */
private <K, V> void invokeDriver(
    AdminApiHandler<K, V> handler,
    AdminApiFuture<K, V> future,
    Integer timeoutMs
) {
    final long startMs = time.milliseconds();
    final AdminApiDriver<K, V> driver = new AdminApiDriver<>(
        handler,
        future,
        calcDeadlineMs(startMs, timeoutMs),
        retryBackoffMs,
        logContext
    );
    maybeSendRequests(driver, startMs);
}
/**
 * Drain whatever requests the driver currently considers ready and enqueue each one
 * as a {@code Call}.
 */
private <K, V> void maybeSendRequests(AdminApiDriver<K, V> driver, long currentTimeMs) {
    driver.poll().forEach(spec -> runnable.call(newCall(driver, spec), currentTimeMs));
}
/**
 * Wrap a driver {@code RequestSpec} in a {@code Call}. Responses and failures are fed
 * back to the driver, which may schedule follow-up requests (sent immediately via
 * {@code maybeSendRequests}).
 */
private <K, V> Call newCall(AdminApiDriver<K, V> driver, AdminApiDriver.RequestSpec<K> spec) {
    // Route to the spec's designated broker if it has one, otherwise any least-loaded node.
    NodeProvider nodeProvider = spec.scope.destinationBrokerId().isPresent() ?
        new ConstantNodeIdProvider(spec.scope.destinationBrokerId().getAsInt()) :
        new LeastLoadedNodeProvider();
    return new Call(spec.name, spec.nextAllowedTryMs, spec.tries, spec.deadlineMs, nodeProvider) {
        @Override
        AbstractRequest.Builder<?> createRequest(int timeoutMs) {
            return spec.request;
        }
        @Override
        void handleResponse(AbstractResponse response) {
            long currentTimeMs = time.milliseconds();
            driver.onResponse(currentTimeMs, spec, response, this.curNode());
            maybeSendRequests(driver, currentTimeMs);
        }
        @Override
        void handleFailure(Throwable throwable) {
            long currentTimeMs = time.milliseconds();
            driver.onFailure(currentTimeMs, spec, throwable);
            maybeSendRequests(driver, currentTimeMs);
        }
        @Override
        void maybeRetry(long currentTimeMs, Throwable throwable) {
            if (throwable instanceof DisconnectException) {
                // Disconnects are a special case. We want to give the driver a chance
                // to retry lookup rather than getting stuck on a node which is down.
                // For example, if a partition leader shuts down after our metadata query,
                // then we might get a disconnect. We want to try to find the new partition
                // leader rather than retrying on the same node.
                driver.onFailure(currentTimeMs, spec, throwable);
                maybeSendRequests(driver, currentTimeMs);
            } else {
                super.maybeRetry(currentTimeMs, throwable);
            }
        }
    };
}
/**
 * Map a public {@link OffsetSpec} to the sentinel timestamp used on the wire by the
 * ListOffsets API. Any unrecognized spec falls back to the "latest" sentinel.
 */
private long getOffsetFromOffsetSpec(OffsetSpec offsetSpec) {
    if (offsetSpec instanceof TimestampSpec) {
        // A concrete timestamp is passed through as-is.
        return ((TimestampSpec) offsetSpec).timestamp();
    }
    if (offsetSpec instanceof OffsetSpec.EarliestSpec) {
        return ListOffsetsRequest.EARLIEST_TIMESTAMP;
    }
    if (offsetSpec instanceof OffsetSpec.MaxTimestampSpec) {
        return ListOffsetsRequest.MAX_TIMESTAMP;
    }
    return ListOffsetsRequest.LATEST_TIMESTAMP;
}
/**
 * Look up the error for {@code subKey} in a batched response's sub-level error map.
 * Returns the mapped error's exception when the key is present, otherwise an
 * {@link IllegalArgumentException} carrying {@code keyNotFoundMsg}.
 */
static <K> Throwable getSubLevelError(Map<K, Errors> subLevelErrors, K subKey, String keyNotFoundMsg) {
    return subLevelErrors.containsKey(subKey)
        ? subLevelErrors.get(subKey).exception()
        : new IllegalArgumentException(keyNotFoundMsg);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.List;
/**
* Options for {@link Admin#listConsumerGroupOffsets(java.util.Map)} and {@link Admin#listConsumerGroupOffsets(String)}.
* <p>
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class ListConsumerGroupOffsetsOptions extends AbstractOptions<ListConsumerGroupOffsetsOptions> {
private List<TopicPartition> topicPartitions;
private boolean requireStable = false;
/**
* Set the topic partitions to list as part of the result.
* {@code null} includes all topic partitions.
* <p>
* @deprecated Since 3.3.
* Use {@link Admin#listConsumerGroupOffsets(java.util.Map, ListConsumerGroupOffsetsOptions)}
* to specify topic partitions.
*
* @param topicPartitions List of topic partitions to include
* @return This ListGroupOffsetsOptions
*/
@Deprecated
public ListConsumerGroupOffsetsOptions topicPartitions(List<TopicPartition> topicPartitions) {
this.topicPartitions = topicPartitions;
return this;
}
/**
* Sets an optional requireStable flag.
*/
public ListConsumerGroupOffsetsOptions requireStable(final boolean requireStable) {
this.requireStable = requireStable;
return this;
}
/**
* Returns a list of topic partitions to add as part of the result.
* <p>
* @deprecated Since 3.3.
* Use {@link Admin#listConsumerGroupOffsets(java.util.Map, ListConsumerGroupOffsetsOptions)}
* to specify topic partitions.
*/
@Deprecated
public List<TopicPartition> topicPartitions() {
return topicPartitions;
}
public boolean requireStable() {
return requireStable;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.internals.CoordinatorKey;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* The result of the {@link Admin#listConsumerGroupOffsets(Map)} and
* {@link Admin#listConsumerGroupOffsets(String)} call.
* <p>
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class ListConsumerGroupOffsetsResult {
// One future per requested group, keyed by group id (flattened from the internal CoordinatorKey).
final Map<String, KafkaFuture<Map<TopicPartition, OffsetAndMetadata>>> futures;
ListConsumerGroupOffsetsResult(final Map<CoordinatorKey, KafkaFuture<Map<TopicPartition, OffsetAndMetadata>>> futures) {
this.futures = futures.entrySet().stream()
.collect(Collectors.toMap(e -> e.getKey().idValue, Entry::getValue));
}
/**
* Return a future which yields a map of topic partitions to OffsetAndMetadata objects.
* If the group does not have a committed offset for this partition, the corresponding value in the returned map will be null.
*
* @throws IllegalStateException if offsets for more than one group were requested
*/
public KafkaFuture<Map<TopicPartition, OffsetAndMetadata>> partitionsToOffsetAndMetadata() {
if (futures.size() != 1) {
throw new IllegalStateException("Offsets from multiple consumer groups were requested. " +
"Use partitionsToOffsetAndMetadata(groupId) instead to get future for a specific group.");
}
return futures.values().iterator().next();
}
/**
* Return a future which yields a map of topic partitions to OffsetAndMetadata objects for
* the specified group. If the group doesn't have a committed offset for a specific
* partition, the corresponding value in the returned map will be null.
*
* @throws IllegalArgumentException if offsets for the given group id were not requested
*/
public KafkaFuture<Map<TopicPartition, OffsetAndMetadata>> partitionsToOffsetAndMetadata(String groupId) {
if (!futures.containsKey(groupId))
throw new IllegalArgumentException("Offsets for consumer group '" + groupId + "' were not requested.");
return futures.get(groupId);
}
/**
* Return a future which yields all {@code Map<String, Map<TopicPartition, OffsetAndMetadata>>} objects,
* if requests for all the groups succeed.
*/
public KafkaFuture<Map<String, Map<TopicPartition, OffsetAndMetadata>>> all() {
return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply(
nil -> {
Map<String, Map<TopicPartition, OffsetAndMetadata>> listedConsumerGroupOffsets = new HashMap<>(futures.size());
futures.forEach((key, future) -> {
try {
listedConsumerGroupOffsets.put(key, future.get());
} catch (InterruptedException | ExecutionException e) {
// This should be unreachable, since the KafkaFuture#allOf already ensured
// that all of the futures completed successfully.
throw new RuntimeException(e);
}
});
return listedConsumerGroupOffsets;
});
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Objects;
/**
 * Specification of consumer group offsets to list using {@link Admin#listConsumerGroupOffsets(java.util.Map)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class ListConsumerGroupOffsetsSpec {
    private Collection<TopicPartition> topicPartitions;

    /**
     * Set the topic partitions whose offsets are to be listed for a consumer group.
     * Passing {@code null} selects every partition of the group.
     *
     * @param topicPartitions List of topic partitions to include
     * @return This ListConsumerGroupOffsetSpec
     */
    public ListConsumerGroupOffsetsSpec topicPartitions(Collection<TopicPartition> topicPartitions) {
        this.topicPartitions = topicPartitions;
        return this;
    }

    /**
     * Returns the topic partitions whose offsets are to be listed for a consumer group,
     * or {@code null} when offsets of all partitions of the group are to be listed.
     */
    public Collection<TopicPartition> topicPartitions() {
        return topicPartitions;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this)
            return true;
        if (!(o instanceof ListConsumerGroupOffsetsSpec))
            return false;
        ListConsumerGroupOffsetsSpec other = (ListConsumerGroupOffsetsSpec) o;
        return Objects.equals(topicPartitions, other.topicPartitions);
    }

    @Override
    public int hashCode() {
        // Objects.hash (not Objects.hashCode) keeps hash values identical to previous releases.
        return Objects.hash(topicPartitions);
    }

    @Override
    public String toString() {
        return "ListConsumerGroupOffsetsSpec(" + "topicPartitions=" + topicPartitions + ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * Options for {@link Admin#listConsumerGroups()}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class ListConsumerGroupsOptions extends AbstractOptions<ListConsumerGroupsOptions> {
    // Empty set means "no state filter": all groups are returned.
    private Set<ConsumerGroupState> states = Collections.emptySet();

    /**
     * If states is set, only groups in these states will be returned by listConsumerGroups().
     * Otherwise, all groups are returned.
     * This operation is supported by brokers with version 2.6.0 or later.
     */
    public ListConsumerGroupsOptions inStates(Set<ConsumerGroupState> states) {
        // A null argument is treated the same as "no filter"; otherwise take a defensive copy.
        if (states == null) {
            this.states = Collections.emptySet();
        } else {
            this.states = new HashSet<>(states);
        }
        return this;
    }

    /**
     * Returns the list of States that are requested or empty if no states have been specified.
     */
    public Set<ConsumerGroupState> states() {
        return states;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import java.util.ArrayList;
import java.util.Collection;
/**
 * The result of the {@link Admin#listConsumerGroups()} call.
 * <p>
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class ListConsumerGroupsResult {
    private final KafkaFutureImpl<Collection<ConsumerGroupListing>> all;
    private final KafkaFutureImpl<Collection<ConsumerGroupListing>> valid;
    private final KafkaFutureImpl<Collection<Throwable>> errors;

    ListConsumerGroupsResult(KafkaFuture<Collection<Object>> future) {
        this.all = new KafkaFutureImpl<>();
        this.valid = new KafkaFutureImpl<>();
        this.errors = new KafkaFutureImpl<>();
        // Once the listing completes, split the heterogeneous results into successful
        // listings and errors, and complete the three derived futures accordingly.
        // (Lambda instead of an anonymous BaseFunction, matching the idiom used by the
        // other admin result classes.)
        future.thenApply(results -> {
            ArrayList<Throwable> curErrors = new ArrayList<>();
            ArrayList<ConsumerGroupListing> curValid = new ArrayList<>();
            for (Object resultObject : results) {
                if (resultObject instanceof Throwable) {
                    curErrors.add((Throwable) resultObject);
                } else {
                    curValid.add((ConsumerGroupListing) resultObject);
                }
            }
            // all() fails fast on the first error; valid() and errors() always complete.
            if (!curErrors.isEmpty()) {
                all.completeExceptionally(curErrors.get(0));
            } else {
                all.complete(curValid);
            }
            valid.complete(curValid);
            errors.complete(curErrors);
            return null;
        });
    }

    /**
     * Returns a future that yields either an exception, or the full set of consumer group
     * listings.
     *
     * In the event of a failure, the future yields nothing but the first exception which
     * occurred.
     */
    public KafkaFuture<Collection<ConsumerGroupListing>> all() {
        return all;
    }

    /**
     * Returns a future which yields just the valid listings.
     *
     * This future never fails with an error, no matter what happens. Errors are completely
     * ignored. If nothing can be fetched, an empty collection is yielded.
     * If there is an error, but some results can be returned, this future will yield
     * those partial results. When using this future, it is a good idea to also check
     * the errors future so that errors can be displayed and handled.
     */
    public KafkaFuture<Collection<ConsumerGroupListing>> valid() {
        return valid;
    }

    /**
     * Returns a future which yields just the errors which occurred.
     *
     * If this future yields a non-empty collection, it is very likely that elements are
     * missing from the valid() set.
     *
     * This future itself never fails with an error. In the event of an error, this future
     * will successfully yield a collection containing at least one exception.
     */
    public KafkaFuture<Collection<Throwable>> errors() {
        return errors;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListOffsetsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
* Options for {@link AdminClient#listOffsets(Map)}.
*
* The API of this class is evolving, see {@link AdminClient} for details.
*/
@InterfaceStability.Evolving
public class ListOffsetsOptions extends AbstractOptions<ListOffsetsOptions> {
// Isolation level used for the list-offsets request; immutable once constructed.
private final IsolationLevel isolationLevel;
/**
* Create options using the default isolation level, {@link IsolationLevel#READ_UNCOMMITTED}.
*/
public ListOffsetsOptions() {
this(IsolationLevel.READ_UNCOMMITTED);
}
/**
* Create options using the given isolation level.
*
* @param isolationLevel the isolation level to use for the request
*/
public ListOffsetsOptions(IsolationLevel isolationLevel) {
this.isolationLevel = isolationLevel;
}
/**
* Returns the isolation level configured for this request.
*/
public IsolationLevel isolationLevel() {
return isolationLevel;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListOffsetsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * The result of the {@link AdminClient#listOffsets(Map)} call.
 *
 * The API of this class is evolving, see {@link AdminClient} for details.
 */
@InterfaceStability.Evolving
public class ListOffsetsResult {
    // One future per requested partition.
    private final Map<TopicPartition, KafkaFuture<ListOffsetsResultInfo>> futures;

    public ListOffsetsResult(Map<TopicPartition, KafkaFuture<ListOffsetsResultInfo>> futures) {
        this.futures = futures;
    }

    /**
     * Return a future which can be used to check the result for a given partition.
     *
     * @throws IllegalArgumentException if listing offsets for the partition was not attempted
     */
    public KafkaFuture<ListOffsetsResultInfo> partitionResult(final TopicPartition partition) {
        KafkaFuture<ListOffsetsResultInfo> future = futures.get(partition);
        if (future == null) {
            throw new IllegalArgumentException(
                "List Offsets for partition \"" + partition + "\" was not attempted");
        }
        return future;
    }

    /**
     * Return a future which succeeds only if offsets for all specified partitions have been successfully
     * retrieved.
     */
    public KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> all() {
        // Lambda instead of an anonymous BaseFunction, matching the idiom used by the
        // other admin result classes (e.g. ListConsumerGroupOffsetsResult#all).
        return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]))
            .thenApply(unused -> {
                Map<TopicPartition, ListOffsetsResultInfo> offsets = new HashMap<>(futures.size());
                for (Map.Entry<TopicPartition, KafkaFuture<ListOffsetsResultInfo>> entry : futures.entrySet()) {
                    try {
                        offsets.put(entry.getKey(), entry.getValue().get());
                    } catch (InterruptedException | ExecutionException e) {
                        // This should be unreachable, because allOf ensured that all the futures completed successfully.
                        throw new RuntimeException(e);
                    }
                }
                return offsets;
            });
    }

    /**
     * The offset information returned for a single partition.
     */
    public static class ListOffsetsResultInfo {
        private final long offset;
        private final long timestamp;
        private final Optional<Integer> leaderEpoch;

        public ListOffsetsResultInfo(long offset, long timestamp, Optional<Integer> leaderEpoch) {
            this.offset = offset;
            this.timestamp = timestamp;
            this.leaderEpoch = leaderEpoch;
        }

        /** The offset returned by the broker for this partition. */
        public long offset() {
            return offset;
        }

        /** The timestamp associated with the returned offset. */
        public long timestamp() {
            return timestamp;
        }

        /** The leader epoch of the returned offset, if known. */
        public Optional<Integer> leaderEpoch() {
            return leaderEpoch;
        }

        @Override
        public String toString() {
            return "ListOffsetsResultInfo(offset=" + offset + ", timestamp=" + timestamp + ", leaderEpoch="
                    + leaderEpoch + ")";
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListPartitionReassignmentsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* Options for {@link AdminClient#listPartitionReassignments(ListPartitionReassignmentsOptions)}
*
* The API of this class is evolving. See {@link AdminClient} for details.
*/
@InterfaceStability.Evolving
public class ListPartitionReassignmentsOptions extends AbstractOptions<ListPartitionReassignmentsOptions> {
// No options beyond the inherited timeoutMs are currently supported.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListPartitionReassignmentsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import java.util.Map;
/**
 * The result of {@link AdminClient#listPartitionReassignments(ListPartitionReassignmentsOptions)}.
 *
 * The API of this class is evolving. See {@link AdminClient} for details.
 */
public class ListPartitionReassignmentsResult {
    // Future yielding the ongoing reassignments, keyed by partition.
    private final KafkaFuture<Map<TopicPartition, PartitionReassignment>> reassignmentsFuture;

    ListPartitionReassignmentsResult(KafkaFuture<Map<TopicPartition, PartitionReassignment>> reassignments) {
        this.reassignmentsFuture = reassignments;
    }

    /**
     * Return a future which yields a map containing each partition's reassignments
     */
    public KafkaFuture<Map<TopicPartition, PartitionReassignment>> reassignments() {
        return reassignmentsFuture;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListTopicsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
/**
* Options for {@link Admin#listTopics()}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class ListTopicsOptions extends AbstractOptions<ListTopicsOptions> {
// Whether internal topics should be included in the listing; defaults to false.
private boolean listInternal = false;
/**
* Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
* AdminClient should be used.
*/
// This method is retained to keep binary compatibility with 0.11
public ListTopicsOptions timeoutMs(Integer timeoutMs) {
this.timeoutMs = timeoutMs;
return this;
}
/**
* Set whether we should list internal topics.
*
* @param listInternal Whether we should list internal topics.
* @return This ListTopicsOptions object.
*/
public ListTopicsOptions listInternal(boolean listInternal) {
this.listInternal = listInternal;
return this;
}
/**
* Return true if we should list internal topics.
*/
public boolean shouldListInternal() {
return listInternal;
}
@Override
public String toString() {
return "ListTopicsOptions(" +
"listInternal=" + listInternal +
')';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ListTopicsOptions that = (ListTopicsOptions) o;
return listInternal == that.listInternal;
}
@Override
public int hashCode() {
return Objects.hash(listInternal);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListTopicsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
 * The result of the {@link Admin#listTopics()} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class ListTopicsResult {
    final KafkaFuture<Map<String, TopicListing>> future;

    ListTopicsResult(KafkaFuture<Map<String, TopicListing>> future) {
        this.future = future;
    }

    /**
     * Return a future which yields a map of topic names to TopicListing objects.
     */
    public KafkaFuture<Map<String, TopicListing>> namesToListings() {
        return future;
    }

    /**
     * Return a future which yields a collection of TopicListing objects.
     */
    public KafkaFuture<Collection<TopicListing>> listings() {
        return future.thenApply(Map::values);
    }

    /**
     * Return a future which yields a collection of topic names.
     */
    public KafkaFuture<Set<String>> names() {
        return future.thenApply(Map::keySet);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListTransactionsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
/**
 * Options for {@link Admin#listTransactions()}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class ListTransactionsOptions extends AbstractOptions<ListTransactionsOptions> {
    // Empty sets mean "no filter": all states / all producerIds are returned.
    private Set<TransactionState> filteredStates = Collections.emptySet();
    private Set<Long> filteredProducerIds = Collections.emptySet();

    /**
     * Filter only the transactions that are in a specific set of states. If no filter
     * is specified or if the passed set of states is null or empty, then transactions in all
     * states will be returned.
     *
     * @param states the set of states to filter by ({@code null} is treated as no filter)
     * @return this object
     */
    public ListTransactionsOptions filterStates(Collection<TransactionState> states) {
        // Treat null as "no filter" for consistency with ListConsumerGroupsOptions#inStates
        // instead of throwing NullPointerException from the HashSet copy.
        this.filteredStates = (states == null) ? Collections.emptySet() : new HashSet<>(states);
        return this;
    }

    /**
     * Filter only the transactions from producers in a specific set of producerIds.
     * If no filter is specified or if the passed collection of producerIds is null or empty,
     * then the transactions of all producerIds will be returned.
     *
     * @param producerIdFilters the set of producerIds to filter by ({@code null} is treated as no filter)
     * @return this object
     */
    public ListTransactionsOptions filterProducerIds(Collection<Long> producerIdFilters) {
        // Null-tolerant for the same reason as filterStates above.
        this.filteredProducerIds = (producerIdFilters == null) ? Collections.emptySet() : new HashSet<>(producerIdFilters);
        return this;
    }

    /**
     * Returns the set of states to be filtered or empty if no states have been specified.
     *
     * @return the current set of filtered states (empty means that no states are filtered and all
     *         transactions will be returned)
     */
    public Set<TransactionState> filteredStates() {
        return filteredStates;
    }

    /**
     * Returns the set of producerIds that are being filtered or empty if none have been specified.
     *
     * @return the current set of filtered producerIds (empty means that no producerIds are filtered and
     *         all transactions will be returned)
     */
    public Set<Long> filteredProducerIds() {
        return filteredProducerIds;
    }

    @Override
    public String toString() {
        return "ListTransactionsOptions(" +
            "filteredStates=" + filteredStates +
            ", filteredProducerIds=" + filteredProducerIds +
            ", timeoutMs=" + timeoutMs +
            ')';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ListTransactionsOptions that = (ListTransactionsOptions) o;
        return Objects.equals(filteredStates, that.filteredStates) &&
            Objects.equals(filteredProducerIds, that.filteredProducerIds);
    }

    @Override
    public int hashCode() {
        return Objects.hash(filteredStates, filteredProducerIds);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ListTransactionsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* The result of the {@link Admin#listTransactions()} call.
* <p>
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class ListTransactionsResult {
private final KafkaFuture<Map<Integer, KafkaFutureImpl<Collection<TransactionListing>>>> future;
ListTransactionsResult(KafkaFuture<Map<Integer, KafkaFutureImpl<Collection<TransactionListing>>>> future) {
this.future = future;
}
/**
* Get all transaction listings. If any of the underlying requests fail, then the future
* returned from this method will also fail with the first encountered error.
*
* @return A future containing the collection of transaction listings. The future completes
* when all transaction listings are available and fails after any non-retriable error.
*/
public KafkaFuture<Collection<TransactionListing>> all() {
return allByBrokerId().thenApply(map -> {
List<TransactionListing> allListings = new ArrayList<>();
for (Collection<TransactionListing> listings : map.values()) {
allListings.addAll(listings);
}
return allListings;
});
}
/**
* Get a future which returns a map containing the underlying listing future for each broker
* in the cluster. This is useful, for example, if a partial listing of transactions is
* sufficient, or if you want more granular error details.
*
* @return A future containing a map of futures by broker which complete individually when
* their respective transaction listings are available. The top-level future returned
* from this method may fail if the admin client is unable to lookup the available
* brokers in the cluster.
*/
public KafkaFuture<Map<Integer, KafkaFuture<Collection<TransactionListing>>>> byBrokerId() {
KafkaFutureImpl<Map<Integer, KafkaFuture<Collection<TransactionListing>>>> result = new KafkaFutureImpl<>();
future.whenComplete((brokerFutures, exception) -> {
if (brokerFutures != null) {
Map<Integer, KafkaFuture<Collection<TransactionListing>>> brokerFuturesCopy =
new HashMap<>(brokerFutures.size());
brokerFuturesCopy.putAll(brokerFutures);
result.complete(brokerFuturesCopy);
} else {
result.completeExceptionally(exception);
}
});
return result;
}
/**
* Get all transaction listings in a map which is keyed by the ID of respective broker
* that is currently managing them. If any of the underlying requests fail, then the future
* returned from this method will also fail with the first encountered error.
*
* @return A future containing a map from the broker ID to the transactions hosted by that
* broker respectively. This future completes when all transaction listings are
* available and fails after any non-retriable error.
*/
public KafkaFuture<Map<Integer, Collection<TransactionListing>>> allByBrokerId() {
    KafkaFutureImpl<Map<Integer, Collection<TransactionListing>>> allFuture = new KafkaFutureImpl<>();
    // Accumulates each broker's listings as its individual future completes.
    Map<Integer, Collection<TransactionListing>> allListingsMap = new HashMap<>();
    future.whenComplete((map, topLevelException) -> {
        // The broker lookup itself failed: fail the aggregate future immediately.
        if (topLevelException != null) {
            allFuture.completeExceptionally(topLevelException);
            return;
        }
        // Track which brokers have not yet delivered their listings.
        Set<Integer> remainingResponses = new HashSet<>(map.keySet());
        map.forEach((brokerId, future) -> {
            future.whenComplete((listings, brokerException) -> {
                if (brokerException != null) {
                    // First broker-level failure wins; later completions are ignored
                    // via the isDone() guard below.
                    allFuture.completeExceptionally(brokerException);
                } else if (!allFuture.isDone()) {
                    allListingsMap.put(brokerId, listings);
                    remainingResponses.remove(brokerId);
                    if (remainingResponses.isEmpty()) {
                        // Every broker responded successfully: publish the merged map.
                        allFuture.complete(allListingsMap);
                    }
                }
            });
        });
    });
    // NOTE(review): allListingsMap and remainingResponses are plain HashMap/HashSet;
    // this assumes the per-broker whenComplete callbacks are not invoked concurrently —
    // confirm against KafkaFutureImpl's completion semantics.
    return allFuture;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/LogDirDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ApiException;
import java.util.Map;
import java.util.OptionalLong;
import static java.util.Collections.unmodifiableMap;
import static org.apache.kafka.common.requests.DescribeLogDirsResponse.UNKNOWN_VOLUME_BYTES;
/**
* A description of a log directory on a particular broker.
*/
public class LogDirDescription {
    private final Map<TopicPartition, ReplicaInfo> replicaInfos;
    private final ApiException error;
    private final OptionalLong totalBytes;
    private final OptionalLong usableBytes;

    public LogDirDescription(ApiException error, Map<TopicPartition, ReplicaInfo> replicaInfos) {
        this(error, replicaInfos, UNKNOWN_VOLUME_BYTES, UNKNOWN_VOLUME_BYTES);
    }

    public LogDirDescription(ApiException error, Map<TopicPartition, ReplicaInfo> replicaInfos, long totalBytes, long usableBytes) {
        this.error = error;
        this.replicaInfos = replicaInfos;
        this.totalBytes = volumeSize(totalBytes);
        this.usableBytes = volumeSize(usableBytes);
    }

    // Translates the wire sentinel UNKNOWN_VOLUME_BYTES into an absent OptionalLong.
    private static OptionalLong volumeSize(long sizeBytes) {
        if (sizeBytes == UNKNOWN_VOLUME_BYTES) {
            return OptionalLong.empty();
        }
        return OptionalLong.of(sizeBytes);
    }

    /**
     * Returns `ApiException` if the log directory is offline or an error occurred, otherwise returns null.
     * <ul>
     * <li> KafkaStorageException - The log directory is offline.
     * <li> UnknownServerException - The server experienced an unexpected error when processing the request.
     * </ul>
     */
    public ApiException error() {
        return error;
    }

    /**
     * A map from topic partition to replica information for that partition
     * in this log directory.
     */
    public Map<TopicPartition, ReplicaInfo> replicaInfos() {
        return unmodifiableMap(replicaInfos);
    }

    /**
     * The total size of the volume this log directory is on or empty if the broker did not return a value.
     * For volumes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned.
     */
    public OptionalLong totalBytes() {
        return totalBytes;
    }

    /**
     * The usable size on the volume this log directory is on or empty if the broker did not return a value.
     * For usable sizes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned.
     */
    public OptionalLong usableBytes() {
        return usableBytes;
    }

    @Override
    public String toString() {
        return "LogDirDescription(" +
            "replicaInfos=" + replicaInfos +
            ", error=" + error +
            ", totalBytes=" + totalBytes +
            ", usableBytes=" + usableBytes +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/MemberAssignment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Utils;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
/**
* A description of the assignments of a specific group member.
*/
public class MemberAssignment {
    private final Set<TopicPartition> topicPartitions;

    /**
     * Creates an instance holding an immutable snapshot of the given partitions.
     *
     * @param topicPartitions List of topic partitions
     */
    public MemberAssignment(Set<TopicPartition> topicPartitions) {
        if (topicPartitions == null) {
            this.topicPartitions = Collections.<TopicPartition>emptySet();
        } else {
            // Defensive copy so later mutation of the caller's set has no effect.
            this.topicPartitions = Collections.unmodifiableSet(new HashSet<>(topicPartitions));
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        return Objects.equals(topicPartitions, ((MemberAssignment) o).topicPartitions);
    }

    @Override
    public int hashCode() {
        // Objects.hashCode yields 0 for null, matching the previous explicit check.
        return Objects.hashCode(topicPartitions);
    }

    /**
     * The topic partitions assigned to a group member.
     */
    public Set<TopicPartition> topicPartitions() {
        return topicPartitions;
    }

    @Override
    public String toString() {
        return "(topicPartitions=" + Utils.join(topicPartitions, ",") + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/MemberDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Collections;
import java.util.Objects;
import java.util.Optional;
/**
* A detailed description of a single group instance in the cluster.
*/
public class MemberDescription {
    private final String memberId;
    private final Optional<String> groupInstanceId;
    private final String clientId;
    private final String host;
    private final MemberAssignment assignment;

    public MemberDescription(String memberId,
                             Optional<String> groupInstanceId,
                             String clientId,
                             String host,
                             MemberAssignment assignment) {
        this.memberId = nullToEmpty(memberId);
        this.groupInstanceId = groupInstanceId;
        this.clientId = nullToEmpty(clientId);
        this.host = nullToEmpty(host);
        // Fall back to an empty assignment rather than storing null.
        this.assignment = assignment != null
            ? assignment
            : new MemberAssignment(Collections.emptySet());
    }

    public MemberDescription(String memberId,
                             String clientId,
                             String host,
                             MemberAssignment assignment) {
        this(memberId, Optional.empty(), clientId, host, assignment);
    }

    // Normalizes a possibly-null string to the empty string.
    private static String nullToEmpty(String value) {
        return value == null ? "" : value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MemberDescription other = (MemberDescription) o;
        return memberId.equals(other.memberId)
            && groupInstanceId.equals(other.groupInstanceId)
            && clientId.equals(other.clientId)
            && host.equals(other.host)
            && assignment.equals(other.assignment);
    }

    @Override
    public int hashCode() {
        return Objects.hash(memberId, groupInstanceId, clientId, host, assignment);
    }

    /**
     * The consumer id of the group member.
     */
    public String consumerId() {
        return memberId;
    }

    /**
     * The instance id of the group member.
     */
    public Optional<String> groupInstanceId() {
        return groupInstanceId;
    }

    /**
     * The client id of the group member.
     */
    public String clientId() {
        return clientId;
    }

    /**
     * The host where the group member is running.
     */
    public String host() {
        return host;
    }

    /**
     * The assignment of the group member.
     */
    public MemberAssignment assignment() {
        return assignment;
    }

    @Override
    public String toString() {
        return "(memberId=" + memberId +
            ", groupInstanceId=" + groupInstanceId.orElse("null") +
            ", clientId=" + clientId +
            ", host=" + host +
            ", assignment=" + assignment + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/MemberToRemove.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.requests.JoinGroupRequest;
import java.util.Objects;
/**
* A struct containing information about the member to be removed.
*/
/**
 * A struct containing information about the member to be removed.
 */
public class MemberToRemove {
    private final String groupInstanceId;

    /**
     * @param groupInstanceId the static group instance id of the member to remove
     */
    public MemberToRemove(String groupInstanceId) {
        this.groupInstanceId = groupInstanceId;
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof MemberToRemove) {
            MemberToRemove otherMember = (MemberToRemove) o;
            // Objects.equals is null-safe: the constructor does not forbid a null
            // groupInstanceId, and the previous direct equals() call would have
            // thrown NullPointerException in that case.
            return Objects.equals(this.groupInstanceId, otherMember.groupInstanceId);
        } else {
            return false;
        }
    }

    @Override
    public int hashCode() {
        return Objects.hash(groupInstanceId);
    }

    // Converts this removal request to the wire-level member identity, using the
    // sentinel UNKNOWN_MEMBER_ID since removal is keyed on the instance id.
    MemberIdentity toMemberIdentity() {
        return new MemberIdentity()
            .setGroupInstanceId(groupInstanceId)
            .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID);
    }

    public String groupInstanceId() {
        return groupInstanceId;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/NewPartitionReassignment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* A new partition reassignment, which can be applied via {@link AdminClient#alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions)}.
*/
public class NewPartitionReassignment {
    private final List<Integer> targetReplicas;

    /**
     * Creates a reassignment targeting the supplied replica list.
     *
     * @throws IllegalArgumentException if no replicas are supplied
     */
    public NewPartitionReassignment(List<Integer> targetReplicas) {
        if (targetReplicas == null || targetReplicas.isEmpty())
            throw new IllegalArgumentException("Cannot create a new partition reassignment without any replicas");
        // Snapshot the caller's list and expose it read-only.
        this.targetReplicas = Collections.unmodifiableList(new ArrayList<>(targetReplicas));
    }

    public List<Integer> targetReplicas() {
        return targetReplicas;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/NewPartitions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.List;
import java.util.Map;
/**
* Describes new partitions for a particular topic in a call to {@link Admin#createPartitions(Map)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class NewPartitions {
    private final int totalCount;
    private final List<List<Integer>> newAssignments;

    private NewPartitions(int totalCount, List<List<Integer>> newAssignments) {
        this.totalCount = totalCount;
        this.newAssignments = newAssignments;
    }

    /**
     * Increase the partition count for a topic to the given {@code totalCount};
     * the broker decides where the new replicas are placed.
     *
     * @param totalCount The total number of partitions after the operation succeeds.
     */
    public static NewPartitions increaseTo(int totalCount) {
        return new NewPartitions(totalCount, null);
    }

    /**
     * <p>Increase the partition count for a topic to the given {@code totalCount},
     * assigning the new partitions according to the given {@code newAssignments}.
     * The length of the given {@code newAssignments} should equal {@code totalCount - oldCount},
     * since the assignment of existing partitions is not changed.
     * Each inner list of {@code newAssignments} should have a length equal to
     * the topic's replication factor; the first broker id in each inner list is
     * the "preferred replica".</p>
     *
     * <p>For example, suppose a topic currently has a replication factor of 2 and
     * 3 partitions. The number of partitions can be increased to 6 using a
     * {@code NewPartition} constructed like this:</p>
     *
     * <pre><code>
     * NewPartitions.increaseTo(6, asList(asList(1, 2),
     *                                    asList(2, 3),
     *                                    asList(3, 1)))
     * </code></pre>
     * <p>In this example partition 3's preferred leader will be broker 1, partition 4's preferred leader will be
     * broker 2 and partition 5's preferred leader will be broker 3.</p>
     *
     * @param totalCount The total number of partitions after the operation succeeds.
     * @param newAssignments The replica assignments for the new partitions.
     */
    public static NewPartitions increaseTo(int totalCount, List<List<Integer>> newAssignments) {
        return new NewPartitions(totalCount, newAssignments);
    }

    /**
     * The total number of partitions after the operation succeeds.
     */
    public int totalCount() {
        return totalCount;
    }

    /**
     * The replica assignments for the new partitions, or null if the assignment will be done by the controller.
     */
    public List<List<Integer>> assignments() {
        return newAssignments;
    }

    @Override
    public String toString() {
        return "(totalCount=" + totalCount() + ", newAssignments=" + assignments() + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/NewTopic.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Optional;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreateableTopicConfig;
import org.apache.kafka.common.requests.CreateTopicsRequest;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Map.Entry;
/**
* A new topic to be created via {@link Admin#createTopics(Collection)}.
*/
public class NewTopic {
    private final String name;
    private final Optional<Integer> numPartitions;
    private final Optional<Short> replicationFactor;
    private final Map<Integer, List<Integer>> replicasAssignments;
    private Map<String, String> configs = null;

    /**
     * A new topic with the specified replication factor and number of partitions.
     */
    public NewTopic(String name, int numPartitions, short replicationFactor) {
        this(name, Optional.of(numPartitions), Optional.of(replicationFactor));
    }

    /**
     * A new topic that optionally defaults {@code numPartitions} and {@code replicationFactor} to
     * the broker configurations for {@code num.partitions} and {@code default.replication.factor}
     * respectively.
     */
    public NewTopic(String name, Optional<Integer> numPartitions, Optional<Short> replicationFactor) {
        this.name = name;
        this.numPartitions = numPartitions;
        this.replicationFactor = replicationFactor;
        this.replicasAssignments = null;
    }

    /**
     * A new topic with the specified replica assignment configuration.
     *
     * @param name the topic name.
     * @param replicasAssignments a map from partition id to replica ids (i.e. broker ids). Although not enforced, it is
     *                            generally a good idea for all partitions to have the same number of replicas.
     */
    public NewTopic(String name, Map<Integer, List<Integer>> replicasAssignments) {
        this.name = name;
        this.numPartitions = Optional.empty();
        this.replicationFactor = Optional.empty();
        this.replicasAssignments = Collections.unmodifiableMap(replicasAssignments);
    }

    /**
     * The name of the topic to be created.
     */
    public String name() {
        return name;
    }

    /**
     * The number of partitions for the new topic or -1 if a replica assignment has been specified.
     */
    public int numPartitions() {
        return numPartitions.orElse(CreateTopicsRequest.NO_NUM_PARTITIONS);
    }

    /**
     * The replication factor for the new topic or -1 if a replica assignment has been specified.
     */
    public short replicationFactor() {
        return replicationFactor.orElse(CreateTopicsRequest.NO_REPLICATION_FACTOR);
    }

    /**
     * A map from partition id to replica ids (i.e. broker ids) or null if the number of partitions and replication
     * factor have been specified instead.
     */
    public Map<Integer, List<Integer>> replicasAssignments() {
        return replicasAssignments;
    }

    /**
     * Set the configuration to use on the new topic.
     *
     * @param configs The configuration map.
     * @return This NewTopic object.
     */
    public NewTopic configs(Map<String, String> configs) {
        this.configs = configs;
        return this;
    }

    /**
     * The configuration for the new topic or null if no configs ever specified.
     */
    public Map<String, String> configs() {
        return configs;
    }

    // Builds the wire-level CreateTopicsRequest payload for this topic, copying
    // over the explicit replica assignments and configs when present.
    CreatableTopic convertToCreatableTopic() {
        CreatableTopic creatableTopic = new CreatableTopic()
            .setName(name)
            .setNumPartitions(numPartitions.orElse(CreateTopicsRequest.NO_NUM_PARTITIONS))
            .setReplicationFactor(replicationFactor.orElse(CreateTopicsRequest.NO_REPLICATION_FACTOR));
        if (replicasAssignments != null) {
            replicasAssignments.forEach((partitionId, brokerIds) ->
                creatableTopic.assignments().add(
                    new CreatableReplicaAssignment()
                        .setPartitionIndex(partitionId)
                        .setBrokerIds(brokerIds)));
        }
        if (configs != null) {
            configs.forEach((key, value) ->
                creatableTopic.configs().add(
                    new CreateableTopicConfig()
                        .setName(key)
                        .setValue(value)));
        }
        return creatableTopic;
    }

    @Override
    public String toString() {
        return "(name=" + name +
            ", numPartitions=" + numPartitions.map(String::valueOf).orElse("default") +
            ", replicationFactor=" + replicationFactor.map(String::valueOf).orElse("default") +
            ", replicasAssignments=" + replicasAssignments +
            ", configs=" + configs +
            ")";
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        final NewTopic other = (NewTopic) o;
        return Objects.equals(name, other.name)
            && Objects.equals(numPartitions, other.numPartitions)
            && Objects.equals(replicationFactor, other.replicationFactor)
            && Objects.equals(replicasAssignments, other.replicasAssignments)
            && Objects.equals(configs, other.configs);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, numPartitions, replicationFactor, replicasAssignments, configs);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/OffsetSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Map;
/**
* This class allows to specify the desired offsets when using {@link KafkaAdminClient#listOffsets(Map, ListOffsetsOptions)}
*/
public class OffsetSpec {
    public static class EarliestSpec extends OffsetSpec { }
    public static class LatestSpec extends OffsetSpec { }
    public static class MaxTimestampSpec extends OffsetSpec { }

    public static class TimestampSpec extends OffsetSpec {
        private final long timestamp;

        TimestampSpec(long timestamp) {
            this.timestamp = timestamp;
        }

        long timestamp() {
            return timestamp;
        }
    }

    /**
     * Used to retrieve the earliest offset of a partition
     */
    public static OffsetSpec earliest() {
        return new EarliestSpec();
    }

    /**
     * Used to retrieve the latest offset of a partition
     */
    public static OffsetSpec latest() {
        return new LatestSpec();
    }

    /**
     * Used to retrieve the earliest offset whose timestamp is greater than
     * or equal to the given timestamp in the corresponding partition
     *
     * @param timestamp in milliseconds
     */
    public static OffsetSpec forTimestamp(long timestamp) {
        return new TimestampSpec(timestamp);
    }

    /**
     * Used to retrieve the offset with the largest timestamp of a partition;
     * as message timestamps can be specified client side this may not match
     * the log end offset returned by LatestSpec
     */
    public static OffsetSpec maxTimestamp() {
        return new MaxTimestampSpec();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/PartitionReassignment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Collections;
import java.util.List;
/**
* A partition reassignment, which has been listed via {@link AdminClient#listPartitionReassignments()}.
*/
public class PartitionReassignment {
    private final List<Integer> replicas;
    private final List<Integer> addingReplicas;
    private final List<Integer> removingReplicas;

    public PartitionReassignment(List<Integer> replicas, List<Integer> addingReplicas, List<Integer> removingReplicas) {
        // Expose read-only views; callers must not mutate reassignment state.
        this.replicas = Collections.unmodifiableList(replicas);
        this.addingReplicas = Collections.unmodifiableList(addingReplicas);
        this.removingReplicas = Collections.unmodifiableList(removingReplicas);
    }

    /**
     * The brokers which this partition currently resides on.
     */
    public List<Integer> replicas() {
        return replicas;
    }

    /**
     * The brokers that we are adding this partition to as part of a reassignment.
     * A subset of replicas.
     */
    public List<Integer> addingReplicas() {
        return addingReplicas;
    }

    /**
     * The brokers that we are removing this partition from as part of a reassignment.
     * A subset of replicas.
     */
    public List<Integer> removingReplicas() {
        return removingReplicas;
    }

    @Override
    public String toString() {
        return "PartitionReassignment(" +
            "replicas=" + replicas +
            ", addingReplicas=" + addingReplicas +
            ", removingReplicas=" + removingReplicas +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ProducerState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
import java.util.OptionalInt;
import java.util.OptionalLong;
public class ProducerState {
    private final long producerId;
    private final int producerEpoch;
    private final int lastSequence;
    private final long lastTimestamp;
    private final OptionalInt coordinatorEpoch;
    private final OptionalLong currentTransactionStartOffset;

    public ProducerState(
        long producerId,
        int producerEpoch,
        int lastSequence,
        long lastTimestamp,
        OptionalInt coordinatorEpoch,
        OptionalLong currentTransactionStartOffset
    ) {
        this.producerId = producerId;
        this.producerEpoch = producerEpoch;
        this.lastSequence = lastSequence;
        this.lastTimestamp = lastTimestamp;
        this.coordinatorEpoch = coordinatorEpoch;
        this.currentTransactionStartOffset = currentTransactionStartOffset;
    }

    public long producerId() {
        return producerId;
    }

    public int producerEpoch() {
        return producerEpoch;
    }

    public int lastSequence() {
        return lastSequence;
    }

    public long lastTimestamp() {
        return lastTimestamp;
    }

    public OptionalLong currentTransactionStartOffset() {
        return currentTransactionStartOffset;
    }

    public OptionalInt coordinatorEpoch() {
        return coordinatorEpoch;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ProducerState other = (ProducerState) o;
        return producerId == other.producerId
            && producerEpoch == other.producerEpoch
            && lastSequence == other.lastSequence
            && lastTimestamp == other.lastTimestamp
            && Objects.equals(coordinatorEpoch, other.coordinatorEpoch)
            && Objects.equals(currentTransactionStartOffset, other.currentTransactionStartOffset);
    }

    @Override
    public int hashCode() {
        return Objects.hash(producerId, producerEpoch, lastSequence, lastTimestamp,
            coordinatorEpoch, currentTransactionStartOffset);
    }

    @Override
    public String toString() {
        return "ProducerState(" +
            "producerId=" + producerId +
            ", producerEpoch=" + producerEpoch +
            ", lastSequence=" + lastSequence +
            ", lastTimestamp=" + lastTimestamp +
            ", coordinatorEpoch=" + coordinatorEpoch +
            ", currentTransactionStartOffset=" + currentTransactionStartOffset +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/QuorumInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.List;
import java.util.Objects;
import java.util.OptionalLong;
/**
 * This class is used to describe the state of the quorum received in DescribeQuorumResponse.
 */
public class QuorumInfo {
    private final int leaderId;
    private final long leaderEpoch;
    private final long highWatermark;
    private final List<ReplicaState> voters;
    private final List<ReplicaState> observers;

    QuorumInfo(
        int leaderId,
        long leaderEpoch,
        long highWatermark,
        List<ReplicaState> voters,
        List<ReplicaState> observers
    ) {
        this.leaderId = leaderId;
        this.leaderEpoch = leaderEpoch;
        this.highWatermark = highWatermark;
        this.voters = voters;
        this.observers = observers;
    }

    /** @return the node id of the quorum leader */
    public int leaderId() {
        return leaderId;
    }

    /** @return the epoch of the quorum leader */
    public long leaderEpoch() {
        return leaderEpoch;
    }

    /** @return the high watermark reported for the quorum */
    public long highWatermark() {
        return highWatermark;
    }

    /** @return per-replica state for the voters of the quorum */
    public List<ReplicaState> voters() {
        return voters;
    }

    /** @return per-replica state for the observers of the quorum */
    public List<ReplicaState> observers() {
        return observers;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        QuorumInfo other = (QuorumInfo) obj;
        if (leaderId != other.leaderId
                || leaderEpoch != other.leaderEpoch
                || highWatermark != other.highWatermark) {
            return false;
        }
        return Objects.equals(voters, other.voters)
            && Objects.equals(observers, other.observers);
    }

    @Override
    public int hashCode() {
        // Same fields, same order as equals() above.
        return Objects.hash(leaderId, leaderEpoch, highWatermark, voters, observers);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("QuorumInfo(");
        sb.append("leaderId=").append(leaderId);
        sb.append(", leaderEpoch=").append(leaderEpoch);
        sb.append(", highWatermark=").append(highWatermark);
        sb.append(", voters=").append(voters);
        sb.append(", observers=").append(observers);
        return sb.append(')').toString();
    }

    /**
     * The state of a single replica (voter or observer) as known by the leader.
     */
    public static class ReplicaState {
        private final int replicaId;
        private final long logEndOffset;
        private final OptionalLong lastFetchTimestamp;
        private final OptionalLong lastCaughtUpTimestamp;

        ReplicaState() {
            this(0, 0, OptionalLong.empty(), OptionalLong.empty());
        }

        ReplicaState(
            int replicaId,
            long logEndOffset,
            OptionalLong lastFetchTimestamp,
            OptionalLong lastCaughtUpTimestamp
        ) {
            this.replicaId = replicaId;
            this.logEndOffset = logEndOffset;
            this.lastFetchTimestamp = lastFetchTimestamp;
            this.lastCaughtUpTimestamp = lastCaughtUpTimestamp;
        }

        /**
         * Return the ID for this replica.
         * @return The ID for this replica
         */
        public int replicaId() {
            return replicaId;
        }

        /**
         * Return the logEndOffset known by the leader for this replica.
         * @return The logEndOffset for this replica
         */
        public long logEndOffset() {
            return logEndOffset;
        }

        /**
         * Return the last millisecond timestamp that the leader received a
         * fetch from this replica.
         * @return The value of the lastFetchTime if known, empty otherwise
         */
        public OptionalLong lastFetchTimestamp() {
            return lastFetchTimestamp;
        }

        /**
         * Return the last millisecond timestamp at which this replica was known to be
         * caught up with the leader.
         * @return The value of the lastCaughtUpTime if known, empty otherwise
         */
        public OptionalLong lastCaughtUpTimestamp() {
            return lastCaughtUpTimestamp;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            ReplicaState other = (ReplicaState) obj;
            return replicaId == other.replicaId
                && logEndOffset == other.logEndOffset
                && lastFetchTimestamp.equals(other.lastFetchTimestamp)
                && lastCaughtUpTimestamp.equals(other.lastCaughtUpTimestamp);
        }

        @Override
        public int hashCode() {
            return Objects.hash(replicaId, logEndOffset, lastFetchTimestamp, lastCaughtUpTimestamp);
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder("ReplicaState(");
            sb.append("replicaId=").append(replicaId);
            sb.append(", logEndOffset=").append(logEndOffset);
            sb.append(", lastFetchTimestamp=").append(lastFetchTimestamp);
            sb.append(", lastCaughtUpTimestamp=").append(lastCaughtUpTimestamp);
            return sb.append(')').toString();
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/RecordsToDelete.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * Describe records to delete in a call to {@link Admin#deleteRecords(Map)}
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class RecordsToDelete {
    private final long offset;

    private RecordsToDelete(long offset) {
        this.offset = offset;
    }

    /**
     * Delete all the records before the given {@code offset}
     *
     * @param offset the offset before which all records will be deleted
     */
    public static RecordsToDelete beforeOffset(long offset) {
        return new RecordsToDelete(offset);
    }

    /**
     * The offset before which all records will be deleted
     */
    public long beforeOffset() {
        return offset;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        return offset == ((RecordsToDelete) obj).offset;
    }

    @Override
    public int hashCode() {
        // Truncation to int is deliberate and stays consistent with equals(),
        // which compares the full long value.
        return (int) offset;
    }

    @Override
    public String toString() {
        return "(beforeOffset = " + offset + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
/**
 * Options for {@link AdminClient#removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions)}.
 * It carries the members to be removed from the consumer group.
 *
 * The API of this class is evolving, see {@link AdminClient} for details.
 */
@InterfaceStability.Evolving
public class RemoveMembersFromConsumerGroupOptions extends AbstractOptions<RemoveMembersFromConsumerGroupOptions> {
    private final Set<MemberToRemove> members;
    private String reason;

    /**
     * Remove only the given members from the group.
     *
     * @param members the members to remove; must not be empty
     * @throws IllegalArgumentException if {@code members} is empty
     */
    public RemoveMembersFromConsumerGroupOptions(Collection<MemberToRemove> members) {
        if (members.isEmpty()) {
            throw new IllegalArgumentException("Invalid empty members has been provided");
        }
        this.members = new HashSet<>(members);
    }

    /**
     * Remove every member of the group: the empty member set is interpreted as
     * "remove all" (see {@link #removeAll()}).
     */
    public RemoveMembersFromConsumerGroupOptions() {
        this.members = Collections.emptySet();
    }

    /**
     * Sets an optional reason.
     */
    public void reason(final String reason) {
        this.reason = reason;
    }

    /** @return the members to remove; empty in "remove all" mode */
    public Set<MemberToRemove> members() {
        return members;
    }

    /** @return the optional reason, or null if none was set */
    public String reason() {
        return reason;
    }

    /** @return true when no explicit members were given, i.e. all members are removed */
    public boolean removeAll() {
        return members.isEmpty();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.protocol.Errors;
import java.util.Map;
import java.util.Set;
/**
 * The result of the {@link Admin#removeMembersFromConsumerGroup(String, RemoveMembersFromConsumerGroupOptions)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
public class RemoveMembersFromConsumerGroupResult {
    // Completed with the per-member removal outcome once the server responds.
    private final KafkaFuture<Map<MemberIdentity, Errors>> future;
    // The members the caller asked to remove; an empty set means "remove all" mode.
    private final Set<MemberToRemove> memberInfos;

    RemoveMembersFromConsumerGroupResult(KafkaFuture<Map<MemberIdentity, Errors>> future,
                                         Set<MemberToRemove> memberInfos) {
        this.future = future;
        this.memberInfos = memberInfos;
    }

    /**
     * Returns a future which indicates whether the request was 100% successful, i.e.
     * neither a top-level nor a member-level error occurred.
     * If not, the first member error shall be returned.
     */
    public KafkaFuture<Void> all() {
        final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
        this.future.whenComplete((memberErrors, throwable) -> {
            if (throwable != null) {
                // Top-level failure: surface it directly.
                result.completeExceptionally(throwable);
            } else {
                if (removeAll()) {
                    // "Remove all" mode: scan every entry of the response and fail on the
                    // first member-level error found (response map iteration order decides
                    // which error wins).
                    for (Map.Entry<MemberIdentity, Errors> entry: memberErrors.entrySet()) {
                        Exception exception = entry.getValue().exception();
                        if (exception != null) {
                            Throwable ex = new KafkaException("Encounter exception when trying to remove: "
                                + entry.getKey(), exception);
                            result.completeExceptionally(ex);
                            return;
                        }
                    }
                } else {
                    // Explicit-member mode: only the members from the original request
                    // are checked for errors.
                    for (MemberToRemove memberToRemove : memberInfos) {
                        if (maybeCompleteExceptionally(memberErrors, memberToRemove.toMemberIdentity(), result)) {
                            return;
                        }
                    }
                }
                result.complete(null);
            }
        });
        return result;
    }

    /**
     * Returns the selected member future.
     *
     * @throws IllegalArgumentException if this result was produced in "remove all" mode,
     *         or if {@code member} was not part of the original request
     */
    public KafkaFuture<Void> memberResult(MemberToRemove member) {
        if (removeAll()) {
            throw new IllegalArgumentException("The method: memberResult is not applicable in 'removeAll' mode");
        }
        if (!memberInfos.contains(member)) {
            throw new IllegalArgumentException("Member " + member + " was not included in the original request");
        }
        final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
        this.future.whenComplete((memberErrors, throwable) -> {
            if (throwable != null) {
                result.completeExceptionally(throwable);
            } else if (!maybeCompleteExceptionally(memberErrors, member.toMemberIdentity(), result)) {
                result.complete(null);
            }
        });
        return result;
    }

    // Completes `result` exceptionally when the response carries an error for `member`
    // (the fallback message below suggests a missing entry is also treated as an error
    // by getSubLevelError); returns true when it did so.
    private boolean maybeCompleteExceptionally(Map<MemberIdentity, Errors> memberErrors,
                                               MemberIdentity member,
                                               KafkaFutureImpl<Void> result) {
        Throwable exception = KafkaAdminClient.getSubLevelError(memberErrors, member,
            "Member \"" + member + "\" was not included in the removal response");
        if (exception != null) {
            result.completeExceptionally(exception);
            return true;
        } else {
            return false;
        }
    }

    // An empty member set means the original request targeted every member of the group.
    private boolean removeAll() {
        return memberInfos.isEmpty();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/RenewDelegationTokenOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * Options for {@link Admin#renewDelegationToken(byte[], RenewDelegationTokenOptions)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class RenewDelegationTokenOptions extends AbstractOptions<RenewDelegationTokenOptions> {
    // -1 signals "not set"; presumably the broker then applies its default renew
    // period — TODO confirm against server-side handling.
    private long renewTimePeriodMs = -1;

    /**
     * Set the renew time period in milliseconds.
     *
     * @param renewTimePeriodMs the renew time period in milliseconds
     * @return this options instance, to allow chaining
     */
    public RenewDelegationTokenOptions renewTimePeriodMs(long renewTimePeriodMs) {
        this.renewTimePeriodMs = renewTimePeriodMs;
        return this;
    }

    /**
     * @return the configured renew time period in milliseconds, or -1 if unset
     */
    public long renewTimePeriodMs() {
        return renewTimePeriodMs;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/RenewDelegationTokenResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * The result of the {@link KafkaAdminClient#renewDelegationToken(byte[], RenewDelegationTokenOptions)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class RenewDelegationTokenResult {
    // Completed with the renewed token's new expiry timestamp once the broker responds.
    private final KafkaFuture<Long> expiryTimestamp;

    RenewDelegationTokenResult(KafkaFuture<Long> expiryTimestamp) {
        this.expiryTimestamp = expiryTimestamp;
    }

    /**
     * Returns a future which yields the expiry timestamp of the renewed token.
     */
    public KafkaFuture<Long> expiryTimestamp() {
        return expiryTimestamp;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ReplicaInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
/**
 * A description of a replica on a particular broker.
 */
public class ReplicaInfo {
    private final long size;
    private final long offsetLag;
    private final boolean isFuture;

    public ReplicaInfo(long size, long offsetLag, boolean isFuture) {
        this.size = size;
        this.offsetLag = offsetLag;
        this.isFuture = isFuture;
    }

    /**
     * The total size of the log segments in this replica in bytes.
     */
    public long size() {
        return size;
    }

    /**
     * The lag of the log's LEO with respect to the partition's
     * high watermark (if it is the current log for the partition)
     * or the current replica's LEO (if it is the {@linkplain #isFuture() future log}
     * for the partition).
     */
    public long offsetLag() {
        return offsetLag;
    }

    /**
     * Whether this replica has been created by a AlterReplicaLogDirsRequest
     * but not yet replaced the current replica on the broker.
     *
     * @return true if this log is created by AlterReplicaLogDirsRequest and will replace the current log
     * of the replica at some time in the future.
     */
    public boolean isFuture() {
        return isFuture;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ReplicaInfo(");
        sb.append("size=").append(size);
        sb.append(", offsetLag=").append(offsetLag);
        sb.append(", isFuture=").append(isFuture);
        return sb.append(')').toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ScramCredentialInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
/**
 * Mechanism and iterations for a SASL/SCRAM credential associated with a user.
 *
 * @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API">KIP-554: Add Broker-side SCRAM Config API</a>
 */
public class ScramCredentialInfo {
    private final ScramMechanism mechanism;
    private final int iterations;

    /**
     * @param mechanism the required mechanism; must not be null
     * @param iterations the number of iterations used when creating the credential
     */
    public ScramCredentialInfo(ScramMechanism mechanism, int iterations) {
        this.mechanism = Objects.requireNonNull(mechanism);
        this.iterations = iterations;
    }

    /**
     * @return the mechanism
     */
    public ScramMechanism mechanism() {
        return mechanism;
    }

    /**
     * @return the number of iterations used when creating the credential
     */
    public int iterations() {
        return iterations;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ScramCredentialInfo{");
        sb.append("mechanism=").append(mechanism);
        sb.append(", iterations=").append(iterations);
        return sb.append('}').toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        ScramCredentialInfo other = (ScramCredentialInfo) obj;
        // Enum identity comparison is safe: each mechanism is a singleton constant.
        return iterations == other.iterations && mechanism == other.mechanism;
    }

    @Override
    public int hashCode() {
        return Objects.hash(mechanism, iterations);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ScramMechanism.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Arrays;
/**
 * Representation of a SASL/SCRAM Mechanism.
 *
 * @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API">KIP-554: Add Broker-side SCRAM Config API</a>
 *
 * This code is duplicated in org.apache.kafka.common.security.scram.internals.ScramMechanism.
 * The type field in both files must match and must not change. The type field
 * is used both for passing ScramCredentialUpsertion and for the internal
 * UserScramCredentialRecord. Do not change the type field.
 */
public enum ScramMechanism {
    UNKNOWN((byte) 0),
    SCRAM_SHA_256((byte) 1),
    SCRAM_SHA_512((byte) 2);

    // Cached once; values() clones the constant array on every call.
    private static final ScramMechanism[] VALUES = values();

    private final byte type;
    private final String mechanismName;

    ScramMechanism(byte type) {
        this.type = type;
        // e.g. SCRAM_SHA_256 -> "SCRAM-SHA-256"
        this.mechanismName = name().replace('_', '-');
    }

    /**
     * @param type the type indicator
     * @return the instance corresponding to the given type indicator, otherwise {@link #UNKNOWN}
     */
    public static ScramMechanism fromType(byte type) {
        return Arrays.stream(VALUES)
            .filter(mechanism -> mechanism.type == type)
            .findFirst()
            .orElse(UNKNOWN);
    }

    /**
     * @param mechanismName the SASL SCRAM mechanism name
     * @return the corresponding SASL SCRAM mechanism enum, otherwise {@link #UNKNOWN}
     * @see <a href="https://tools.ietf.org/html/rfc5802#section-4">
     *     Salted Challenge Response Authentication Mechanism (SCRAM) SASL and GSS-API Mechanisms, Section 4</a>
     */
    public static ScramMechanism fromMechanismName(String mechanismName) {
        for (ScramMechanism candidate : VALUES) {
            if (candidate.mechanismName.equals(mechanismName)) {
                return candidate;
            }
        }
        return UNKNOWN;
    }

    /**
     * @return the corresponding SASL SCRAM mechanism name
     * @see <a href="https://tools.ietf.org/html/rfc5802#section-4">
     *     Salted Challenge Response Authentication Mechanism (SCRAM) SASL and GSS-API Mechanisms, Section 4</a>
     */
    public String mechanismName() {
        return this.mechanismName;
    }

    /**
     * @return the type indicator for this SASL SCRAM mechanism
     */
    public byte type() {
        return this.type;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/SupportedVersionRange.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
/**
 * Represents a range of versions that a particular broker supports for some feature.
 */
public class SupportedVersionRange {
    private final short minVersion;
    private final short maxVersion;

    /**
     * Raises an exception unless the following condition is met:
     * 0 <= minVersion <= maxVersion.
     * (The previous doc said "1 <= minVersion", which contradicted both the check
     * below and the exception message; the code has always allowed 0.)
     *
     * @param minVersion The minimum version value.
     * @param maxVersion The maximum version value.
     *
     * @throws IllegalArgumentException Raised when the condition described above is not met.
     */
    SupportedVersionRange(final short minVersion, final short maxVersion) {
        if (minVersion < 0 || maxVersion < 0 || maxVersion < minVersion) {
            throw new IllegalArgumentException(
                String.format(
                    "Expected 0 <= minVersion <= maxVersion but received minVersion:%d, maxVersion:%d.",
                    minVersion,
                    maxVersion));
        }
        this.minVersion = minVersion;
        this.maxVersion = maxVersion;
    }

    /** @return the minimum supported version (inclusive) */
    public short minVersion() {
        return minVersion;
    }

    /** @return the maximum supported version (inclusive) */
    public short maxVersion() {
        return maxVersion;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        final SupportedVersionRange that = (SupportedVersionRange) other;
        return this.minVersion == that.minVersion && this.maxVersion == that.maxVersion;
    }

    @Override
    public int hashCode() {
        return Objects.hash(minVersion, maxVersion);
    }

    @Override
    public String toString() {
        return String.format("SupportedVersionRange[min_version:%d, max_version:%d]", minVersion, maxVersion);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/TopicDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.utils.Utils;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
/**
 * A detailed description of a single topic in the cluster.
 */
public class TopicDescription {
    private final String name;
    private final boolean internal;
    private final List<TopicPartitionInfo> partitions;
    private final Set<AclOperation> authorizedOperations;
    private final Uuid topicId;

    // NOTE(review): equals/hashCode (and toString below) omit topicId, so two
    // descriptions differing only in topic id compare equal — confirm this is the
    // intended contract before relying on it.
    @Override
    public boolean equals(final Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        final TopicDescription that = (TopicDescription) o;
        return internal == that.internal &&
            Objects.equals(name, that.name) &&
            Objects.equals(partitions, that.partitions) &&
            Objects.equals(authorizedOperations, that.authorizedOperations);
    }

    @Override
    public int hashCode() {
        // Hashes exactly the fields compared in equals(), keeping the contract.
        return Objects.hash(name, internal, partitions, authorizedOperations);
    }

    /**
     * Create an instance with the specified parameters.
     *
     * @param name The topic name
     * @param internal Whether the topic is internal to Kafka
     * @param partitions A list of partitions where the index represents the partition id and the element contains
     *                   leadership and replica information for that partition.
     */
    public TopicDescription(String name, boolean internal, List<TopicPartitionInfo> partitions) {
        this(name, internal, partitions, Collections.emptySet());
    }

    /**
     * Create an instance with the specified parameters.
     *
     * @param name The topic name
     * @param internal Whether the topic is internal to Kafka
     * @param partitions A list of partitions where the index represents the partition id and the element contains
     *                   leadership and replica information for that partition.
     * @param authorizedOperations authorized operations for this topic, or empty set if this is not known.
     */
    public TopicDescription(String name, boolean internal, List<TopicPartitionInfo> partitions,
                            Set<AclOperation> authorizedOperations) {
        this(name, internal, partitions, authorizedOperations, Uuid.ZERO_UUID);
    }

    /**
     * Create an instance with the specified parameters.
     *
     * @param name The topic name
     * @param internal Whether the topic is internal to Kafka
     * @param partitions A list of partitions where the index represents the partition id and the element contains
     *                   leadership and replica information for that partition.
     * @param authorizedOperations authorized operations for this topic, or empty set if this is not known.
     * @param topicId the topic id
     */
    public TopicDescription(String name, boolean internal, List<TopicPartitionInfo> partitions,
                            Set<AclOperation> authorizedOperations, Uuid topicId) {
        this.name = name;
        this.internal = internal;
        this.partitions = partitions;
        this.authorizedOperations = authorizedOperations;
        this.topicId = topicId;
    }

    /**
     * The name of the topic.
     */
    public String name() {
        return name;
    }

    /**
     * Whether the topic is internal to Kafka. An example of an internal topic is the offsets and group management topic:
     * __consumer_offsets.
     */
    public boolean isInternal() {
        return internal;
    }

    public Uuid topicId() {
        return topicId;
    }

    /**
     * A list of partitions where the index represents the partition id and the element contains leadership and replica
     * information for that partition.
     */
    public List<TopicPartitionInfo> partitions() {
        return partitions;
    }

    /**
     * Authorized operations for this topic, or empty set if this is not known.
     * NOTE(review): an earlier doc said "null if not known", but the constructors in
     * this file default to an empty set; null is only possible if a caller passes it.
     */
    public Set<AclOperation> authorizedOperations() {
        return authorizedOperations;
    }

    @Override
    public String toString() {
        return "(name=" + name + ", internal=" + internal + ", partitions=" +
            Utils.join(partitions, ",") + ", authorizedOperations=" + authorizedOperations + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/TopicListing.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.Uuid;
/**
 * A listing of a topic in the cluster.
 */
public class TopicListing {
    private final String name;
    private final Uuid topicId;
    private final boolean internal;

    /**
     * Create an instance with the specified parameters.
     *
     * @param name The topic name
     * @param internal Whether the topic is internal to Kafka
     * @deprecated Since 3.0 use {@link #TopicListing(String, Uuid, boolean)} instead
     */
    @Deprecated
    public TopicListing(String name, boolean internal) {
        // No id is known on this legacy path; fall back to the zero-UUID sentinel.
        this(name, Uuid.ZERO_UUID, internal);
    }

    /**
     * Create an instance with the specified parameters.
     *
     * @param name The topic name
     * @param topicId The topic id.
     * @param internal Whether the topic is internal to Kafka
     */
    public TopicListing(String name, Uuid topicId, boolean internal) {
        this.name = name;
        this.topicId = topicId;
        this.internal = internal;
    }

    /**
     * The id of the topic.
     */
    public Uuid topicId() {
        return topicId;
    }

    /**
     * The name of the topic.
     */
    public String name() {
        return name;
    }

    /**
     * Whether the topic is internal to Kafka. An example of an internal topic is the offsets and group management topic:
     * __consumer_offsets.
     */
    public boolean isInternal() {
        return internal;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("(name=").append(name);
        sb.append(", topicId=").append(topicId);
        sb.append(", internal=").append(internal);
        return sb.append(")").toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/TransactionDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
import java.util.OptionalLong;
import java.util.Set;
@InterfaceStability.Evolving
public class TransactionDescription {

    private final int coordinatorId;
    private final TransactionState state;
    private final long producerId;
    private final int producerEpoch;
    private final long transactionTimeoutMs;
    private final OptionalLong transactionStartTimeMs;
    private final Set<TopicPartition> topicPartitions;

    /**
     * Immutable snapshot of a single transaction's metadata as reported by its coordinator.
     *
     * @param coordinatorId          broker id of the transaction coordinator
     * @param state                  current state of the transaction
     * @param producerId             producer id bound to the transactional id
     * @param producerEpoch          epoch of the producer id
     * @param transactionTimeoutMs   configured transaction timeout in milliseconds
     * @param transactionStartTimeMs start time in milliseconds, if a transaction is in progress
     * @param topicPartitions        partitions included in the current transaction
     */
    public TransactionDescription(
        int coordinatorId,
        TransactionState state,
        long producerId,
        int producerEpoch,
        long transactionTimeoutMs,
        OptionalLong transactionStartTimeMs,
        Set<TopicPartition> topicPartitions
    ) {
        this.coordinatorId = coordinatorId;
        this.state = state;
        this.producerId = producerId;
        this.producerEpoch = producerEpoch;
        this.transactionTimeoutMs = transactionTimeoutMs;
        this.transactionStartTimeMs = transactionStartTimeMs;
        this.topicPartitions = topicPartitions;
    }

    public int coordinatorId() {
        return coordinatorId;
    }

    public TransactionState state() {
        return state;
    }

    public long producerId() {
        return producerId;
    }

    public int producerEpoch() {
        return producerEpoch;
    }

    public long transactionTimeoutMs() {
        return transactionTimeoutMs;
    }

    public OptionalLong transactionStartTimeMs() {
        return transactionStartTimeMs;
    }

    public Set<TopicPartition> topicPartitions() {
        return topicPartitions;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        TransactionDescription other = (TransactionDescription) o;
        return coordinatorId == other.coordinatorId
            && producerId == other.producerId
            && producerEpoch == other.producerEpoch
            && transactionTimeoutMs == other.transactionTimeoutMs
            && state == other.state
            && Objects.equals(transactionStartTimeMs, other.transactionStartTimeMs)
            && Objects.equals(topicPartitions, other.topicPartitions);
    }

    @Override
    public int hashCode() {
        return Objects.hash(coordinatorId, state, producerId, producerEpoch, transactionTimeoutMs, transactionStartTimeMs, topicPartitions);
    }

    @Override
    public String toString() {
        return String.format(
            "TransactionDescription(coordinatorId=%d, state=%s, producerId=%d, producerEpoch=%d, transactionTimeoutMs=%d, transactionStartTimeMs=%s, topicPartitions=%s)",
            coordinatorId, state, producerId, producerEpoch, transactionTimeoutMs, transactionStartTimeMs, topicPartitions);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/TransactionListing.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
@InterfaceStability.Evolving
public class TransactionListing {

    private final String transactionalId;
    private final long producerId;
    private final TransactionState transactionState;

    /**
     * A single entry returned when listing transactions: the transactional id,
     * its bound producer id, and the transaction's current state.
     */
    public TransactionListing(
        String transactionalId,
        long producerId,
        TransactionState transactionState
    ) {
        this.transactionalId = transactionalId;
        this.producerId = producerId;
        this.transactionState = transactionState;
    }

    public String transactionalId() {
        return transactionalId;
    }

    public long producerId() {
        return producerId;
    }

    public TransactionState state() {
        return transactionState;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        TransactionListing other = (TransactionListing) o;
        return producerId == other.producerId
            && transactionState == other.transactionState
            && Objects.equals(transactionalId, other.transactionalId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(transactionalId, producerId, transactionState);
    }

    @Override
    public String toString() {
        return String.format("TransactionListing(transactionalId='%s', producerId=%d, transactionState=%s)",
            transactionalId, producerId, transactionState);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/TransactionState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Arrays;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
@InterfaceStability.Evolving
public enum TransactionState {
ONGOING("Ongoing"),
PREPARE_ABORT("PrepareAbort"),
PREPARE_COMMIT("PrepareCommit"),
COMPLETE_ABORT("CompleteAbort"),
COMPLETE_COMMIT("CompleteCommit"),
EMPTY("Empty"),
PREPARE_EPOCH_FENCE("PrepareEpochFence"),
UNKNOWN("Unknown");
private final static Map<String, TransactionState> NAME_TO_ENUM = Arrays.stream(values())
.collect(Collectors.toMap(state -> state.name, Function.identity()));
private final String name;
TransactionState(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
public static TransactionState parse(String name) {
TransactionState state = NAME_TO_ENUM.get(name);
return state == null ? UNKNOWN : state;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * Options for {@link Admin#unregisterBroker(int, UnregisterBrokerOptions)}.
 *
 * The API of this class is evolving. See {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class UnregisterBrokerOptions extends AbstractOptions<UnregisterBrokerOptions> {
    // Fix: the type parameter was AbstractOptions<UpdateFeaturesOptions> (a copy-paste
    // error), which made the inherited fluent setters (e.g. timeoutMs) return the wrong
    // type and broke call chaining on this class. AbstractOptions uses the self-type
    // pattern, so the parameter must be this class itself.
    // No options beyond the inherited ones (e.g. timeoutMs) are defined.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UnregisterBrokerResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
/**
 * The result of the {@link Admin#unregisterBroker(int, UnregisterBrokerOptions)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
public class UnregisterBrokerResult {

    // Single future covering the outcome of the whole unregistration request.
    private final KafkaFuture<Void> future;

    UnregisterBrokerResult(final KafkaFuture<Void> future) {
        this.future = future;
    }

    /**
     * Return a future which succeeds if the operation is successful.
     */
    public KafkaFuture<Void> all() {
        return future;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UpdateFeaturesOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Map;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
 * Options for {@link AdminClient#updateFeatures(Map, UpdateFeaturesOptions)}.
 *
 * The API of this class is evolving. See {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class UpdateFeaturesOptions extends AbstractOptions<UpdateFeaturesOptions> {

    // When true the broker only validates the request without applying it.
    private boolean validateOnly = false;

    /**
     * @return whether the request should only be validated
     * @deprecated Use {@link #validateOnly()} instead.
     */
    @Deprecated
    public boolean dryRun() {
        return validateOnly();
    }

    /**
     * @return whether the request should only be validated without being applied
     */
    public boolean validateOnly() {
        return validateOnly;
    }

    /**
     * @deprecated Use {@link #validateOnly(boolean)} instead.
     */
    @Deprecated
    public UpdateFeaturesOptions dryRun(boolean dryRun) {
        return validateOnly(dryRun);
    }

    /**
     * Set whether the request should only be validated without being applied.
     *
     * @return this options instance, to allow call chaining
     */
    public UpdateFeaturesOptions validateOnly(boolean validateOnly) {
        this.validateOnly = validateOnly;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UpdateFeaturesResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Map;
import org.apache.kafka.common.KafkaFuture;
/**
 * The result of the {@link Admin#updateFeatures(Map, UpdateFeaturesOptions)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
public class UpdateFeaturesResult {

    private final Map<String, KafkaFuture<Void>> futures;

    /**
     * @param futures a map from feature name to future, which can be used to check the status of
     *                individual feature updates.
     */
    UpdateFeaturesResult(final Map<String, KafkaFuture<Void>> futures) {
        this.futures = futures;
    }

    public Map<String, KafkaFuture<Void>> values() {
        return futures;
    }

    /**
     * Return a future which succeeds if all the feature updates succeed.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UserScramCredentialAlteration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
/**
 * A request to alter a user's SASL/SCRAM credentials.
 *
 * @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API">KIP-554: Add Broker-side SCRAM Config API</a>
 */
public abstract class UserScramCredentialAlteration {

    // Subclasses read this directly; never null (enforced in the constructor).
    protected final String user;

    /**
     * @param user the mandatory user
     * @throws NullPointerException if {@code user} is null
     */
    protected UserScramCredentialAlteration(String user) {
        this.user = Objects.requireNonNull(user);
    }

    /**
     * @return the always non-null user
     */
    public String user() {
        return user;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UserScramCredentialDeletion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
/**
 * A request to delete a SASL/SCRAM credential for a user.
 *
 * @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API">KIP-554: Add Broker-side SCRAM Config API</a>
 */
public class UserScramCredentialDeletion extends UserScramCredentialAlteration {

    private final ScramMechanism mechanism;

    /**
     * @param user      the mandatory user
     * @param mechanism the mandatory mechanism
     * @throws NullPointerException if either argument is null
     */
    public UserScramCredentialDeletion(String user, ScramMechanism mechanism) {
        super(user);
        this.mechanism = Objects.requireNonNull(mechanism);
    }

    /**
     * @return the always non-null mechanism
     */
    public ScramMechanism mechanism() {
        return mechanism;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UserScramCredentialUpsertion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.security.scram.internals.ScramFormatter;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.util.Objects;
/**
 * A request to update/insert a SASL/SCRAM credential for a user.
 *
 * @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API">KIP-554: Add Broker-side SCRAM Config API</a>
 */
public class UserScramCredentialUpsertion extends UserScramCredentialAlteration {

    private final ScramCredentialInfo info;
    private final byte[] salt;
    private final byte[] password;

    /**
     * Constructor that generates a random salt
     *
     * @param user           the user for which the credential is to be updated/inserted
     * @param credentialInfo the mechanism and iterations to be used
     * @param password       the password
     */
    public UserScramCredentialUpsertion(String user, ScramCredentialInfo credentialInfo, String password) {
        // Passwords are always encoded as UTF-8 bytes.
        this(user, credentialInfo, password.getBytes(StandardCharsets.UTF_8));
    }

    /**
     * Constructor that generates a random salt
     *
     * @param user           the user for which the credential is to be updated/inserted
     * @param credentialInfo the mechanism and iterations to be used
     * @param password       the password
     */
    public UserScramCredentialUpsertion(String user, ScramCredentialInfo credentialInfo, byte[] password) {
        this(user, credentialInfo, password, generateRandomSalt());
    }

    /**
     * Constructor that accepts an explicit salt
     *
     * @param user           the user for which the credential is to be updated/inserted
     * @param credentialInfo the mechanism and iterations to be used
     * @param password       the password
     * @param salt           the salt to be used
     * @throws NullPointerException if any argument is null
     */
    public UserScramCredentialUpsertion(String user, ScramCredentialInfo credentialInfo, byte[] password, byte[] salt) {
        super(Objects.requireNonNull(user));
        this.salt = Objects.requireNonNull(salt);
        this.password = Objects.requireNonNull(password);
        this.info = Objects.requireNonNull(credentialInfo);
    }

    /**
     * @return the mechanism and iterations
     */
    public ScramCredentialInfo credentialInfo() {
        return info;
    }

    /**
     * @return the salt
     */
    public byte[] salt() {
        return salt;
    }

    /**
     * @return the password
     */
    public byte[] password() {
        return password;
    }

    // Fresh cryptographically-strong salt for constructors that do not supply one.
    private static byte[] generateRandomSalt() {
        return ScramFormatter.secureRandomBytes(new SecureRandom());
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/UserScramCredentialsDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
/**
 * Representation of all SASL/SCRAM credentials associated with a user that can be retrieved, or an exception indicating
 * why credentials could not be retrieved.
 *
 * @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-554%3A+Add+Broker-side+SCRAM+Config+API">KIP-554: Add Broker-side SCRAM Config API</a>
 */
public class UserScramCredentialsDescription {

    private final String name;
    private final List<ScramCredentialInfo> credentialInfos;

    /**
     * @param name            the required user name
     * @param credentialInfos the required SASL/SCRAM credential representations for the user
     */
    public UserScramCredentialsDescription(String name, List<ScramCredentialInfo> credentialInfos) {
        this.name = Objects.requireNonNull(name);
        // Snapshot the caller's list so later mutations cannot leak into this instance.
        this.credentialInfos = Collections.unmodifiableList(new ArrayList<>(credentialInfos));
    }

    /**
     * @return the user name
     */
    public String name() {
        return name;
    }

    /**
     * @return the always non-null/unmodifiable list of SASL/SCRAM credential representations for the user
     */
    public List<ScramCredentialInfo> credentialInfos() {
        return credentialInfos;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        UserScramCredentialsDescription other = (UserScramCredentialsDescription) o;
        return Objects.equals(name, other.name)
            && Objects.equals(credentialInfos, other.credentialInfos);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, credentialInfos);
    }

    @Override
    public String toString() {
        return "UserScramCredentialsDescription{" +
            "name='" + name + '\'' +
            ", credentialInfos=" + credentialInfos +
            '}';
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.