index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster. */ package org.apache.kafka.clients.admin;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AbortTransactionHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.clients.admin.AbortTransactionSpec; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.InvalidProducerEpochException; import org.apache.kafka.common.errors.TransactionCoordinatorFencedException; import org.apache.kafka.common.message.WriteTxnMarkersRequestData; import org.apache.kafka.common.message.WriteTxnMarkersResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.WriteTxnMarkersRequest; import org.apache.kafka.common.requests.WriteTxnMarkersResponse; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.List; import java.util.Set; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; public class AbortTransactionHandler extends AdminApiHandler.Batched<TopicPartition, Void> { private final Logger log; private final AbortTransactionSpec abortSpec; private 
final PartitionLeaderStrategy lookupStrategy; public AbortTransactionHandler( AbortTransactionSpec abortSpec, LogContext logContext ) { this.abortSpec = abortSpec; this.log = logContext.logger(AbortTransactionHandler.class); this.lookupStrategy = new PartitionLeaderStrategy(logContext); } public static AdminApiFuture.SimpleAdminApiFuture<TopicPartition, Void> newFuture( Set<TopicPartition> topicPartitions ) { return AdminApiFuture.forKeys(topicPartitions); } @Override public String apiName() { return "abortTransaction"; } @Override public AdminApiLookupStrategy<TopicPartition> lookupStrategy() { return lookupStrategy; } @Override public WriteTxnMarkersRequest.Builder buildBatchedRequest( int brokerId, Set<TopicPartition> topicPartitions ) { validateTopicPartitions(topicPartitions); WriteTxnMarkersRequestData.WritableTxnMarker marker = new WriteTxnMarkersRequestData.WritableTxnMarker() .setCoordinatorEpoch(abortSpec.coordinatorEpoch()) .setProducerEpoch(abortSpec.producerEpoch()) .setProducerId(abortSpec.producerId()) .setTransactionResult(false); marker.topics().add(new WriteTxnMarkersRequestData.WritableTxnMarkerTopic() .setName(abortSpec.topicPartition().topic()) .setPartitionIndexes(singletonList(abortSpec.topicPartition().partition())) ); WriteTxnMarkersRequestData request = new WriteTxnMarkersRequestData(); request.markers().add(marker); return new WriteTxnMarkersRequest.Builder(request); } @Override public ApiResult<TopicPartition, Void> handleResponse( Node broker, Set<TopicPartition> topicPartitions, AbstractResponse abstractResponse ) { validateTopicPartitions(topicPartitions); WriteTxnMarkersResponse response = (WriteTxnMarkersResponse) abstractResponse; List<WriteTxnMarkersResponseData.WritableTxnMarkerResult> markerResponses = response.data().markers(); if (markerResponses.size() != 1 || markerResponses.get(0).producerId() != abortSpec.producerId()) { return ApiResult.failed(abortSpec.topicPartition(), new KafkaException("WriteTxnMarkers response " + 
"included unexpected marker entries: " + markerResponses + "(expected to find exactly one " + "entry with producerId " + abortSpec.producerId() + ")")); } WriteTxnMarkersResponseData.WritableTxnMarkerResult markerResponse = markerResponses.get(0); List<WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult> topicResponses = markerResponse.topics(); if (topicResponses.size() != 1 || !topicResponses.get(0).name().equals(abortSpec.topicPartition().topic())) { return ApiResult.failed(abortSpec.topicPartition(), new KafkaException("WriteTxnMarkers response " + "included unexpected topic entries: " + markerResponses + "(expected to find exactly one " + "entry with topic partition " + abortSpec.topicPartition() + ")")); } WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult topicResponse = topicResponses.get(0); List<WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult> partitionResponses = topicResponse.partitions(); if (partitionResponses.size() != 1 || partitionResponses.get(0).partitionIndex() != abortSpec.topicPartition().partition()) { return ApiResult.failed(abortSpec.topicPartition(), new KafkaException("WriteTxnMarkers response " + "included unexpected partition entries for topic " + abortSpec.topicPartition().topic() + ": " + markerResponses + "(expected to find exactly one entry with partition " + abortSpec.topicPartition().partition() + ")")); } WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult partitionResponse = partitionResponses.get(0); Errors error = Errors.forCode(partitionResponse.errorCode()); if (error != Errors.NONE) { return handleError(error); } else { return ApiResult.completed(abortSpec.topicPartition(), null); } } private ApiResult<TopicPartition, Void> handleError(Errors error) { switch (error) { case CLUSTER_AUTHORIZATION_FAILED: log.error("WriteTxnMarkers request for abort spec {} failed cluster authorization", abortSpec); return ApiResult.failed(abortSpec.topicPartition(), new ClusterAuthorizationException( 
"WriteTxnMarkers request with " + abortSpec + " failed due to cluster " + "authorization error")); case INVALID_PRODUCER_EPOCH: log.error("WriteTxnMarkers request for abort spec {} failed due to an invalid producer epoch", abortSpec); return ApiResult.failed(abortSpec.topicPartition(), new InvalidProducerEpochException( "WriteTxnMarkers request with " + abortSpec + " failed due an invalid producer epoch")); case TRANSACTION_COORDINATOR_FENCED: log.error("WriteTxnMarkers request for abort spec {} failed because the coordinator epoch is fenced", abortSpec); return ApiResult.failed(abortSpec.topicPartition(), new TransactionCoordinatorFencedException( "WriteTxnMarkers request with " + abortSpec + " failed since the provided " + "coordinator epoch " + abortSpec.coordinatorEpoch() + " has been fenced " + "by the active coordinator")); case NOT_LEADER_OR_FOLLOWER: case REPLICA_NOT_AVAILABLE: case BROKER_NOT_AVAILABLE: case UNKNOWN_TOPIC_OR_PARTITION: log.debug("WriteTxnMarkers request for abort spec {} failed due to {}. Will retry after attempting to " + "find the leader again", abortSpec, error); return ApiResult.unmapped(singletonList(abortSpec.topicPartition())); default: log.error("WriteTxnMarkers request for abort spec {} failed due to an unexpected error {}", abortSpec, error); return ApiResult.failed(abortSpec.topicPartition(), error.exception( "WriteTxnMarkers request with " + abortSpec + " failed due to unexpected error: " + error.message())); } } private void validateTopicPartitions(Set<TopicPartition> topicPartitions) { if (!topicPartitions.equals(singleton(abortSpec.topicPartition()))) { throw new IllegalArgumentException("Received unexpected topic partitions " + topicPartitions + " (expected only " + singleton(abortSpec.topicPartition()) + ")"); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AdminApiDriver.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest.NoBatchedFindCoordinatorsException; import org.apache.kafka.common.requests.OffsetFetchRequest.NoBatchedOffsetFetchRequestException; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.OptionalInt; import java.util.Set; import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; /** * The `KafkaAdminClient`'s internal `Call` primitive is not a good fit for multi-stage * request workflows such as we see with the group coordinator APIs or any request which * needs to be sent to a partition leader. Typically these APIs have two concrete stages: * * 1. 
Lookup: Find the broker that can fulfill the request (e.g. partition leader or group * coordinator) * 2. Fulfillment: Send the request to the broker found in the first step * * This is complicated by the fact that `Admin` APIs are typically batched, which * means the Lookup stage may result in a set of brokers. For example, take a `ListOffsets` * request for a set of topic partitions. In the Lookup stage, we will find the partition * leaders for this set of partitions; in the Fulfillment stage, we will group together * partition according to the IDs of the discovered leaders. * * Additionally, the flow between these two stages is bi-directional. We may find after * sending a `ListOffsets` request to an expected leader that there was a leader change. * This would result in a topic partition being sent back to the Lookup stage. * * Managing this complexity by chaining together `Call` implementations is challenging * and messy, so instead we use this class to do the bookkeeping. It handles both the * batching aspect as well as the transitions between the Lookup and Fulfillment stages. * * Note that the interpretation of the `retries` configuration becomes ambiguous * for this kind of pipeline. We could treat it as an overall limit on the number * of requests that can be sent, but that is not very useful because each pipeline * has a minimum number of requests that need to be sent in order to satisfy the request. * Instead, we treat this number of retries independently at each stage so that each * stage has at least one opportunity to complete. So if a user sets `retries=1`, then * the full pipeline can still complete as long as there are no request failures. * * @param <K> The key type, which is also the granularity of the request routing (e.g. 
* this could be `TopicPartition` in the case of requests intended for a partition * leader or the `GroupId` in the case of consumer group requests intended for * the group coordinator) * @param <V> The fulfillment type for each key (e.g. this could be consumer group state * when the key type is a consumer `GroupId`) */ public class AdminApiDriver<K, V> { private final Logger log; private final long retryBackoffMs; private final long deadlineMs; private final AdminApiHandler<K, V> handler; private final AdminApiFuture<K, V> future; private final BiMultimap<ApiRequestScope, K> lookupMap = new BiMultimap<>(); private final BiMultimap<FulfillmentScope, K> fulfillmentMap = new BiMultimap<>(); private final Map<ApiRequestScope, RequestState> requestStates = new HashMap<>(); public AdminApiDriver( AdminApiHandler<K, V> handler, AdminApiFuture<K, V> future, long deadlineMs, long retryBackoffMs, LogContext logContext ) { this.handler = handler; this.future = future; this.deadlineMs = deadlineMs; this.retryBackoffMs = retryBackoffMs; this.log = logContext.logger(AdminApiDriver.class); retryLookup(future.lookupKeys()); } /** * Associate a key with a brokerId. This is called after a response in the Lookup * stage reveals the mapping (e.g. when the `FindCoordinator` tells us the group * coordinator for a specific consumer group). */ private void map(K key, Integer brokerId) { lookupMap.remove(key); fulfillmentMap.put(new FulfillmentScope(brokerId), key); } /** * Disassociate a key from the currently mapped brokerId. This will send the key * back to the Lookup stage, which will allow us to attempt lookup again. 
*/ private void unmap(K key) { fulfillmentMap.remove(key); ApiRequestScope lookupScope = handler.lookupStrategy().lookupScope(key); OptionalInt destinationBrokerId = lookupScope.destinationBrokerId(); if (destinationBrokerId.isPresent()) { fulfillmentMap.put(new FulfillmentScope(destinationBrokerId.getAsInt()), key); } else { lookupMap.put(handler.lookupStrategy().lookupScope(key), key); } } private void clear(Collection<K> keys) { keys.forEach(key -> { lookupMap.remove(key); fulfillmentMap.remove(key); }); } OptionalInt keyToBrokerId(K key) { Optional<FulfillmentScope> scope = fulfillmentMap.getKey(key); return scope .map(fulfillmentScope -> OptionalInt.of(fulfillmentScope.destinationBrokerId)) .orElseGet(OptionalInt::empty); } /** * Complete the future associated with the given key exceptionally. After is called, * the key will be taken out of both the Lookup and Fulfillment stages so that request * are not retried. */ private void completeExceptionally(Map<K, Throwable> errors) { if (!errors.isEmpty()) { future.completeExceptionally(errors); clear(errors.keySet()); } } private void completeLookupExceptionally(Map<K, Throwable> errors) { if (!errors.isEmpty()) { future.completeLookupExceptionally(errors); clear(errors.keySet()); } } private void retryLookup(Collection<K> keys) { keys.forEach(this::unmap); } /** * Complete the future associated with the given key. After this is called, all keys will * be taken out of both the Lookup and Fulfillment stages so that request are not retried. */ private void complete(Map<K, V> values) { if (!values.isEmpty()) { future.complete(values); clear(values.keySet()); } } private void completeLookup(Map<K, Integer> brokerIdMapping) { if (!brokerIdMapping.isEmpty()) { future.completeLookup(brokerIdMapping); brokerIdMapping.forEach(this::map); } } /** * Check whether any requests need to be sent. This should be called immediately * after the driver is constructed and then again after each request returns * (i.e. 
after {@link #onFailure(long, RequestSpec, Throwable)} or * {@link #onResponse(long, RequestSpec, AbstractResponse, Node)}). * * @return A list of requests that need to be sent */ public List<RequestSpec<K>> poll() { List<RequestSpec<K>> requests = new ArrayList<>(); collectLookupRequests(requests); collectFulfillmentRequests(requests); return requests; } /** * Callback that is invoked when a `Call` returns a response successfully. */ public void onResponse( long currentTimeMs, RequestSpec<K> spec, AbstractResponse response, Node node ) { clearInflightRequest(currentTimeMs, spec); if (spec.scope instanceof FulfillmentScope) { AdminApiHandler.ApiResult<K, V> result = handler.handleResponse( node, spec.keys, response ); complete(result.completedKeys); completeExceptionally(result.failedKeys); retryLookup(result.unmappedKeys); } else { AdminApiLookupStrategy.LookupResult<K> result = handler.lookupStrategy().handleResponse( spec.keys, response ); result.completedKeys.forEach(lookupMap::remove); completeLookup(result.mappedKeys); completeLookupExceptionally(result.failedKeys); } } /** * Callback that is invoked when a `Call` is failed. */ public void onFailure( long currentTimeMs, RequestSpec<K> spec, Throwable t ) { clearInflightRequest(currentTimeMs, spec); if (t instanceof DisconnectException) { log.debug("Node disconnected before response could be received for request {}. " + "Will attempt retry", spec.request); // After a disconnect, we want the driver to attempt to lookup the key // again. This gives us a chance to find a new coordinator or partition // leader for example. 
Set<K> keysToUnmap = spec.keys.stream() .filter(future.lookupKeys()::contains) .collect(Collectors.toSet()); retryLookup(keysToUnmap); } else if (t instanceof NoBatchedFindCoordinatorsException || t instanceof NoBatchedOffsetFetchRequestException) { ((CoordinatorStrategy) handler.lookupStrategy()).disableBatch(); Set<K> keysToUnmap = spec.keys.stream() .filter(future.lookupKeys()::contains) .collect(Collectors.toSet()); retryLookup(keysToUnmap); } else { Map<K, Throwable> errors = spec.keys.stream().collect(Collectors.toMap( Function.identity(), key -> t )); if (spec.scope instanceof FulfillmentScope) { completeExceptionally(errors); } else { completeLookupExceptionally(errors); } } } private void clearInflightRequest(long currentTimeMs, RequestSpec<K> spec) { RequestState requestState = requestStates.get(spec.scope); if (requestState != null) { // Only apply backoff if it's not a retry of a lookup request if (spec.scope instanceof FulfillmentScope) { requestState.clearInflight(currentTimeMs + retryBackoffMs); } else { requestState.clearInflight(currentTimeMs); } } } private <T extends ApiRequestScope> void collectRequests( List<RequestSpec<K>> requests, BiMultimap<T, K> multimap, BiFunction<Set<K>, T, Collection<AdminApiHandler.RequestAndKeys<K>>> buildRequest ) { for (Map.Entry<T, Set<K>> entry : multimap.entrySet()) { T scope = entry.getKey(); Set<K> keys = entry.getValue(); if (keys.isEmpty()) { continue; } RequestState requestState = requestStates.computeIfAbsent(scope, c -> new RequestState()); if (requestState.hasInflight()) { continue; } // Copy the keys to avoid exposing the underlying mutable set Set<K> copyKeys = Collections.unmodifiableSet(new HashSet<>(keys)); Collection<AdminApiHandler.RequestAndKeys<K>> newRequests = buildRequest.apply(copyKeys, scope); if (newRequests.isEmpty()) { return; } // Only process the first request; all the remaining requests will be targeted at the same broker // and we don't want to issue more than one fulfillment request 
per broker at a time AdminApiHandler.RequestAndKeys<K> newRequest = newRequests.iterator().next(); RequestSpec<K> spec = new RequestSpec<>( handler.apiName() + "(api=" + newRequest.request.apiKey() + ")", scope, newRequest.keys, newRequest.request, requestState.nextAllowedRetryMs, deadlineMs, requestState.tries ); requestState.setInflight(spec); requests.add(spec); } } private void collectLookupRequests(List<RequestSpec<K>> requests) { collectRequests( requests, lookupMap, (keys, scope) -> Collections.singletonList(new AdminApiHandler.RequestAndKeys<>(handler.lookupStrategy().buildRequest(keys), keys)) ); } private void collectFulfillmentRequests(List<RequestSpec<K>> requests) { collectRequests( requests, fulfillmentMap, (keys, scope) -> handler.buildRequest(scope.destinationBrokerId, keys) ); } /** * This is a helper class which helps us to map requests that need to be sent * to the internal `Call` implementation that is used internally in * {@link org.apache.kafka.clients.admin.KafkaAdminClient}. */ public static class RequestSpec<K> { public final String name; public final ApiRequestScope scope; public final Set<K> keys; public final AbstractRequest.Builder<?> request; public final long nextAllowedTryMs; public final long deadlineMs; public final int tries; public RequestSpec( String name, ApiRequestScope scope, Set<K> keys, AbstractRequest.Builder<?> request, long nextAllowedTryMs, long deadlineMs, int tries ) { this.name = name; this.scope = scope; this.keys = keys; this.request = request; this.nextAllowedTryMs = nextAllowedTryMs; this.deadlineMs = deadlineMs; this.tries = tries; } @Override public String toString() { return "RequestSpec(" + "name=" + name + ", scope=" + scope + ", keys=" + keys + ", request=" + request + ", nextAllowedTryMs=" + nextAllowedTryMs + ", deadlineMs=" + deadlineMs + ", tries=" + tries + ')'; } } /** * Helper class used to track the request state within each request scope. 
* This class enforces a maximum number of inflight request and keeps track * of backoff/retry state. */ private class RequestState { private Optional<RequestSpec<K>> inflightRequest = Optional.empty(); private int tries = 0; private long nextAllowedRetryMs = 0; boolean hasInflight() { return inflightRequest.isPresent(); } public void clearInflight(long nextAllowedRetryMs) { this.inflightRequest = Optional.empty(); this.nextAllowedRetryMs = nextAllowedRetryMs; } public void setInflight(RequestSpec<K> spec) { this.inflightRequest = Optional.of(spec); this.tries++; } } /** * Completion of the Lookup stage results in a destination broker to send the * fulfillment request to. Each destination broker in the Fulfillment stage * gets its own request scope. */ private static class FulfillmentScope implements ApiRequestScope { public final int destinationBrokerId; private FulfillmentScope(int destinationBrokerId) { this.destinationBrokerId = destinationBrokerId; } @Override public OptionalInt destinationBrokerId() { return OptionalInt.of(destinationBrokerId); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; FulfillmentScope that = (FulfillmentScope) o; return destinationBrokerId == that.destinationBrokerId; } @Override public int hashCode() { return Objects.hash(destinationBrokerId); } } /** * Helper class which maintains a bi-directional mapping from a key to a set of values. * Each value can map to one and only one key, but many values can be associated with * a single key. 
* * @param <K> The key type * @param <V> The value type */ private static class BiMultimap<K, V> { private final Map<V, K> reverseMap = new HashMap<>(); private final Map<K, Set<V>> map = new HashMap<>(); void put(K key, V value) { remove(value); reverseMap.put(value, key); map.computeIfAbsent(key, k -> new HashSet<>()).add(value); } void remove(V value) { K key = reverseMap.remove(value); if (key != null) { Set<V> set = map.get(key); if (set != null) { set.remove(value); if (set.isEmpty()) { map.remove(key); } } } } Optional<K> getKey(V value) { return Optional.ofNullable(reverseMap.get(value)); } Set<Map.Entry<K, Set<V>>> entrySet() { return map.entrySet(); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AdminApiFuture.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.internals.KafkaFutureImpl; import java.util.Map; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; public interface AdminApiFuture<K, V> { /** * The initial set of lookup keys. Although this will usually match the fulfillment * keys, it does not necessarily have to. For example, in the case of * {@link AllBrokersStrategy.AllBrokersFuture}, * we use the lookup phase in order to discover the set of keys that will be searched * during the fulfillment phase. * * @return non-empty set of initial lookup keys */ Set<K> lookupKeys(); /** * Complete the futures associated with the given keys. * * @param values the completed keys with their respective values */ void complete(Map<K, V> values); /** * Invoked when lookup of a set of keys succeeds. * * @param brokerIdMapping the discovered mapping from key to the respective brokerId that will * handle the fulfillment request */ default void completeLookup(Map<K, Integer> brokerIdMapping) { } /** * Invoked when lookup fails with a fatal error on a set of keys. 
* * @param lookupErrors the set of keys that failed lookup with their respective errors */ default void completeLookupExceptionally(Map<K, Throwable> lookupErrors) { completeExceptionally(lookupErrors); } /** * Complete the futures associated with the given keys exceptionally. * * @param errors the failed keys with their respective errors */ void completeExceptionally(Map<K, Throwable> errors); static <K, V> SimpleAdminApiFuture<K, V> forKeys(Set<K> keys) { return new SimpleAdminApiFuture<>(keys); } /** * This class can be used when the set of keys is known ahead of time. */ class SimpleAdminApiFuture<K, V> implements AdminApiFuture<K, V> { private final Map<K, KafkaFuture<V>> futures; public SimpleAdminApiFuture(Set<K> keys) { this.futures = keys.stream().collect(Collectors.toMap( Function.identity(), k -> new KafkaFutureImpl<>() )); } @Override public Set<K> lookupKeys() { return futures.keySet(); } @Override public void complete(Map<K, V> values) { values.forEach(this::complete); } private void complete(K key, V value) { futureOrThrow(key).complete(value); } @Override public void completeExceptionally(Map<K, Throwable> errors) { errors.forEach(this::completeExceptionally); } private void completeExceptionally(K key, Throwable t) { futureOrThrow(key).completeExceptionally(t); } private KafkaFutureImpl<V> futureOrThrow(K key) { // The below typecast is safe because we initialise futures using only KafkaFutureImpl. KafkaFutureImpl<V> future = (KafkaFutureImpl<V>) futures.get(key); if (future == null) { throw new IllegalArgumentException("Attempt to complete future for " + key + ", which was not requested"); } else { return future; } } public Map<K, KafkaFuture<V>> all() { return futures; } public KafkaFuture<V> get(K key) { return futures.get(key); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AdminApiHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.Node; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; public interface AdminApiHandler<K, V> { /** * Get a user-friendly name for the API this handler is implementing. */ String apiName(); /** * Build the requests necessary for the given keys. The set of keys is derived by * {@link AdminApiDriver} during the lookup stage as the set of keys which all map * to the same destination broker. Handlers can choose to issue a single request for * all of the provided keys (see {@link Batched}, issue one request per key (see * {@link Unbatched}, or implement their own custom grouping logic if necessary. 
     * @param brokerId the target brokerId for the request
     * @param keys the set of keys that should be handled by this request
     *
     * @return a collection of {@link RequestAndKeys} for the requests containing the given keys
     */
    Collection<RequestAndKeys<K>> buildRequest(int brokerId, Set<K> keys);

    /**
     * Callback that is invoked when a request returns successfully.
     * The handler should parse the response, check for errors, and return a
     * result which indicates which keys (if any) have either been completed or
     * failed with an unrecoverable error.
     *
     * It is also possible that the response indicates an incorrect target brokerId
     * (e.g. in the case of a NotLeader error when the request is bound for a partition
     * leader). In this case the key will be "unmapped" from the target brokerId
     * and lookup will be retried.
     *
     * Note that keys which received a retriable error should be left out of the
     * result. They will be retried automatically.
     *
     * @param broker the broker that the associated request was sent to
     * @param keys the set of keys from the associated request
     * @param response the response received from the broker
     *
     * @return result indicating key completion, failure, and unmapping
     */
    ApiResult<K, V> handleResponse(Node broker, Set<K> keys, AbstractResponse response);

    /**
     * Get the lookup strategy that is responsible for finding the brokerId
     * which will handle each respective key.
     *
     * @return non-null lookup strategy
     */
    AdminApiLookupStrategy<K> lookupStrategy();

    /**
     * The outcome of handling one response: keys that completed with a value,
     * keys that failed with an unrecoverable error, and keys that must be
     * unmapped from their broker so that lookup can be retried.
     * All three collections are exposed as unmodifiable views.
     */
    class ApiResult<K, V> {
        public final Map<K, V> completedKeys;
        public final Map<K, Throwable> failedKeys;
        public final List<K> unmappedKeys;

        public ApiResult(
            Map<K, V> completedKeys,
            Map<K, Throwable> failedKeys,
            List<K> unmappedKeys
        ) {
            this.completedKeys = Collections.unmodifiableMap(completedKeys);
            this.failedKeys = Collections.unmodifiableMap(failedKeys);
            this.unmappedKeys = Collections.unmodifiableList(unmappedKeys);
        }

        // Factory: a single key completed successfully with the given value.
        public static <K, V> ApiResult<K, V> completed(K key, V value) {
            return new ApiResult<>(
                Collections.singletonMap(key, value),
                Collections.emptyMap(),
                Collections.emptyList()
            );
        }

        // Factory: a single key failed with an unrecoverable error.
        public static <K, V> ApiResult<K, V> failed(K key, Throwable t) {
            return new ApiResult<>(
                Collections.emptyMap(),
                Collections.singletonMap(key, t),
                Collections.emptyList()
            );
        }

        // Factory: the given keys must be unmapped and have lookup retried.
        public static <K, V> ApiResult<K, V> unmapped(List<K> keys) {
            return new ApiResult<>(
                Collections.emptyMap(),
                Collections.emptyMap(),
                keys
            );
        }

        // Factory: nothing completed, failed, or unmapped (all keys will be retried).
        public static <K, V> ApiResult<K, V> empty() {
            return new ApiResult<>(
                Collections.emptyMap(),
                Collections.emptyMap(),
                Collections.emptyList()
            );
        }
    }

    /**
     * A request builder paired with the subset of keys that the request is
     * responsible for fulfilling.
     */
    class RequestAndKeys<K> {
        public final AbstractRequest.Builder<?> request;
        public final Set<K> keys;

        public RequestAndKeys(AbstractRequest.Builder<?> request, Set<K> keys) {
            this.request = request;
            this.keys = keys;
        }
    }

    /**
     * An {@link AdminApiHandler} that will group multiple keys into a single request when possible.
     * Keys will be grouped together whenever they target the same broker. This type of handler
     * should be used when interacting with broker APIs that can act on multiple keys at once, such
     * as describing or listing transactions.
     */
    abstract class Batched<K, V> implements AdminApiHandler<K, V> {
        // Build one request covering every key destined for the given broker.
        abstract AbstractRequest.Builder<?> buildBatchedRequest(int brokerId, Set<K> keys);

        @Override
        public final Collection<RequestAndKeys<K>> buildRequest(int brokerId, Set<K> keys) {
            return Collections.singleton(new RequestAndKeys<>(buildBatchedRequest(brokerId, keys), keys));
        }
    }

    /**
     * An {@link AdminApiHandler} that will create one request per key, not performing any grouping based
     * on the targeted broker. This type of handler should only be used for broker APIs that do not accept
     * multiple keys at once, such as initializing a transactional producer.
     */
    abstract class Unbatched<K, V> implements AdminApiHandler<K, V> {
        // Build a request for exactly one key.
        abstract AbstractRequest.Builder<?> buildSingleRequest(int brokerId, K key);
        // Handle the response for exactly one key.
        abstract ApiResult<K, V> handleSingleResponse(Node broker, K key, AbstractResponse response);

        @Override
        public final Collection<RequestAndKeys<K>> buildRequest(int brokerId, Set<K> keys) {
            return keys.stream()
                .map(key -> new RequestAndKeys<>(buildSingleRequest(brokerId, key), Collections.singleton(key)))
                .collect(Collectors.toSet());
        }

        @Override
        public final ApiResult<K, V> handleResponse(Node broker, Set<K> keys, AbstractResponse response) {
            // Each request was built for a single key, so each response must cover exactly one.
            if (keys.size() != 1) {
                throw new IllegalArgumentException("Unbatched admin handler should only be required to handle responses for a single key at a time");
            }
            K key = keys.iterator().next();
            return handleSingleResponse(broker, key, response);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AdminApiLookupStrategy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; public interface AdminApiLookupStrategy<T> { /** * Define the scope of a given key for lookup. Key lookups are complicated * by the need to accommodate different batching mechanics. For example, * a `Metadata` request supports arbitrary batching of topic partitions in * order to discover partitions leaders. This can be supported by returning * a single scope object for all keys. * * On the other hand, `FindCoordinator` requests only support lookup of a * single key. This can be supported by returning a different scope object * for each lookup key. * * Note that if the {@link ApiRequestScope#destinationBrokerId()} maps to * a specific brokerId, then lookup will be skipped. See the use of * {@link StaticBrokerStrategy} in {@link DescribeProducersHandler} for * an example of this usage. 
* * @param key the lookup key * * @return request scope indicating how lookup requests can be batched together */ ApiRequestScope lookupScope(T key); /** * Build the lookup request for a set of keys. The grouping of the keys is controlled * through {@link #lookupScope(Object)}. In other words, each set of keys that map * to the same request scope object will be sent to this method. * * @param keys the set of keys that require lookup * * @return a builder for the lookup request */ AbstractRequest.Builder<?> buildRequest(Set<T> keys); /** * Callback that is invoked when a lookup request returns successfully. The handler * should parse the response, check for errors, and return a result indicating * which keys were mapped to a brokerId successfully and which keys received * a fatal error (e.g. a topic authorization failure). * * Note that keys which receive a retriable error should be left out of the * result. They will be retried automatically. For example, if the response of * `FindCoordinator` request indicates an unavailable coordinator, then the key * should be left out of the result so that the request will be retried. * * @param keys the set of keys from the associated request * @param response the response received from the broker * * @return a result indicating which keys mapped successfully to a brokerId and * which encountered a fatal error */ LookupResult<T> handleResponse(Set<T> keys, AbstractResponse response); class LookupResult<K> { // This is the set of keys that have been completed by the lookup phase itself. // The driver will not attempt lookup or fulfillment for completed keys. public final List<K> completedKeys; // This is the set of keys that have been mapped to a specific broker for // fulfillment of the API request. public final Map<K, Integer> mappedKeys; // This is the set of keys that have encountered a fatal error during the lookup // phase. The driver will not attempt lookup or fulfillment for failed keys. 
public final Map<K, Throwable> failedKeys; public LookupResult( Map<K, Throwable> failedKeys, Map<K, Integer> mappedKeys ) { this(Collections.emptyList(), failedKeys, mappedKeys); } public LookupResult( List<K> completedKeys, Map<K, Throwable> failedKeys, Map<K, Integer> mappedKeys ) { this.completedKeys = Collections.unmodifiableList(completedKeys); this.failedKeys = Collections.unmodifiableMap(failedKeys); this.mappedKeys = Collections.unmodifiableMap(mappedKeys); } static <K> LookupResult<K> empty() { return new LookupResult<>(emptyMap(), emptyMap()); } static <K> LookupResult<K> failed(K key, Throwable exception) { return new LookupResult<>(singletonMap(key, exception), emptyMap()); } static <K> LookupResult<K> mapped(K key, Integer brokerId) { return new LookupResult<>(emptyMap(), singletonMap(key, brokerId)); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.clients.MetadataUpdater; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestHeader; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.Collections; import java.util.List; import java.util.Optional; /** * Manages the metadata for KafkaAdminClient. * * This class is not thread-safe. It is only accessed from the AdminClient * service thread (which also uses the NetworkClient). */ public class AdminMetadataManager { private final Logger log; /** * The minimum amount of time that we should wait between subsequent * retries, when fetching metadata. */ private final long refreshBackoffMs; /** * The minimum amount of time that we should wait before triggering an * automatic metadata refresh. */ private final long metadataExpireMs; /** * Used to update the NetworkClient metadata. */ private final AdminMetadataUpdater updater; /** * The current metadata state. 
*/ private State state = State.QUIESCENT; /** * The time in wall-clock milliseconds when we last updated the metadata. */ private long lastMetadataUpdateMs = 0; /** * The time in wall-clock milliseconds when we last attempted to fetch new * metadata. */ private long lastMetadataFetchAttemptMs = 0; /** * The current cluster information. */ private Cluster cluster = Cluster.empty(); /** * If we got an authorization exception when we last attempted to fetch * metadata, this is it; null, otherwise. */ private AuthenticationException authException = null; public class AdminMetadataUpdater implements MetadataUpdater { @Override public List<Node> fetchNodes() { return cluster.nodes(); } @Override public boolean isUpdateDue(long now) { return false; } @Override public long maybeUpdate(long now) { return Long.MAX_VALUE; } @Override public void handleServerDisconnect(long now, String destinationId, Optional<AuthenticationException> maybeFatalException) { maybeFatalException.ifPresent(AdminMetadataManager.this::updateFailed); AdminMetadataManager.this.requestUpdate(); } @Override public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) { // Do nothing } @Override public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse metadataResponse) { // Do nothing } @Override public void close() { } } /** * The current AdminMetadataManager state. 
*/ enum State { QUIESCENT, UPDATE_REQUESTED, UPDATE_PENDING } public AdminMetadataManager(LogContext logContext, long refreshBackoffMs, long metadataExpireMs) { this.log = logContext.logger(AdminMetadataManager.class); this.refreshBackoffMs = refreshBackoffMs; this.metadataExpireMs = metadataExpireMs; this.updater = new AdminMetadataUpdater(); } public AdminMetadataUpdater updater() { return updater; } public boolean isReady() { if (authException != null) { log.debug("Metadata is not usable: failed to get metadata.", authException); throw authException; } if (cluster.nodes().isEmpty()) { log.trace("Metadata is not ready: bootstrap nodes have not been " + "initialized yet."); return false; } if (cluster.isBootstrapConfigured()) { log.trace("Metadata is not ready: we have not fetched metadata from " + "the bootstrap nodes yet."); return false; } log.trace("Metadata is ready to use."); return true; } public Node controller() { return cluster.controller(); } public Node nodeById(int nodeId) { return cluster.nodeById(nodeId); } public void requestUpdate() { if (state == State.QUIESCENT) { state = State.UPDATE_REQUESTED; log.debug("Requesting metadata update."); } } public void clearController() { if (cluster.controller() != null) { log.trace("Clearing cached controller node {}.", cluster.controller()); this.cluster = new Cluster(cluster.clusterResource().clusterId(), cluster.nodes(), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null); } } /** * Determine if the AdminClient should fetch new metadata. */ public long metadataFetchDelayMs(long now) { switch (state) { case QUIESCENT: // Calculate the time remaining until the next periodic update. // We want to avoid making many metadata requests in a short amount of time, // so there is a metadata refresh backoff period. 
return Math.max(delayBeforeNextAttemptMs(now), delayBeforeNextExpireMs(now)); case UPDATE_REQUESTED: // Respect the backoff, even if an update has been requested return delayBeforeNextAttemptMs(now); default: // An update is already pending, so we don't need to initiate another one. return Long.MAX_VALUE; } } private long delayBeforeNextExpireMs(long now) { long timeSinceUpdate = now - lastMetadataUpdateMs; return Math.max(0, metadataExpireMs - timeSinceUpdate); } private long delayBeforeNextAttemptMs(long now) { long timeSinceAttempt = now - lastMetadataFetchAttemptMs; return Math.max(0, refreshBackoffMs - timeSinceAttempt); } /** * Transition into the UPDATE_PENDING state. Updates lastMetadataFetchAttemptMs. */ public void transitionToUpdatePending(long now) { this.state = State.UPDATE_PENDING; this.lastMetadataFetchAttemptMs = now; } public void updateFailed(Throwable exception) { // We depend on pending calls to request another metadata update this.state = State.QUIESCENT; if (exception instanceof AuthenticationException) { log.warn("Metadata update failed due to authentication error", exception); this.authException = (AuthenticationException) exception; } else { log.info("Metadata update failed", exception); } } /** * Receive new metadata, and transition into the QUIESCENT state. * Updates lastMetadataUpdateMs, cluster, and authException. */ public void update(Cluster cluster, long now) { if (cluster.isBootstrapConfigured()) { log.debug("Setting bootstrap cluster metadata {}.", cluster); } else { log.debug("Updating cluster metadata to {}", cluster); this.lastMetadataUpdateMs = now; } this.state = State.QUIESCENT; this.authException = null; if (!cluster.nodes().isEmpty()) { this.cluster = cluster; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AllBrokersStrategy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.common.message.MetadataRequestData; import org.apache.kafka.common.message.MetadataResponseData; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.OptionalInt; import java.util.Set; import java.util.stream.Collectors; /** * This class is used for use cases which require requests to be sent to all * brokers in the cluster. * * This is a slightly degenerate case of a lookup strategy in the sense that * the broker IDs are used as both the keys and values. Also, unlike * {@link CoordinatorStrategy} and {@link PartitionLeaderStrategy}, we do not * know the set of keys ahead of time: we require the initial lookup in order * to discover what the broker IDs are. 
This is represented with a more complex * type {@code Future<Map<Integer, Future<V>>} in the admin API result type. * For example, see {@link org.apache.kafka.clients.admin.ListTransactionsResult}. */ public class AllBrokersStrategy implements AdminApiLookupStrategy<AllBrokersStrategy.BrokerKey> { public static final BrokerKey ANY_BROKER = new BrokerKey(OptionalInt.empty()); public static final Set<BrokerKey> LOOKUP_KEYS = Collections.singleton(ANY_BROKER); private static final ApiRequestScope SINGLE_REQUEST_SCOPE = new ApiRequestScope() { }; private final Logger log; public AllBrokersStrategy( LogContext logContext ) { this.log = logContext.logger(AllBrokersStrategy.class); } @Override public ApiRequestScope lookupScope(BrokerKey key) { return SINGLE_REQUEST_SCOPE; } @Override public MetadataRequest.Builder buildRequest(Set<BrokerKey> keys) { validateLookupKeys(keys); // Send empty `Metadata` request. We are only interested in the brokers from the response return new MetadataRequest.Builder(new MetadataRequestData()); } @Override public LookupResult<BrokerKey> handleResponse(Set<BrokerKey> keys, AbstractResponse abstractResponse) { validateLookupKeys(keys); MetadataResponse response = (MetadataResponse) abstractResponse; MetadataResponseData.MetadataResponseBrokerCollection brokers = response.data().brokers(); if (brokers.isEmpty()) { log.debug("Metadata response contained no brokers. 
Will backoff and retry"); return LookupResult.empty(); } else { log.debug("Discovered all brokers {} to send requests to", brokers); } Map<BrokerKey, Integer> brokerKeys = brokers.stream().collect(Collectors.toMap( broker -> new BrokerKey(OptionalInt.of(broker.nodeId())), MetadataResponseData.MetadataResponseBroker::nodeId )); return new LookupResult<>( Collections.singletonList(ANY_BROKER), Collections.emptyMap(), brokerKeys ); } private void validateLookupKeys(Set<BrokerKey> keys) { if (keys.size() != 1) { throw new IllegalArgumentException("Unexpected key set: " + keys); } BrokerKey key = keys.iterator().next(); if (key != ANY_BROKER) { throw new IllegalArgumentException("Unexpected key set: " + keys); } } public static class BrokerKey { public final OptionalInt brokerId; public BrokerKey(OptionalInt brokerId) { this.brokerId = brokerId; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; BrokerKey that = (BrokerKey) o; return Objects.equals(brokerId, that.brokerId); } @Override public int hashCode() { return Objects.hash(brokerId); } @Override public String toString() { return "BrokerKey(" + "brokerId=" + brokerId + ')'; } } public static class AllBrokersFuture<V> implements AdminApiFuture<BrokerKey, V> { private final KafkaFutureImpl<Map<Integer, KafkaFutureImpl<V>>> future = new KafkaFutureImpl<>(); private final Map<Integer, KafkaFutureImpl<V>> brokerFutures = new HashMap<>(); @Override public Set<BrokerKey> lookupKeys() { return LOOKUP_KEYS; } @Override public void completeLookup(Map<BrokerKey, Integer> brokerMapping) { brokerMapping.forEach((brokerKey, brokerId) -> { if (brokerId != brokerKey.brokerId.orElse(-1)) { throw new IllegalArgumentException("Invalid lookup mapping " + brokerKey + " -> " + brokerId); } brokerFutures.put(brokerId, new KafkaFutureImpl<>()); }); future.complete(brokerFutures); } @Override public void completeLookupExceptionally(Map<BrokerKey, Throwable> 
lookupErrors) { if (!LOOKUP_KEYS.equals(lookupErrors.keySet())) { throw new IllegalArgumentException("Unexpected keys among lookup errors: " + lookupErrors); } future.completeExceptionally(lookupErrors.get(ANY_BROKER)); } @Override public void complete(Map<BrokerKey, V> values) { values.forEach(this::complete); } private void complete(AllBrokersStrategy.BrokerKey key, V value) { if (key == ANY_BROKER) { throw new IllegalArgumentException("Invalid attempt to complete with lookup key sentinel"); } else { futureOrThrow(key).complete(value); } } @Override public void completeExceptionally(Map<BrokerKey, Throwable> errors) { errors.forEach(this::completeExceptionally); } private void completeExceptionally(AllBrokersStrategy.BrokerKey key, Throwable t) { if (key == ANY_BROKER) { future.completeExceptionally(t); } else { futureOrThrow(key).completeExceptionally(t); } } public KafkaFutureImpl<Map<Integer, KafkaFutureImpl<V>>> all() { return future; } private KafkaFutureImpl<V> futureOrThrow(BrokerKey key) { if (!key.brokerId.isPresent()) { throw new IllegalArgumentException("Attempt to complete with invalid key: " + key); } else { int brokerId = key.brokerId.getAsInt(); KafkaFutureImpl<V> future = brokerFutures.get(brokerId); if (future == null) { throw new IllegalArgumentException("Attempt to complete with unknown broker id: " + brokerId); } else { return future; } } } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import static java.util.Collections.singleton; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestPartition; import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestTopic; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; public 
class AlterConsumerGroupOffsetsHandler extends AdminApiHandler.Batched<CoordinatorKey, Map<TopicPartition, Errors>> { private final CoordinatorKey groupId; private final Map<TopicPartition, OffsetAndMetadata> offsets; private final Logger log; private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy; public AlterConsumerGroupOffsetsHandler( String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, LogContext logContext ) { this.groupId = CoordinatorKey.byGroupId(groupId); this.offsets = offsets; this.log = logContext.logger(AlterConsumerGroupOffsetsHandler.class); this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext); } @Override public String apiName() { return "offsetCommit"; } @Override public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() { return lookupStrategy; } public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> newFuture( String groupId ) { return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId))); } private void validateKeys(Set<CoordinatorKey> groupIds) { if (!groupIds.equals(singleton(groupId))) { throw new IllegalArgumentException("Received unexpected group ids " + groupIds + " (expected only " + singleton(groupId) + ")"); } } @Override public OffsetCommitRequest.Builder buildBatchedRequest( int coordinatorId, Set<CoordinatorKey> groupIds ) { validateKeys(groupIds); Map<String, OffsetCommitRequestTopic> offsetData = new HashMap<>(); offsets.forEach((topicPartition, offsetAndMetadata) -> { OffsetCommitRequestTopic topic = offsetData.computeIfAbsent( topicPartition.topic(), key -> new OffsetCommitRequestTopic().setName(topicPartition.topic()) ); topic.partitions().add(new OffsetCommitRequestPartition() .setCommittedOffset(offsetAndMetadata.offset()) .setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch().orElse(-1)) .setCommittedMetadata(offsetAndMetadata.metadata()) .setPartitionIndex(topicPartition.partition())); }); 
OffsetCommitRequestData data = new OffsetCommitRequestData() .setGroupId(groupId.idValue) .setTopics(new ArrayList<>(offsetData.values())); return new OffsetCommitRequest.Builder(data); } @Override public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse( Node coordinator, Set<CoordinatorKey> groupIds, AbstractResponse abstractResponse ) { validateKeys(groupIds); final OffsetCommitResponse response = (OffsetCommitResponse) abstractResponse; final Set<CoordinatorKey> groupsToUnmap = new HashSet<>(); final Set<CoordinatorKey> groupsToRetry = new HashSet<>(); final Map<TopicPartition, Errors> partitionResults = new HashMap<>(); for (OffsetCommitResponseTopic topic : response.data().topics()) { for (OffsetCommitResponsePartition partition : topic.partitions()) { TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex()); Errors error = Errors.forCode(partition.errorCode()); if (error != Errors.NONE) { handleError( groupId, topicPartition, error, partitionResults, groupsToUnmap, groupsToRetry ); } else { partitionResults.put(topicPartition, error); } } } if (groupsToUnmap.isEmpty() && groupsToRetry.isEmpty()) { return ApiResult.completed(groupId, partitionResults); } else { return ApiResult.unmapped(new ArrayList<>(groupsToUnmap)); } } private void handleError( CoordinatorKey groupId, TopicPartition topicPartition, Errors error, Map<TopicPartition, Errors> partitionResults, Set<CoordinatorKey> groupsToUnmap, Set<CoordinatorKey> groupsToRetry ) { switch (error) { // If the coordinator is in the middle of loading, or rebalance is in progress, then we just need to retry. case COORDINATOR_LOAD_IN_PROGRESS: case REBALANCE_IN_PROGRESS: log.debug("OffsetCommit request for group id {} returned error {}. Will retry.", groupId.idValue, error); groupsToRetry.add(groupId); break; // If the coordinator is not available, then we unmap and retry. 
case COORDINATOR_NOT_AVAILABLE: case NOT_COORDINATOR: log.debug("OffsetCommit request for group id {} returned error {}. Will rediscover the coordinator and retry.", groupId.idValue, error); groupsToUnmap.add(groupId); break; // Group level errors. case INVALID_GROUP_ID: case INVALID_COMMIT_OFFSET_SIZE: case GROUP_AUTHORIZATION_FAILED: // Member level errors. case UNKNOWN_MEMBER_ID: log.debug("OffsetCommit request for group id {} failed due to error {}.", groupId.idValue, error); partitionResults.put(topicPartition, error); break; // TopicPartition level errors. case UNKNOWN_TOPIC_OR_PARTITION: case OFFSET_METADATA_TOO_LARGE: case TOPIC_AUTHORIZATION_FAILED: log.debug("OffsetCommit request for group id {} and partition {} failed due" + " to error {}.", groupId.idValue, topicPartition, error); partitionResults.put(topicPartition, error); break; // Unexpected errors. default: log.error("OffsetCommit request for group id {} and partition {} failed due" + " to unexpected error {}.", groupId.idValue, topicPartition, error); partitionResults.put(topicPartition, error); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/ApiRequestScope.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import java.util.OptionalInt;

/**
 * This interface is used by {@link AdminApiDriver} to bridge the gap
 * to the internal `NodeProvider` defined in
 * {@link org.apache.kafka.clients.admin.KafkaAdminClient}. However, a
 * request scope is more than just a target broker specification. It also
 * provides a way to group key lookups according to different batching
 * mechanics. See {@link AdminApiLookupStrategy#lookupScope(Object)} for
 * more detail.
 */
public interface ApiRequestScope {

    /**
     * Get the target broker ID that a request is intended for or
     * empty if the request can be sent to any broker.
     *
     * Note that if the destination broker ID is present in the
     * {@link ApiRequestScope} returned by {@link AdminApiLookupStrategy#lookupScope(Object)},
     * then no lookup will be attempted.
     *
     * @return optional broker ID
     */
    default OptionalInt destinationBrokerId() {
        // By default a request is not pinned to any particular broker.
        return OptionalInt.empty();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/CoordinatorKey.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import org.apache.kafka.common.requests.FindCoordinatorRequest;

import java.util.Objects;

/**
 * Immutable key identifying a coordinator lookup target: either a consumer
 * group id or a transactional id, distinguished by the coordinator type.
 * Instances are created through the static factory methods.
 */
public class CoordinatorKey {
    public final String idValue;
    public final FindCoordinatorRequest.CoordinatorType type;

    private CoordinatorKey(FindCoordinatorRequest.CoordinatorType type, String idValue) {
        this.idValue = idValue;
        this.type = type;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        CoordinatorKey other = (CoordinatorKey) o;
        return Objects.equals(idValue, other.idValue) && type == other.type;
    }

    @Override
    public int hashCode() {
        return Objects.hash(idValue, type);
    }

    @Override
    public String toString() {
        return "CoordinatorKey(" +
            "idValue='" + idValue + '\'' +
            ", type=" + type +
            ')';
    }

    // Factory: key for the coordinator of a consumer group.
    public static CoordinatorKey byGroupId(String groupId) {
        return new CoordinatorKey(FindCoordinatorRequest.CoordinatorType.GROUP, groupId);
    }

    // Factory: key for the coordinator of a transactional producer.
    public static CoordinatorKey byTransactionalId(String transactionalId) {
        return new CoordinatorKey(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/CoordinatorStrategy.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import org.apache.kafka.common.errors.GroupAuthorizationException;
import org.apache.kafka.common.errors.InvalidGroupIdException;
import org.apache.kafka.common.errors.TransactionalIdAuthorizationException;
import org.apache.kafka.common.message.FindCoordinatorRequestData;
import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.requests.FindCoordinatorResponse;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Lookup strategy which maps group ids or transactional ids to the broker currently
 * acting as their coordinator, via the `FindCoordinator` API. Lookups are batched
 * into a single request when the broker supports it; otherwise each key gets its
 * own request scope so it is looked up individually.
 */
public class CoordinatorStrategy implements AdminApiLookupStrategy<CoordinatorKey> {
    private static final ApiRequestScope BATCH_REQUEST_SCOPE = new ApiRequestScope() { };

    private final Logger log;
    private final FindCoordinatorRequest.CoordinatorType type;

    // Keys from the most recent buildRequest call whose id could not be encoded in
    // the request (e.g. null ids); surfaced as failures when the response is handled.
    private Set<CoordinatorKey> unrepresentableKeys = Collections.emptySet();

    // Whether batched `FindCoordinator` lookups are enabled; package-private for tests.
    boolean batch = true;

    public CoordinatorStrategy(
        FindCoordinatorRequest.CoordinatorType type,
        LogContext logContext
    ) {
        this.type = type;
        this.log = logContext.logger(CoordinatorStrategy.class);
    }

    @Override
    public ApiRequestScope lookupScope(CoordinatorKey key) {
        if (!batch) {
            // If the `FindCoordinator` API does not support batched lookups, we use a
            // separate lookup context for each coordinator key we need to lookup
            return new LookupRequestScope(key);
        }
        return BATCH_REQUEST_SCOPE;
    }

    @Override
    public FindCoordinatorRequest.Builder buildRequest(Set<CoordinatorKey> keys) {
        // Split the keys into representable and unrepresentable ones; the latter are
        // remembered so they can be failed when the response comes back.
        Map<Boolean, Set<CoordinatorKey>> partitioned = keys.stream()
            .collect(Collectors.partitioningBy(
                k -> k != null && isRepresentableKey(k.idValue),
                Collectors.toSet()));
        unrepresentableKeys = partitioned.get(false);
        Set<CoordinatorKey> representableKeys = partitioned.get(true);

        if (!batch) {
            CoordinatorKey singleKey = requireSingletonAndType(representableKeys);
            return new FindCoordinatorRequest.Builder(
                new FindCoordinatorRequestData()
                    .setKey(singleKey.idValue)
                    .setKeyType(singleKey.type.id())
            );
        }

        ensureSameType(representableKeys);
        FindCoordinatorRequestData data = new FindCoordinatorRequestData()
            .setKeyType(type.id())
            .setCoordinatorKeys(representableKeys.stream().map(k -> k.idValue).collect(Collectors.toList()));
        return new FindCoordinatorRequest.Builder(data);
    }

    @Override
    public LookupResult<CoordinatorKey> handleResponse(
        Set<CoordinatorKey> keys,
        AbstractResponse abstractResponse
    ) {
        Map<CoordinatorKey, Integer> mapped = new HashMap<>();
        Map<CoordinatorKey, Throwable> failed = new HashMap<>();

        // Keys that could not be encoded in the request are failed up front.
        for (CoordinatorKey unrepresentable : unrepresentableKeys) {
            failed.put(unrepresentable, new InvalidGroupIdException("The given group id '" +
                unrepresentable.idValue + "' cannot be represented in a request."));
        }

        FindCoordinatorResponse response = (FindCoordinatorResponse) abstractResponse;
        for (Coordinator coordinator : response.coordinators()) {
            CoordinatorKey key;
            if (coordinator.key() == null) {
                // Old broker versions do not support batching and leave the key unset
                key = requireSingletonAndType(keys);
            } else if (type == CoordinatorType.GROUP) {
                key = CoordinatorKey.byGroupId(coordinator.key());
            } else {
                key = CoordinatorKey.byTransactionalId(coordinator.key());
            }
            handleError(Errors.forCode(coordinator.errorCode()), key, coordinator.nodeId(), mapped, failed);
        }
        return new LookupResult<>(failed, mapped);
    }

    // Falls back to one-key-per-request mode for brokers without batched lookups.
    public void disableBatch() {
        batch = false;
    }

    public boolean batch() {
        return batch;
    }

    // In unbatched mode exactly one key of the configured type must be present.
    private CoordinatorKey requireSingletonAndType(Set<CoordinatorKey> keys) {
        if (keys.size() != 1) {
            throw new IllegalArgumentException("Unexpected size of key set: expected 1, but got " + keys.size());
        }
        CoordinatorKey key = keys.iterator().next();
        if (key.type != type) {
            throw new IllegalArgumentException("Unexpected key type: expected key to be of type " + type + ", but got " + key.type);
        }
        return key;
    }

    // Batched lookups require a non-empty key set with a uniform coordinator type.
    private void ensureSameType(Set<CoordinatorKey> keys) {
        if (keys.size() < 1) {
            throw new IllegalArgumentException("Unexpected size of key set: expected >= 1, but got " + keys.size());
        }
        if (!keys.stream().allMatch(k -> k.type == type)) {
            throw new IllegalArgumentException("Unexpected key set: expected all key to be of type " + type + ", but some key were not");
        }
    }

    private static boolean isRepresentableKey(String groupId) {
        return groupId != null;
    }

    private void handleError(Errors error, CoordinatorKey key, int nodeId, Map<CoordinatorKey, Integer> mapped, Map<CoordinatorKey, Throwable> failed) {
        switch (error) {
            case NONE:
                mapped.put(key, nodeId);
                break;
            case COORDINATOR_NOT_AVAILABLE:
            case COORDINATOR_LOAD_IN_PROGRESS:
                // Retriable: the key stays unmapped and will be looked up again
                log.debug("FindCoordinator request for key {} returned topic-level error {}. Will retry", key, error);
                break;
            case GROUP_AUTHORIZATION_FAILED:
                failed.put(key, new GroupAuthorizationException("FindCoordinator request for groupId " +
                    "`" + key + "` failed due to authorization failure", key.idValue));
                break;
            case TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
                failed.put(key, new TransactionalIdAuthorizationException("FindCoordinator request for " +
                    "transactionalId `" + key + "` failed due to authorization failure"));
                break;
            default:
                failed.put(key, error.exception("FindCoordinator request for key " +
                    "`" + key + "` failed due to an unexpected error"));
        }
    }

    // Per-key request scope used when the broker cannot batch coordinator lookups.
    private static class LookupRequestScope implements ApiRequestScope {
        final CoordinatorKey key;

        private LookupRequestScope(CoordinatorKey key) {
            this.key = key;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            LookupRequestScope that = (LookupRequestScope) o;
            return Objects.equals(key, that.key);
        }

        @Override
        public int hashCode() {
            return Objects.hash(key);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.OffsetDeleteRequestData;
import org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestPartition;
import org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestTopic;
import org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.OffsetDeleteRequest;
import org.apache.kafka.common.requests.OffsetDeleteResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

/**
 * Handler for the `OffsetDelete` API, which removes committed offsets for a set of
 * topic partitions from a single consumer group. The group coordinator is located
 * through the standard {@link CoordinatorStrategy} lookup.
 */
public class DeleteConsumerGroupOffsetsHandler extends AdminApiHandler.Batched<CoordinatorKey, Map<TopicPartition, Errors>> {

    private final CoordinatorKey groupId;
    private final Set<TopicPartition> partitions;
    private final Logger log;
    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;

    public DeleteConsumerGroupOffsetsHandler(
        String groupId,
        Set<TopicPartition> partitions,
        LogContext logContext
    ) {
        this.groupId = CoordinatorKey.byGroupId(groupId);
        this.partitions = partitions;
        this.log = logContext.logger(DeleteConsumerGroupOffsetsHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext);
    }

    @Override
    public String apiName() {
        return "offsetDelete";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> newFuture(
        String groupId
    ) {
        return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId)));
    }

    // Defensive check: this handler only ever serves the single group it was built for.
    private void validateKeys(Set<CoordinatorKey> groupIds) {
        if (!groupIds.equals(Collections.singleton(groupId))) {
            throw new IllegalArgumentException("Received unexpected group ids " + groupIds +
                " (expected only " + Collections.singleton(groupId) + ")");
        }
    }

    @Override
    public OffsetDeleteRequest.Builder buildBatchedRequest(int coordinatorId, Set<CoordinatorKey> groupIds) {
        validateKeys(groupIds);

        // Group the requested partitions by topic to match the request schema.
        final OffsetDeleteRequestTopicCollection requestTopics = new OffsetDeleteRequestTopicCollection();
        partitions.stream()
            .collect(Collectors.groupingBy(TopicPartition::topic))
            .forEach((topicName, topicPartitions) -> {
                OffsetDeleteRequestTopic requestTopic = new OffsetDeleteRequestTopic().setName(topicName);
                requestTopic.setPartitions(topicPartitions.stream()
                    .map(tp -> new OffsetDeleteRequestPartition().setPartitionIndex(tp.partition()))
                    .collect(Collectors.toList()));
                requestTopics.add(requestTopic);
            });

        return new OffsetDeleteRequest.Builder(
            new OffsetDeleteRequestData()
                .setGroupId(groupId.idValue)
                .setTopics(requestTopics)
        );
    }

    @Override
    public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(
        Node coordinator,
        Set<CoordinatorKey> groupIds,
        AbstractResponse abstractResponse
    ) {
        validateKeys(groupIds);

        final OffsetDeleteResponse response = (OffsetDeleteResponse) abstractResponse;
        final Errors groupError = Errors.forCode(response.data().errorCode());

        if (groupError == Errors.NONE) {
            // Group-level success: collect the per-partition results (each of which may
            // still carry a partition-level error) and complete the future with them.
            final Map<TopicPartition, Errors> partitionResults = new HashMap<>();
            response.data().topics().forEach(topic ->
                topic.partitions().forEach(partition ->
                    partitionResults.put(
                        new TopicPartition(topic.name(), partition.partitionIndex()),
                        Errors.forCode(partition.errorCode())
                    )
                )
            );
            return ApiResult.completed(groupId, partitionResults);
        }

        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
        handleGroupError(groupId, groupError, failed, groupsToUnmap);
        return new ApiResult<>(Collections.emptyMap(), failed, new ArrayList<>(groupsToUnmap));
    }

    private void handleGroupError(
        CoordinatorKey groupId,
        Errors error,
        Map<CoordinatorKey, Throwable> failed,
        Set<CoordinatorKey> groupsToUnmap
    ) {
        switch (error) {
            case GROUP_AUTHORIZATION_FAILED:
            case GROUP_ID_NOT_FOUND:
            case INVALID_GROUP_ID:
            case NON_EMPTY_GROUP:
                // Fatal errors: propagate directly to the caller
                log.debug("`OffsetDelete` request for group id {} failed due to error {}.", groupId.idValue, error);
                failed.put(groupId, error.exception());
                break;
            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("`OffsetDelete` request for group id {} failed because the coordinator" +
                    " is still in the process of loading state. Will retry.", groupId.idValue);
                break;
            case COORDINATOR_NOT_AVAILABLE:
            case NOT_COORDINATOR:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                log.debug("`OffsetDelete` request for group id {} returned error {}. " +
                    "Will attempt to find the coordinator again and retry.", groupId.idValue, error);
                groupsToUnmap.add(groupId);
                break;
            default:
                log.error("`OffsetDelete` request for group id {} failed due to unexpected error {}.", groupId.idValue, error);
                failed.put(groupId, error.exception());
                break;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.kafka.common.Node;
import org.apache.kafka.common.message.DeleteGroupsRequestData;
import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.DeleteGroupsRequest;
import org.apache.kafka.common.requests.DeleteGroupsResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

/**
 * Handler for the `DeleteGroups` API, which deletes one or more consumer groups.
 * Each group id is routed to its coordinator via {@link CoordinatorStrategy} and
 * groups sharing a coordinator are deleted in a single batched request.
 */
public class DeleteConsumerGroupsHandler extends AdminApiHandler.Batched<CoordinatorKey, Void> {

    private final Logger log;
    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;

    public DeleteConsumerGroupsHandler(
        LogContext logContext
    ) {
        this.log = logContext.logger(DeleteConsumerGroupsHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext);
    }

    @Override
    public String apiName() {
        return "deleteConsumerGroups";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Void> newFuture(
        Collection<String> groupIds
    ) {
        return AdminApiFuture.forKeys(buildKeySet(groupIds));
    }

    private static Set<CoordinatorKey> buildKeySet(Collection<String> groupIds) {
        return groupIds.stream()
            .map(CoordinatorKey::byGroupId)
            .collect(Collectors.toSet());
    }

    @Override
    public DeleteGroupsRequest.Builder buildBatchedRequest(
        int coordinatorId,
        Set<CoordinatorKey> keys
    ) {
        List<String> groupIds = keys.stream().map(key -> key.idValue).collect(Collectors.toList());
        return new DeleteGroupsRequest.Builder(
            new DeleteGroupsRequestData().setGroupsNames(groupIds)
        );
    }

    @Override
    public ApiResult<CoordinatorKey, Void> handleResponse(
        Node coordinator,
        Set<CoordinatorKey> groupIds,
        AbstractResponse abstractResponse
    ) {
        final DeleteGroupsResponse response = (DeleteGroupsResponse) abstractResponse;
        final Map<CoordinatorKey, Void> completed = new HashMap<>();
        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();

        // Each group in the batch carries its own error code; sort every group into
        // completed / failed / retry-after-relookup buckets.
        for (DeletableGroupResult result : response.data().results()) {
            CoordinatorKey groupIdKey = CoordinatorKey.byGroupId(result.groupId());
            Errors error = Errors.forCode(result.errorCode());
            if (error == Errors.NONE) {
                completed.put(groupIdKey, null);
            } else {
                handleError(groupIdKey, error, failed, groupsToUnmap);
            }
        }
        return new ApiResult<>(completed, failed, new ArrayList<>(groupsToUnmap));
    }

    private void handleError(
        CoordinatorKey groupId,
        Errors error,
        Map<CoordinatorKey, Throwable> failed,
        Set<CoordinatorKey> groupsToUnmap
    ) {
        switch (error) {
            case GROUP_AUTHORIZATION_FAILED:
            case INVALID_GROUP_ID:
            case NON_EMPTY_GROUP:
            case GROUP_ID_NOT_FOUND:
                // Fatal errors: propagate directly to the caller
                log.debug("`DeleteConsumerGroups` request for group id {} failed due to error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
                break;
            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("`DeleteConsumerGroups` request for group id {} failed because the coordinator " +
                    "is still in the process of loading state. Will retry", groupId.idValue);
                break;
            case COORDINATOR_NOT_AVAILABLE:
            case NOT_COORDINATOR:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                log.debug("`DeleteConsumerGroups` request for group id {} returned error {}. " +
                    "Will attempt to find the coordinator again and retry", groupId.idValue, error);
                groupsToUnmap.add(groupId);
                break;
            default:
                log.error("`DeleteConsumerGroups` request for group id {} failed due to unexpected error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.MemberAssignment;
import org.apache.kafka.clients.admin.MemberDescription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.message.DescribeGroupsRequestData;
import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroup;
import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.DescribeGroupsRequest;
import org.apache.kafka.common.requests.DescribeGroupsResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;

/**
 * Handler for the `DescribeGroups` API, which describes one or more consumer groups.
 * Group ids are routed to their coordinators via {@link CoordinatorStrategy} and
 * groups sharing a coordinator are described in a single batched request.
 */
public class DescribeConsumerGroupsHandler extends AdminApiHandler.Batched<CoordinatorKey, ConsumerGroupDescription> {

    private final boolean includeAuthorizedOperations;
    private final Logger log;
    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;

    public DescribeConsumerGroupsHandler(
        boolean includeAuthorizedOperations,
        LogContext logContext
    ) {
        this.includeAuthorizedOperations = includeAuthorizedOperations;
        this.log = logContext.logger(DescribeConsumerGroupsHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext);
    }

    private static Set<CoordinatorKey> buildKeySet(Collection<String> groupIds) {
        return groupIds.stream()
            .map(CoordinatorKey::byGroupId)
            .collect(Collectors.toSet());
    }

    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, ConsumerGroupDescription> newFuture(
        Collection<String> groupIds
    ) {
        return AdminApiFuture.forKeys(buildKeySet(groupIds));
    }

    @Override
    public String apiName() {
        return "describeGroups";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    @Override
    public DescribeGroupsRequest.Builder buildBatchedRequest(int coordinatorId, Set<CoordinatorKey> keys) {
        List<String> groupIds = new ArrayList<>(keys.size());
        for (CoordinatorKey key : keys) {
            // Only group coordinator keys make sense here; a transactional key is a bug
            if (key.type != FindCoordinatorRequest.CoordinatorType.GROUP) {
                throw new IllegalArgumentException("Invalid transaction coordinator key " + key +
                    " when building `DescribeGroups` request");
            }
            groupIds.add(key.idValue);
        }
        DescribeGroupsRequestData data = new DescribeGroupsRequestData()
            .setGroups(groupIds)
            .setIncludeAuthorizedOperations(includeAuthorizedOperations);
        return new DescribeGroupsRequest.Builder(data);
    }

    @Override
    public ApiResult<CoordinatorKey, ConsumerGroupDescription> handleResponse(
        Node coordinator,
        Set<CoordinatorKey> groupIds,
        AbstractResponse abstractResponse
    ) {
        final DescribeGroupsResponse response = (DescribeGroupsResponse) abstractResponse;
        final Map<CoordinatorKey, ConsumerGroupDescription> completed = new HashMap<>();
        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();

        for (DescribedGroup describedGroup : response.data().groups()) {
            CoordinatorKey groupIdKey = CoordinatorKey.byGroupId(describedGroup.groupId());
            Errors error = Errors.forCode(describedGroup.errorCode());
            if (error != Errors.NONE) {
                handleError(groupIdKey, error, failed, groupsToUnmap);
                continue;
            }

            final String protocolType = describedGroup.protocolType();
            // An empty protocol type marks a "simple" group that only commits offsets;
            // both those and real consumer-protocol groups are describable here.
            if (!protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) && !protocolType.isEmpty()) {
                failed.put(groupIdKey, new IllegalArgumentException(
                    String.format("GroupId %s is not a consumer group (%s).",
                        groupIdKey.idValue, protocolType)));
                continue;
            }

            final List<DescribedGroupMember> members = describedGroup.members();
            final List<MemberDescription> memberDescriptions = new ArrayList<>(members.size());
            final Set<AclOperation> authorizedOperations =
                validAclOperations(describedGroup.authorizedOperations());
            for (DescribedGroupMember groupMember : members) {
                memberDescriptions.add(describeMember(groupMember));
            }

            completed.put(groupIdKey, new ConsumerGroupDescription(
                groupIdKey.idValue,
                protocolType.isEmpty(),
                memberDescriptions,
                describedGroup.protocolData(),
                ConsumerGroupState.parse(describedGroup.groupState()),
                coordinator,
                authorizedOperations));
        }
        return new ApiResult<>(completed, failed, new ArrayList<>(groupsToUnmap));
    }

    // Converts a wire-format group member into a client-facing MemberDescription,
    // deserializing the consumer-protocol assignment when one is present.
    private static MemberDescription describeMember(DescribedGroupMember groupMember) {
        Set<TopicPartition> partitions = Collections.emptySet();
        if (groupMember.memberAssignment().length > 0) {
            final Assignment assignment = ConsumerProtocol.deserializeAssignment(
                ByteBuffer.wrap(groupMember.memberAssignment()));
            partitions = new HashSet<>(assignment.partitions());
        }
        return new MemberDescription(
            groupMember.memberId(),
            Optional.ofNullable(groupMember.groupInstanceId()),
            groupMember.clientId(),
            groupMember.clientHost(),
            new MemberAssignment(partitions));
    }

    private void handleError(
        CoordinatorKey groupId,
        Errors error,
        Map<CoordinatorKey, Throwable> failed,
        Set<CoordinatorKey> groupsToUnmap
    ) {
        switch (error) {
            case GROUP_AUTHORIZATION_FAILED:
                // Fatal: propagate directly to the caller
                log.debug("`DescribeGroups` request for group id {} failed due to error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
                break;
            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("`DescribeGroups` request for group id {} failed because the coordinator " +
                    "is still in the process of loading state. Will retry", groupId.idValue);
                break;
            case COORDINATOR_NOT_AVAILABLE:
            case NOT_COORDINATOR:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                log.debug("`DescribeGroups` request for group id {} returned error {}. " +
                    "Will attempt to find the coordinator again and retry", groupId.idValue, error);
                groupsToUnmap.add(groupId);
                break;
            default:
                log.error("`DescribeGroups` request for group id {} failed due to unexpected error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
        }
    }

    // Expands the authorized-operations bit field into ACL operations, dropping the
    // meta operations; returns null when the broker omitted the field entirely.
    private Set<AclOperation> validAclOperations(final int authorizedOperations) {
        if (authorizedOperations == MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED) {
            return null;
        }
        return Utils.from32BitField(authorizedOperations)
            .stream()
            .map(AclOperation::fromCode)
            .filter(operation -> operation != AclOperation.UNKNOWN
                && operation != AclOperation.ALL
                && operation != AclOperation.ANY)
            .collect(Collectors.toSet());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/DescribeProducersHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import org.apache.kafka.clients.admin.DescribeProducersOptions;
import org.apache.kafka.clients.admin.DescribeProducersResult.PartitionProducerState;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.message.DescribeProducersRequestData;
import org.apache.kafka.common.message.DescribeProducersResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.ApiError;
import org.apache.kafka.common.requests.DescribeProducersRequest;
import org.apache.kafka.common.requests.DescribeProducersResponse;
import org.apache.kafka.common.utils.CollectionUtils;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.OptionalInt;
import java.util.OptionalLong;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Handler for the `DescribeProducers` API, which reports the active (possibly
 * transactional) producers writing to a set of topic partitions. Requests go to
 * an explicitly configured broker when one was given in the options, otherwise
 * to each partition's current leader.
 */
public class DescribeProducersHandler extends AdminApiHandler.Batched<TopicPartition, PartitionProducerState> {

    private final Logger log;
    private final DescribeProducersOptions options;
    private final AdminApiLookupStrategy<TopicPartition> lookupStrategy;

    public DescribeProducersHandler(
        DescribeProducersOptions options,
        LogContext logContext
    ) {
        this.options = options;
        this.log = logContext.logger(DescribeProducersHandler.class);
        // A pinned broker id bypasses leader discovery entirely.
        if (options.brokerId().isPresent()) {
            this.lookupStrategy = new StaticBrokerStrategy<>(options.brokerId().getAsInt());
        } else {
            this.lookupStrategy = new PartitionLeaderStrategy(logContext);
        }
    }

    public static AdminApiFuture.SimpleAdminApiFuture<TopicPartition, PartitionProducerState> newFuture(
        Collection<TopicPartition> topicPartitions
    ) {
        return AdminApiFuture.forKeys(new HashSet<>(topicPartitions));
    }

    @Override
    public String apiName() {
        return "describeProducers";
    }

    @Override
    public AdminApiLookupStrategy<TopicPartition> lookupStrategy() {
        return lookupStrategy;
    }

    @Override
    public DescribeProducersRequest.Builder buildBatchedRequest(
        int brokerId,
        Set<TopicPartition> topicPartitions
    ) {
        DescribeProducersRequest.Builder builder =
            new DescribeProducersRequest.Builder(new DescribeProducersRequestData());
        // The request schema groups partitions under their topic.
        CollectionUtils.groupPartitionsByTopic(
            topicPartitions,
            builder::addTopic,
            (topicRequest, partitionId) -> topicRequest.partitionIndexes().add(partitionId)
        );
        return builder;
    }

    private void handlePartitionError(
        TopicPartition topicPartition,
        ApiError apiError,
        Map<TopicPartition, Throwable> failed,
        List<TopicPartition> unmapped
    ) {
        switch (apiError.error()) {
            case NOT_LEADER_OR_FOLLOWER:
                if (options.brokerId().isPresent()) {
                    // Typically these errors are retriable, but if the user specified the brokerId
                    // explicitly, then they are fatal.
                    int brokerId = options.brokerId().getAsInt();
                    log.error("Not leader error in `DescribeProducers` response for partition {} " +
                        "for brokerId {} set in options", topicPartition, brokerId, apiError.exception());
                    failed.put(topicPartition, apiError.error().exception("Failed to describe active producers " +
                        "for partition " + topicPartition + " on brokerId " + brokerId));
                } else {
                    // Otherwise, we unmap the partition so that we can find the new leader
                    log.debug("Not leader error in `DescribeProducers` response for partition {}. " +
                        "Will retry later.", topicPartition);
                    unmapped.add(topicPartition);
                }
                break;

            case UNKNOWN_TOPIC_OR_PARTITION:
                // Retriable on the same broker: the key is left pending rather than unmapped
                log.debug("Unknown topic/partition error in `DescribeProducers` response for partition {}. " +
                    "Will retry later.", topicPartition);
                break;

            case INVALID_TOPIC_EXCEPTION:
                log.error("Invalid topic in `DescribeProducers` response for partition {}",
                    topicPartition, apiError.exception());
                failed.put(topicPartition, new InvalidTopicException(
                    "Failed to fetch metadata for partition " + topicPartition +
                        " due to invalid topic error: " + apiError.messageWithFallback(),
                    Collections.singleton(topicPartition.topic())));
                break;

            case TOPIC_AUTHORIZATION_FAILED:
                log.error("Authorization failed in `DescribeProducers` response for partition {}",
                    topicPartition, apiError.exception());
                failed.put(topicPartition, new TopicAuthorizationException("Failed to describe " +
                    "active producers for partition " + topicPartition + " due to authorization failure on topic" +
                    " `" + topicPartition.topic() + "`", Collections.singleton(topicPartition.topic())));
                break;

            default:
                log.error("Unexpected error in `DescribeProducers` response for partition {}",
                    topicPartition, apiError.exception());
                failed.put(topicPartition, apiError.error().exception("Failed to describe active " +
                    "producers for partition " + topicPartition + " due to unexpected error"));
                break;
        }
    }

    @Override
    public ApiResult<TopicPartition, PartitionProducerState> handleResponse(
        Node broker,
        Set<TopicPartition> keys,
        AbstractResponse abstractResponse
    ) {
        DescribeProducersResponse response = (DescribeProducersResponse) abstractResponse;
        Map<TopicPartition, PartitionProducerState> completed = new HashMap<>();
        Map<TopicPartition, Throwable> failed = new HashMap<>();
        List<TopicPartition> unmapped = new ArrayList<>();

        for (DescribeProducersResponseData.TopicResponse topicResponse : response.data().topics()) {
            for (DescribeProducersResponseData.PartitionResponse partitionResponse : topicResponse.partitions()) {
                TopicPartition topicPartition = new TopicPartition(
                    topicResponse.name(), partitionResponse.partitionIndex());

                Errors error = Errors.forCode(partitionResponse.errorCode());
                if (error != Errors.NONE) {
                    handlePartitionError(topicPartition,
                        new ApiError(error, partitionResponse.errorMessage()), failed, unmapped);
                    continue;
                }

                // Map wire-level producer entries to the public ProducerState type;
                // negative sentinel values mean "absent" for both optional fields.
                List<ProducerState> activeProducers = partitionResponse.activeProducers().stream()
                    .map(producer -> {
                        OptionalLong currentTransactionFirstOffset;
                        if (producer.currentTxnStartOffset() < 0) {
                            currentTransactionFirstOffset = OptionalLong.empty();
                        } else {
                            currentTransactionFirstOffset = OptionalLong.of(producer.currentTxnStartOffset());
                        }
                        OptionalInt coordinatorEpoch;
                        if (producer.coordinatorEpoch() < 0) {
                            coordinatorEpoch = OptionalInt.empty();
                        } else {
                            coordinatorEpoch = OptionalInt.of(producer.coordinatorEpoch());
                        }
                        return new ProducerState(
                            producer.producerId(),
                            producer.producerEpoch(),
                            producer.lastSequence(),
                            producer.lastTimestamp(),
                            coordinatorEpoch,
                            currentTransactionFirstOffset
                        );
                    })
                    .collect(Collectors.toList());

                completed.put(topicPartition, new PartitionProducerState(activeProducers));
            }
        }
        return new ApiResult<>(completed, failed, unmapped);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/DescribeTransactionsHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import org.apache.kafka.clients.admin.TransactionDescription;
import org.apache.kafka.clients.admin.TransactionState;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TransactionalIdAuthorizationException;
import org.apache.kafka.common.errors.TransactionalIdNotFoundException;
import org.apache.kafka.common.message.DescribeTransactionsRequestData;
import org.apache.kafka.common.message.DescribeTransactionsResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.DescribeTransactionsRequest;
import org.apache.kafka.common.requests.DescribeTransactionsResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.OptionalLong;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Batched admin API handler for the {@code DescribeTransactions} RPC. For each
 * transactionalId it returns a {@link TransactionDescription} built from the state
 * reported by that transactionalId's transaction coordinator. Coordinator discovery
 * (and re-discovery after coordinator movement) is delegated to
 * {@link CoordinatorStrategy}.
 */
public class DescribeTransactionsHandler extends AdminApiHandler.Batched<CoordinatorKey, TransactionDescription> {
    private final Logger log;
    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;

    public DescribeTransactionsHandler(
        LogContext logContext
    ) {
        this.log = logContext.logger(DescribeTransactionsHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.TRANSACTION, logContext);
    }

    /** Builds the future map keyed by coordinator keys derived from the transactionalIds. */
    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, TransactionDescription> newFuture(
        Collection<String> transactionalIds
    ) {
        return AdminApiFuture.forKeys(buildKeySet(transactionalIds));
    }

    private static Set<CoordinatorKey> buildKeySet(Collection<String> transactionalIds) {
        return transactionalIds.stream()
            .map(CoordinatorKey::byTransactionalId)
            .collect(Collectors.toSet());
    }

    @Override
    public String apiName() {
        return "describeTransactions";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    /**
     * Builds one request covering every transactionalId mapped to the target
     * coordinator. Rejects keys that are not transaction-coordinator keys.
     */
    @Override
    public DescribeTransactionsRequest.Builder buildBatchedRequest(
        int brokerId,
        Set<CoordinatorKey> keys
    ) {
        DescribeTransactionsRequestData request = new DescribeTransactionsRequestData();
        List<String> transactionalIds = keys.stream().map(key -> {
            if (key.type != FindCoordinatorRequest.CoordinatorType.TRANSACTION) {
                throw new IllegalArgumentException("Invalid group coordinator key " + key +
                    " when building `DescribeTransaction` request");
            }
            return key.idValue;
        }).collect(Collectors.toList());
        request.setTransactionalIds(transactionalIds);
        return new DescribeTransactionsRequest.Builder(request);
    }

    /**
     * Maps each per-transaction entry in the response to a completed
     * {@link TransactionDescription}, a failure, or an unmapped key (handled by
     * {@link #handleError}). Entries for transactionalIds that were never
     * requested are logged and skipped.
     */
    @Override
    public ApiResult<CoordinatorKey, TransactionDescription> handleResponse(
        Node broker,
        Set<CoordinatorKey> keys,
        AbstractResponse abstractResponse
    ) {
        DescribeTransactionsResponse response = (DescribeTransactionsResponse) abstractResponse;
        Map<CoordinatorKey, TransactionDescription> completed = new HashMap<>();
        Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        List<CoordinatorKey> unmapped = new ArrayList<>();

        for (DescribeTransactionsResponseData.TransactionState transactionState : response.data().transactionStates()) {
            CoordinatorKey transactionalIdKey = CoordinatorKey.byTransactionalId(
                transactionState.transactionalId());
            if (!keys.contains(transactionalIdKey)) {
                log.warn("Response included transactionalId `{}`, which was not requested",
                    transactionState.transactionalId());
                continue;
            }

            Errors error = Errors.forCode(transactionState.errorCode());
            if (error != Errors.NONE) {
                handleError(transactionalIdKey, error, failed, unmapped);
                continue;
            }

            // A negative start time is the wire-protocol encoding for "no ongoing transaction".
            OptionalLong transactionStartTimeMs = transactionState.transactionStartTimeMs() < 0 ?
                OptionalLong.empty() :
                OptionalLong.of(transactionState.transactionStartTimeMs());

            completed.put(transactionalIdKey, new TransactionDescription(
                broker.id(),
                TransactionState.parse(transactionState.transactionState()),
                transactionState.producerId(),
                transactionState.producerEpoch(),
                transactionState.transactionTimeoutMs(),
                transactionStartTimeMs,
                collectTopicPartitions(transactionState)
            ));
        }

        return new ApiResult<>(completed, failed, unmapped);
    }

    /** Flattens the response's topic/partition lists into a set of {@link TopicPartition}. */
    private Set<TopicPartition> collectTopicPartitions(
        DescribeTransactionsResponseData.TransactionState transactionState
    ) {
        Set<TopicPartition> res = new HashSet<>();
        for (DescribeTransactionsResponseData.TopicData topicData : transactionState.topics()) {
            String topic = topicData.topic();
            for (Integer partitionId : topicData.partitions()) {
                res.add(new TopicPartition(topic, partitionId));
            }
        }
        return res;
    }

    /**
     * Classifies a per-transaction error: authorization / not-found / unexpected errors
     * are fatal; coordinator-loading leaves the key pending for retry; coordinator
     * movement unmaps the key so `FindCoordinator` is re-run.
     */
    private void handleError(
        CoordinatorKey transactionalIdKey,
        Errors error,
        Map<CoordinatorKey, Throwable> failed,
        List<CoordinatorKey> unmapped
    ) {
        switch (error) {
            case TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
                failed.put(transactionalIdKey, new TransactionalIdAuthorizationException(
                    "DescribeTransactions request for transactionalId `" + transactionalIdKey.idValue + "` " +
                        "failed due to authorization failure"));
                break;

            case TRANSACTIONAL_ID_NOT_FOUND:
                failed.put(transactionalIdKey, new TransactionalIdNotFoundException(
                    "DescribeTransactions request for transactionalId `" + transactionalIdKey.idValue + "` " +
                        "failed because the ID could not be found"));
                break;

            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("DescribeTransactions request for transactionalId `{}` failed because the " +
                    "coordinator is still in the process of loading state. Will retry",
                    transactionalIdKey.idValue);
                break;

            case NOT_COORDINATOR:
            case COORDINATOR_NOT_AVAILABLE:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                unmapped.add(transactionalIdKey);
                log.debug("DescribeTransactions request for transactionalId `{}` returned error {}. Will attempt " +
                    "to find the coordinator again and retry", transactionalIdKey.idValue, error);
                break;

            default:
                failed.put(transactionalIdKey, error.exception("DescribeTransactions request for " +
                    "transactionalId `" + transactionalIdKey.idValue + "` failed due to unexpected error"));
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/FenceProducersHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.errors.TransactionalIdAuthorizationException;
import org.apache.kafka.common.message.InitProducerIdRequestData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest;
import org.apache.kafka.common.requests.InitProducerIdRequest;
import org.apache.kafka.common.requests.InitProducerIdResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import org.slf4j.Logger;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Unbatched admin API handler that fences producers by transactionalId. It sends an
 * {@code InitProducerId} request (with no producer id/epoch) to each transactionalId's
 * coordinator, which bumps the epoch and thereby fences any older producer using the
 * same transactionalId. One request is sent per key (hence {@code Unbatched}).
 */
public class FenceProducersHandler extends AdminApiHandler.Unbatched<CoordinatorKey, ProducerIdAndEpoch> {
    private final Logger log;
    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;

    public FenceProducersHandler(
        LogContext logContext
    ) {
        this.log = logContext.logger(FenceProducersHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(FindCoordinatorRequest.CoordinatorType.TRANSACTION, logContext);
    }

    /** Builds the future map keyed by coordinator keys derived from the transactionalIds. */
    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, ProducerIdAndEpoch> newFuture(
        Collection<String> transactionalIds
    ) {
        return AdminApiFuture.forKeys(buildKeySet(transactionalIds));
    }

    private static Set<CoordinatorKey> buildKeySet(Collection<String> transactionalIds) {
        return transactionalIds.stream()
            .map(CoordinatorKey::byTransactionalId)
            .collect(Collectors.toSet());
    }

    @Override
    public String apiName() {
        return "fenceProducer";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    /**
     * Builds an {@code InitProducerId} request for a single transactionalId. Rejects
     * keys that are not transaction-coordinator keys.
     */
    @Override
    InitProducerIdRequest.Builder buildSingleRequest(int brokerId, CoordinatorKey key) {
        if (key.type != FindCoordinatorRequest.CoordinatorType.TRANSACTION) {
            throw new IllegalArgumentException("Invalid group coordinator key " + key +
                " when building `InitProducerId` request");
        }
        InitProducerIdRequestData data = new InitProducerIdRequestData()
            // Because we never include a producer epoch or ID in this request, we expect that some errors
            // (such as PRODUCER_FENCED) will never be returned in the corresponding broker response.
            // If we ever modify this logic to include an epoch or producer ID, we will need to update the
            // error handling logic for this handler to accommodate these new errors.
            .setProducerEpoch(ProducerIdAndEpoch.NONE.epoch)
            .setProducerId(ProducerIdAndEpoch.NONE.producerId)
            .setTransactionalId(key.idValue)
            // Set transaction timeout to 1 since it's only being initialized to fence out older producers with the same transactional ID,
            // and shouldn't be used for any actual record writes
            .setTransactionTimeoutMs(1);
        return new InitProducerIdRequest.Builder(data);
    }

    /**
     * On success, completes the key with the new {@link ProducerIdAndEpoch}; otherwise
     * delegates error classification to {@link #handleError}.
     */
    @Override
    public ApiResult<CoordinatorKey, ProducerIdAndEpoch> handleSingleResponse(
        Node broker,
        CoordinatorKey key,
        AbstractResponse abstractResponse
    ) {
        InitProducerIdResponse response = (InitProducerIdResponse) abstractResponse;

        Errors error = Errors.forCode(response.data().errorCode());
        if (error != Errors.NONE) {
            return handleError(key, error);
        }

        Map<CoordinatorKey, ProducerIdAndEpoch> completed = Collections.singletonMap(key, new ProducerIdAndEpoch(
            response.data().producerId(),
            response.data().producerEpoch()
        ));

        return new ApiResult<>(completed, Collections.emptyMap(), Collections.emptyList());
    }

    /**
     * Classifies an {@code InitProducerId} error: authorization errors are fatal;
     * coordinator-loading returns an empty result (key stays pending and is retried);
     * coordinator movement unmaps the key so `FindCoordinator` is re-run.
     */
    private ApiResult<CoordinatorKey, ProducerIdAndEpoch> handleError(
        CoordinatorKey transactionalIdKey,
        Errors error
    ) {
        switch (error) {
            case CLUSTER_AUTHORIZATION_FAILED:
                return ApiResult.failed(transactionalIdKey, new ClusterAuthorizationException(
                    "InitProducerId request for transactionalId `" + transactionalIdKey.idValue + "` " +
                        "failed due to cluster authorization failure"));

            case TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
                return ApiResult.failed(transactionalIdKey, new TransactionalIdAuthorizationException(
                    "InitProducerId request for transactionalId `" + transactionalIdKey.idValue + "` " +
                        "failed due to transactional ID authorization failure"));

            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("InitProducerId request for transactionalId `{}` failed because the " +
                    "coordinator is still in the process of loading state. Will retry",
                    transactionalIdKey.idValue);
                return ApiResult.empty();

            case NOT_COORDINATOR:
            case COORDINATOR_NOT_AVAILABLE:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                log.debug("InitProducerId request for transactionalId `{}` returned error {}. Will attempt " +
                    "to find the coordinator again and retry", transactionalIdKey.idValue, error);
                return ApiResult.unmapped(Collections.singletonList(transactionalIdKey));

            // We intentionally omit cases for PRODUCER_FENCED, TRANSACTIONAL_ID_NOT_FOUND, and INVALID_PRODUCER_EPOCH
            // since those errors should never happen when our InitProducerIdRequest doesn't include a producer epoch or ID
            // and should therefore fall under the "unexpected error" catch-all case below
            default:
                return ApiResult.failed(transactionalIdKey, error.exception("InitProducerId request for " +
                    "transactionalId `" + transactionalIdKey.idValue + "` failed due to unexpected error"));
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.OffsetFetchRequest;
import org.apache.kafka.common.requests.OffsetFetchResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

/**
 * Admin API handler for the {@code OffsetFetch} RPC, used to list committed consumer
 * group offsets. For each group id it returns a map of {@link TopicPartition} to
 * {@link OffsetAndMetadata} (a {@code null} value marks a partition with no committed
 * offset). It implements {@link AdminApiHandler} directly (rather than {@code Batched})
 * because batching may be disabled end-to-end when the broker does not support
 * batched offset fetches (see {@link #buildRequest}).
 */
public class ListConsumerGroupOffsetsHandler implements AdminApiHandler<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> {

    // Whether to require stable offsets (i.e. offsets not affected by open transactions).
    private final boolean requireStable;
    // Per-group specs: which partitions to fetch for each group id (null partitions = all).
    private final Map<String, ListConsumerGroupOffsetsSpec> groupSpecs;
    private final Logger log;
    private final CoordinatorStrategy lookupStrategy;

    public ListConsumerGroupOffsetsHandler(
        Map<String, ListConsumerGroupOffsetsSpec> groupSpecs,
        boolean requireStable,
        LogContext logContext
    ) {
        this.log = logContext.logger(ListConsumerGroupOffsetsHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext);
        this.groupSpecs = groupSpecs;
        this.requireStable = requireStable;
    }

    /** Builds the future map keyed by coordinator keys derived from the group ids. */
    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> newFuture(Collection<String> groupIds) {
        return AdminApiFuture.forKeys(coordinatorKeys(groupIds));
    }

    @Override
    public String apiName() {
        return "offsetFetch";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    /** Rejects any key that does not correspond to one of the configured group specs. */
    private void validateKeys(Set<CoordinatorKey> groupIds) {
        Set<CoordinatorKey> keys = coordinatorKeys(groupSpecs.keySet());
        if (!keys.containsAll(groupIds)) {
            throw new IllegalArgumentException("Received unexpected group ids " + groupIds +
                " (expected one of " + keys + ")");
        }
    }

    private static Set<CoordinatorKey> coordinatorKeys(Collection<String> groupIds) {
        return groupIds.stream()
            .map(CoordinatorKey::byGroupId)
            .collect(Collectors.toSet());
    }

    /**
     * Builds a single {@code OffsetFetch} request for the given group ids, carrying
     * each group's requested partitions (or null for "all partitions of the group").
     */
    public OffsetFetchRequest.Builder buildBatchedRequest(Set<CoordinatorKey> groupIds) {
        // Create a map that only contains the consumer groups owned by the coordinator.
        Map<String, List<TopicPartition>> coordinatorGroupIdToTopicPartitions = new HashMap<>(groupIds.size());
        groupIds.forEach(g -> {
            ListConsumerGroupOffsetsSpec spec = groupSpecs.get(g.idValue);
            List<TopicPartition> partitions = spec.topicPartitions() != null ?
                new ArrayList<>(spec.topicPartitions()) : null;
            coordinatorGroupIdToTopicPartitions.put(g.idValue, partitions);
        });

        return new OffsetFetchRequest.Builder(coordinatorGroupIdToTopicPartitions, requireStable, false);
    }

    /**
     * Builds either one batched request for all groups, or (when batching has been
     * disabled after a NoBatchedOffsetFetchRequestException) one request per group.
     */
    @Override
    public Collection<RequestAndKeys<CoordinatorKey>> buildRequest(int brokerId, Set<CoordinatorKey> groupIds) {
        validateKeys(groupIds);

        // When the OffsetFetchRequest fails with NoBatchedOffsetFetchRequestException, we completely disable
        // the batching end-to-end, including the FindCoordinatorRequest.
        if (lookupStrategy.batch()) {
            return Collections.singletonList(new RequestAndKeys<>(buildBatchedRequest(groupIds), groupIds));
        } else {
            return groupIds.stream().map(groupId -> {
                Set<CoordinatorKey> keys = Collections.singleton(groupId);
                return new RequestAndKeys<>(buildBatchedRequest(keys), keys);
            }).collect(Collectors.toList());
        }
    }

    /**
     * Converts the {@code OffsetFetch} response into per-group offset maps. Group-level
     * errors are classified by {@link #handleGroupError}; partition-level errors are
     * logged and that partition is skipped; a negative offset maps to a {@code null}
     * entry (no committed offset for the partition).
     */
    @Override
    public ApiResult<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> handleResponse(
        Node coordinator,
        Set<CoordinatorKey> groupIds,
        AbstractResponse abstractResponse
    ) {
        validateKeys(groupIds);

        final OffsetFetchResponse response = (OffsetFetchResponse) abstractResponse;

        Map<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> completed = new HashMap<>();
        Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        List<CoordinatorKey> unmapped = new ArrayList<>();
        for (CoordinatorKey coordinatorKey : groupIds) {
            String group = coordinatorKey.idValue;
            if (response.groupHasError(group)) {
                handleGroupError(CoordinatorKey.byGroupId(group), response.groupLevelError(group),
                    failed, unmapped);
            } else {
                final Map<TopicPartition, OffsetAndMetadata> groupOffsetsListing = new HashMap<>();
                Map<TopicPartition, OffsetFetchResponse.PartitionData> responseData =
                    response.partitionDataMap(group);
                for (Map.Entry<TopicPartition, OffsetFetchResponse.PartitionData> partitionEntry : responseData.entrySet()) {
                    final TopicPartition topicPartition = partitionEntry.getKey();
                    OffsetFetchResponse.PartitionData partitionData = partitionEntry.getValue();
                    final Errors error = partitionData.error;

                    if (error == Errors.NONE) {
                        final long offset = partitionData.offset;
                        final String metadata = partitionData.metadata;
                        final Optional<Integer> leaderEpoch = partitionData.leaderEpoch;
                        // Negative offset indicates that the group has no committed offset for this partition
                        if (offset < 0) {
                            groupOffsetsListing.put(topicPartition, null);
                        } else {
                            groupOffsetsListing.put(topicPartition, new OffsetAndMetadata(offset, leaderEpoch, metadata));
                        }
                    } else {
                        log.warn("Skipping return offset for {} due to error {}.", topicPartition, error);
                    }
                }
                completed.put(CoordinatorKey.byGroupId(group), groupOffsetsListing);
            }
        }
        return new ApiResult<>(completed, failed, unmapped);
    }

    /**
     * Classifies a group-level error: authorization and unexpected errors are fatal;
     * coordinator-loading leaves the key pending for retry; coordinator movement
     * unmaps the key so `FindCoordinator` is re-run.
     */
    private void handleGroupError(
        CoordinatorKey groupId,
        Errors error,
        Map<CoordinatorKey, Throwable> failed,
        List<CoordinatorKey> groupsToUnmap
    ) {
        switch (error) {
            case GROUP_AUTHORIZATION_FAILED:
                log.debug("`OffsetFetch` request for group id {} failed due to error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
                break;

            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("`OffsetFetch` request for group id {} failed because the coordinator " +
                    "is still in the process of loading state. Will retry", groupId.idValue);
                break;

            case COORDINATOR_NOT_AVAILABLE:
            case NOT_COORDINATOR:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                log.debug("`OffsetFetch` request for group id {} returned error {}. " +
                    "Will attempt to find the coordinator again and retry", groupId.idValue, error);
                groupsToUnmap.add(groupId);
                break;

            default:
                log.error("`OffsetFetch` request for group id {} failed due to unexpected error {}",
                    groupId.idValue, error);
                failed.put(groupId, error.exception());
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import org.apache.kafka.clients.admin.ListTransactionsOptions;
import org.apache.kafka.clients.admin.TransactionListing;
import org.apache.kafka.clients.admin.TransactionState;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.CoordinatorNotAvailableException;
import org.apache.kafka.common.message.ListTransactionsRequestData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.ListTransactionsRequest;
import org.apache.kafka.common.requests.ListTransactionsResponse;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Batched admin API handler for the {@code ListTransactions} RPC. The request is
 * fanned out to every broker in the cluster (via {@link AllBrokersStrategy}), and each
 * broker returns the {@link TransactionListing}s it coordinates, filtered by the
 * producer-id and state filters carried in {@link ListTransactionsOptions}.
 */
public class ListTransactionsHandler extends AdminApiHandler.Batched<AllBrokersStrategy.BrokerKey, Collection<TransactionListing>> {
    private final Logger log;
    private final ListTransactionsOptions options;
    private final AllBrokersStrategy lookupStrategy;

    public ListTransactionsHandler(
        ListTransactionsOptions options,
        LogContext logContext
    ) {
        this.options = options;
        this.log = logContext.logger(ListTransactionsHandler.class);
        this.lookupStrategy = new AllBrokersStrategy(logContext);
    }

    /** Builds the all-brokers future used to collect one listing per broker. */
    public static AllBrokersStrategy.AllBrokersFuture<Collection<TransactionListing>> newFuture() {
        return new AllBrokersStrategy.AllBrokersFuture<>();
    }

    @Override
    public String apiName() {
        return "listTransactions";
    }

    @Override
    public AdminApiLookupStrategy<AllBrokersStrategy.BrokerKey> lookupStrategy() {
        return lookupStrategy;
    }

    /**
     * Builds the per-broker request, translating the options' producer-id and
     * transaction-state filters onto the wire format.
     */
    @Override
    public ListTransactionsRequest.Builder buildBatchedRequest(
        int brokerId,
        Set<AllBrokersStrategy.BrokerKey> keys
    ) {
        ListTransactionsRequestData request = new ListTransactionsRequestData();
        request.setProducerIdFilters(new ArrayList<>(options.filteredProducerIds()));
        request.setStateFilters(options.filteredStates().stream()
            .map(TransactionState::toString)
            .collect(Collectors.toList()));
        return new ListTransactionsRequest.Builder(request);
    }

    /**
     * Handles a single broker's response: coordinator-loading leaves the key pending
     * for retry; coordinator shutdown and unexpected errors fail the key; otherwise
     * the broker's transaction states are mapped to {@link TransactionListing}s.
     */
    @Override
    public ApiResult<AllBrokersStrategy.BrokerKey, Collection<TransactionListing>> handleResponse(
        Node broker,
        Set<AllBrokersStrategy.BrokerKey> keys,
        AbstractResponse abstractResponse
    ) {
        int brokerId = broker.id();
        AllBrokersStrategy.BrokerKey key = requireSingleton(keys, brokerId);

        ListTransactionsResponse response = (ListTransactionsResponse) abstractResponse;
        Errors error = Errors.forCode(response.data().errorCode());

        if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
            log.debug("The `ListTransactions` request sent to broker {} failed because the " +
                "coordinator is still loading state. Will try again after backing off", brokerId);
            return ApiResult.empty();
        } else if (error == Errors.COORDINATOR_NOT_AVAILABLE) {
            log.debug("The `ListTransactions` request sent to broker {} failed because the " +
                "coordinator is shutting down", brokerId);
            return ApiResult.failed(key, new CoordinatorNotAvailableException("ListTransactions " +
                "request sent to broker " + brokerId + " failed because the coordinator is shutting down"));
        } else if (error != Errors.NONE) {
            log.error("The `ListTransactions` request sent to broker {} failed because of an " +
                "unexpected error {}", brokerId, error);
            return ApiResult.failed(key, error.exception("ListTransactions request " +
                "sent to broker " + brokerId + " failed with an unexpected exception"));
        } else {
            List<TransactionListing> listings = response.data().transactionStates().stream()
                .map(transactionState -> new TransactionListing(
                    transactionState.transactionalId(),
                    transactionState.producerId(),
                    TransactionState.parse(transactionState.transactionState())))
                .collect(Collectors.toList());
            return ApiResult.completed(key, listings);
        }
    }

    /**
     * Sanity check: an all-brokers fan-out delivers exactly one key per response, and
     * that key must carry the id of the broker that answered.
     */
    private AllBrokersStrategy.BrokerKey requireSingleton(
        Set<AllBrokersStrategy.BrokerKey> keys,
        int brokerId
    ) {
        if (keys.size() != 1) {
            throw new IllegalArgumentException("Unexpected key set: " + keys);
        }
        AllBrokersStrategy.BrokerKey key = keys.iterator().next();
        if (!key.brokerId.isPresent() || key.brokerId.getAsInt() != brokerId) {
            throw new IllegalArgumentException("Unexpected broker key: " + key);
        }
        return key;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/MetadataOperationContext.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import java.util.Collection; import java.util.Map; import java.util.Optional; import org.apache.kafka.clients.admin.AbstractOptions; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InvalidMetadataException; import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.MetadataResponse.PartitionMetadata; import org.apache.kafka.common.requests.MetadataResponse.TopicMetadata; /** * Context class to encapsulate parameters of a call to fetch and use cluster metadata. * Some of the parameters are provided at construction and are immutable whereas others are provided * as "Call" are completed and values are available. * * @param <T> The type of return value of the KafkaFuture * @param <O> The type of configuration option. 
*/ public final class MetadataOperationContext<T, O extends AbstractOptions<O>> { final private Collection<String> topics; final private O options; final private long deadline; final private Map<TopicPartition, KafkaFutureImpl<T>> futures; private Optional<MetadataResponse> response; public MetadataOperationContext(Collection<String> topics, O options, long deadline, Map<TopicPartition, KafkaFutureImpl<T>> futures) { this.topics = topics; this.options = options; this.deadline = deadline; this.futures = futures; this.response = Optional.empty(); } public void setResponse(Optional<MetadataResponse> response) { this.response = response; } public Optional<MetadataResponse> response() { return response; } public O options() { return options; } public long deadline() { return deadline; } public Map<TopicPartition, KafkaFutureImpl<T>> futures() { return futures; } public Collection<String> topics() { return topics; } public static void handleMetadataErrors(MetadataResponse response) { for (TopicMetadata tm : response.topicMetadata()) { for (PartitionMetadata pm : tm.partitionMetadata()) { if (shouldRefreshMetadata(pm.error)) { throw pm.error.exception(); } } } } public static boolean shouldRefreshMetadata(Errors error) { return error.exception() instanceof InvalidMetadataException; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.message.MetadataRequestData; import org.apache.kafka.common.message.MetadataResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.function.Function; /** * Base driver implementation for APIs which target partition leaders. 
*/ public class PartitionLeaderStrategy implements AdminApiLookupStrategy<TopicPartition> { private static final ApiRequestScope SINGLE_REQUEST_SCOPE = new ApiRequestScope() { }; private final Logger log; public PartitionLeaderStrategy(LogContext logContext) { this.log = logContext.logger(PartitionLeaderStrategy.class); } @Override public ApiRequestScope lookupScope(TopicPartition key) { // Metadata requests can group topic partitions arbitrarily, so they can all share // the same request context return SINGLE_REQUEST_SCOPE; } @Override public MetadataRequest.Builder buildRequest(Set<TopicPartition> partitions) { MetadataRequestData request = new MetadataRequestData(); request.setAllowAutoTopicCreation(false); partitions.stream().map(TopicPartition::topic).distinct().forEach(topic -> request.topics().add(new MetadataRequestData.MetadataRequestTopic().setName(topic)) ); return new MetadataRequest.Builder(request); } private void handleTopicError( String topic, Errors topicError, Set<TopicPartition> requestPartitions, Map<TopicPartition, Throwable> failed ) { switch (topicError) { case UNKNOWN_TOPIC_OR_PARTITION: case LEADER_NOT_AVAILABLE: case BROKER_NOT_AVAILABLE: log.debug("Metadata request for topic {} returned topic-level error {}. 
Will retry", topic, topicError); break; case TOPIC_AUTHORIZATION_FAILED: log.error("Received authorization failure for topic {} in `Metadata` response", topic, topicError.exception()); failAllPartitionsForTopic(topic, requestPartitions, failed, tp -> new TopicAuthorizationException( "Failed to fetch metadata for partition " + tp + " due to topic authorization failure", Collections.singleton(topic))); break; case INVALID_TOPIC_EXCEPTION: log.error("Received invalid topic error for topic {} in `Metadata` response", topic, topicError.exception()); failAllPartitionsForTopic(topic, requestPartitions, failed, tp -> new InvalidTopicException( "Failed to fetch metadata for partition " + tp + " due to invalid topic `" + topic + "`", Collections.singleton(topic))); break; default: log.error("Received unexpected error for topic {} in `Metadata` response", topic, topicError.exception()); failAllPartitionsForTopic(topic, requestPartitions, failed, tp -> topicError.exception( "Failed to fetch metadata for partition " + tp + " due to unexpected error for topic `" + topic + "`")); } } private void failAllPartitionsForTopic( String topic, Set<TopicPartition> partitions, Map<TopicPartition, Throwable> failed, Function<TopicPartition, Throwable> exceptionGenerator ) { partitions.stream().filter(tp -> tp.topic().equals(topic)).forEach(tp -> { failed.put(tp, exceptionGenerator.apply(tp)); }); } private void handlePartitionError( TopicPartition topicPartition, Errors partitionError, Map<TopicPartition, Throwable> failed ) { switch (partitionError) { case NOT_LEADER_OR_FOLLOWER: case REPLICA_NOT_AVAILABLE: case LEADER_NOT_AVAILABLE: case BROKER_NOT_AVAILABLE: case KAFKA_STORAGE_ERROR: log.debug("Metadata request for partition {} returned partition-level error {}. 
Will retry", topicPartition, partitionError); break; default: log.error("Received unexpected error for partition {} in `Metadata` response", topicPartition, partitionError.exception()); failed.put(topicPartition, partitionError.exception( "Unexpected error during metadata lookup for " + topicPartition)); } } @Override public LookupResult<TopicPartition> handleResponse( Set<TopicPartition> requestPartitions, AbstractResponse abstractResponse ) { MetadataResponse response = (MetadataResponse) abstractResponse; Map<TopicPartition, Throwable> failed = new HashMap<>(); Map<TopicPartition, Integer> mapped = new HashMap<>(); for (MetadataResponseData.MetadataResponseTopic topicMetadata : response.data().topics()) { String topic = topicMetadata.name(); Errors topicError = Errors.forCode(topicMetadata.errorCode()); if (topicError != Errors.NONE) { handleTopicError(topic, topicError, requestPartitions, failed); continue; } for (MetadataResponseData.MetadataResponsePartition partitionMetadata : topicMetadata.partitions()) { TopicPartition topicPartition = new TopicPartition(topic, partitionMetadata.partitionIndex()); Errors partitionError = Errors.forCode(partitionMetadata.errorCode()); if (!requestPartitions.contains(topicPartition)) { // The `Metadata` response always returns all partitions for requested // topics, so we have to filter any that we are not interested in. continue; } if (partitionError != Errors.NONE) { handlePartitionError(topicPartition, partitionError, failed); continue; } int leaderId = partitionMetadata.leaderId(); if (leaderId >= 0) { mapped.put(topicPartition, leaderId); } else { log.debug("Metadata request for {} returned no error, but the leader is unknown. Will retry", topicPartition); } } } return new LookupResult<>(failed, mapped); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/RemoveMembersFromConsumerGroupHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.admin.internals;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.Node;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.LeaveGroupRequest;
import org.apache.kafka.common.requests.LeaveGroupResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

/**
 * Handler for the `LeaveGroup` API used by `Admin#removeMembersFromConsumerGroup`.
 * It removes the given members from a single consumer group by sending a batched
 * `LeaveGroup` request to the group coordinator and maps each member to the
 * per-member error returned by the broker.
 */
public class RemoveMembersFromConsumerGroupHandler extends AdminApiHandler.Batched<CoordinatorKey, Map<MemberIdentity, Errors>> {

    // The single group this handler operates on; all keys must equal this value.
    private final CoordinatorKey groupId;
    // The members to remove from the group.
    private final List<MemberIdentity> members;
    private final Logger log;
    // Coordinator lookup for GROUP-type coordinators.
    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;

    public RemoveMembersFromConsumerGroupHandler(
        String groupId,
        List<MemberIdentity> members,
        LogContext logContext
    ) {
        this.groupId = CoordinatorKey.byGroupId(groupId);
        this.members = members;
        this.log = logContext.logger(RemoveMembersFromConsumerGroupHandler.class);
        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext);
    }

    @Override
    public String apiName() {
        return "leaveGroup";
    }

    @Override
    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
        return lookupStrategy;
    }

    /**
     * Creates the future that will eventually hold the per-member errors for
     * the given group id.
     */
    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<MemberIdentity, Errors>> newFuture(
        String groupId
    ) {
        return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId)));
    }

    // Sanity check: the driver must only ever hand us the single group this
    // handler was constructed for.
    private void validateKeys(
        Set<CoordinatorKey> groupIds
    ) {
        if (!groupIds.equals(Collections.singleton(groupId))) {
            throw new IllegalArgumentException("Received unexpected group ids " + groupIds +
                " (expected only " + Collections.singleton(groupId) + ")");
        }
    }

    @Override
    public LeaveGroupRequest.Builder buildBatchedRequest(int coordinatorId, Set<CoordinatorKey> groupIds) {
        validateKeys(groupIds);
        return new LeaveGroupRequest.Builder(groupId.idValue, members);
    }

    /**
     * Handles the `LeaveGroup` response. A top-level error is classified by
     * {@link #handleGroupError}; otherwise each member's response is collected
     * into a map of member identity to its individual error code.
     */
    @Override
    public ApiResult<CoordinatorKey, Map<MemberIdentity, Errors>> handleResponse(
        Node coordinator,
        Set<CoordinatorKey> groupIds,
        AbstractResponse abstractResponse
    ) {
        validateKeys(groupIds);
        final LeaveGroupResponse response = (LeaveGroupResponse) abstractResponse;
        final Errors error = response.topLevelError();
        if (error != Errors.NONE) {
            final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
            final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();

            handleGroupError(groupId, error, failed, groupsToUnmap);

            return new ApiResult<>(Collections.emptyMap(), failed, new ArrayList<>(groupsToUnmap));
        } else {
            final Map<MemberIdentity, Errors> memberErrors = new HashMap<>();
            for (MemberResponse memberResponse : response.memberResponses()) {
                memberErrors.put(new MemberIdentity()
                                     .setMemberId(memberResponse.memberId())
                                     .setGroupInstanceId(memberResponse.groupInstanceId()),
                                 Errors.forCode(memberResponse.errorCode()));
            }
            return ApiResult.completed(groupId, memberErrors);
        }
    }

    /**
     * Classifies a top-level `LeaveGroup` error: authorization and unexpected
     * errors fail the group, loading errors are retried as-is, and coordinator
     * movement triggers a new `FindCoordinator` lookup.
     */
    private void handleGroupError(
        CoordinatorKey groupId,
        Errors error,
        Map<CoordinatorKey, Throwable> failed,
        Set<CoordinatorKey> groupsToUnmap
    ) {
        switch (error) {
            case GROUP_AUTHORIZATION_FAILED:
                log.debug("`LeaveGroup` request for group id {} failed due to error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
                break;
            case COORDINATOR_LOAD_IN_PROGRESS:
                // If the coordinator is in the middle of loading, then we just need to retry
                log.debug("`LeaveGroup` request for group id {} failed because the coordinator " +
                    "is still in the process of loading state. Will retry", groupId.idValue);
                break;
            case COORDINATOR_NOT_AVAILABLE:
            case NOT_COORDINATOR:
                // If the coordinator is unavailable or there was a coordinator change, then we unmap
                // the key so that we retry the `FindCoordinator` request
                log.debug("`LeaveGroup` request for group id {} returned error {}. " +
                    "Will attempt to find the coordinator again and retry", groupId.idValue, error);
                groupsToUnmap.add(groupId);
                break;
            default:
                log.error("`LeaveGroup` request for group id {} failed due to unexpected error {}", groupId.idValue, error);
                failed.put(groupId, error.exception());
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/internals/StaticBrokerStrategy.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin.internals; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; import java.util.OptionalInt; import java.util.Set; /** * This lookup strategy is used when we already know the destination broker ID * and we have no need for an explicit lookup. By setting {@link ApiRequestScope#destinationBrokerId()} * in the returned value for {@link #lookupScope(Object)}, the driver will * skip the lookup. 
*/ public class StaticBrokerStrategy<K> implements AdminApiLookupStrategy<K> { private final SingleBrokerScope scope; public StaticBrokerStrategy(int brokerId) { this.scope = new SingleBrokerScope(brokerId); } @Override public ApiRequestScope lookupScope(K key) { return scope; } @Override public AbstractRequest.Builder<?> buildRequest(Set<K> keys) { throw new UnsupportedOperationException(); } @Override public LookupResult<K> handleResponse(Set<K> keys, AbstractResponse response) { throw new UnsupportedOperationException(); } private static class SingleBrokerScope implements ApiRequestScope { private final int brokerId; private SingleBrokerScope(int brokerId) { this.brokerId = brokerId; } @Override public OptionalInt destinationBrokerId() { return OptionalInt.of(brokerId); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/CommitFailedException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.KafkaException;

/**
 * This exception is raised when an offset commit with {@link KafkaConsumer#commitSync()} fails
 * with an unrecoverable error. This can happen when a group rebalance completes before the commit
 * could be successfully applied. In this case, the commit cannot generally be retried because some
 * of the partitions may have already been assigned to another member in the group.
 */
public class CommitFailedException extends KafkaException {

    private static final long serialVersionUID = 1L;

    /** Constructs the exception with the default explanation of a missed rebalance. */
    public CommitFailedException() {
        super("Commit cannot be completed since the group has already rebalanced and assigned the " +
            "partitions to another member. This means that the time between subsequent calls to poll() " +
            "was longer than the configured max.poll.interval.ms, which typically implies that the poll " +
            "loop is spending too much time message processing. You can address this either by " +
            "increasing max.poll.interval.ms or by reducing the maximum size of batches returned in " +
            "poll() with max.poll.records.");
    }

    /** Constructs the exception with a caller-provided message. */
    public CommitFailedException(final String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/Consumer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.io.Closeable;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.OptionalLong;
import java.util.Set;
import java.util.regex.Pattern;

/**
 * The public interface for Kafka consumers. Each method mirrors the
 * corresponding operation on {@link KafkaConsumer}, which carries the full
 * documentation; {@link MockConsumer} provides an in-memory implementation
 * for testing.
 *
 * @see KafkaConsumer
 * @see MockConsumer
 */
public interface Consumer<K, V> extends Closeable {

    /**
     * @see KafkaConsumer#assignment()
     */
    Set<TopicPartition> assignment();

    /**
     * @see KafkaConsumer#subscription()
     */
    Set<String> subscription();

    /**
     * @see KafkaConsumer#subscribe(Collection)
     */
    void subscribe(Collection<String> topics);

    /**
     * @see KafkaConsumer#subscribe(Collection, ConsumerRebalanceListener)
     */
    void subscribe(Collection<String> topics, ConsumerRebalanceListener callback);

    /**
     * @see KafkaConsumer#assign(Collection)
     */
    void assign(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#subscribe(Pattern, ConsumerRebalanceListener)
     */
    void subscribe(Pattern pattern, ConsumerRebalanceListener callback);

    /**
     * @see KafkaConsumer#subscribe(Pattern)
     */
    void subscribe(Pattern pattern);

    /**
     * @see KafkaConsumer#unsubscribe()
     */
    void unsubscribe();

    /**
     * @see KafkaConsumer#poll(long)
     */
    @Deprecated
    ConsumerRecords<K, V> poll(long timeout);

    /**
     * @see KafkaConsumer#poll(Duration)
     */
    ConsumerRecords<K, V> poll(Duration timeout);

    /**
     * @see KafkaConsumer#commitSync()
     */
    void commitSync();

    /**
     * @see KafkaConsumer#commitSync(Duration)
     */
    void commitSync(Duration timeout);

    /**
     * @see KafkaConsumer#commitSync(Map)
     */
    void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets);

    /**
     * @see KafkaConsumer#commitSync(Map, Duration)
     */
    void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets, final Duration timeout);

    /**
     * @see KafkaConsumer#commitAsync()
     */
    void commitAsync();

    /**
     * @see KafkaConsumer#commitAsync(OffsetCommitCallback)
     */
    void commitAsync(OffsetCommitCallback callback);

    /**
     * @see KafkaConsumer#commitAsync(Map, OffsetCommitCallback)
     */
    void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback);

    /**
     * @see KafkaConsumer#seek(TopicPartition, long)
     */
    void seek(TopicPartition partition, long offset);

    /**
     * @see KafkaConsumer#seek(TopicPartition, OffsetAndMetadata)
     */
    void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata);

    /**
     * @see KafkaConsumer#seekToBeginning(Collection)
     */
    void seekToBeginning(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#seekToEnd(Collection)
     */
    void seekToEnd(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#position(TopicPartition)
     */
    long position(TopicPartition partition);

    /**
     * @see KafkaConsumer#position(TopicPartition, Duration)
     */
    long position(TopicPartition partition, final Duration timeout);

    /**
     * @see KafkaConsumer#committed(TopicPartition)
     */
    @Deprecated
    OffsetAndMetadata committed(TopicPartition partition);

    /**
     * @see KafkaConsumer#committed(TopicPartition, Duration)
     */
    @Deprecated
    OffsetAndMetadata committed(TopicPartition partition, final Duration timeout);

    /**
     * @see KafkaConsumer#committed(Set)
     */
    Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#committed(Set, Duration)
     */
    Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions, final Duration timeout);

    /**
     * @see KafkaConsumer#metrics()
     */
    Map<MetricName, ? extends Metric> metrics();

    /**
     * @see KafkaConsumer#partitionsFor(String)
     */
    List<PartitionInfo> partitionsFor(String topic);

    /**
     * @see KafkaConsumer#partitionsFor(String, Duration)
     */
    List<PartitionInfo> partitionsFor(String topic, Duration timeout);

    /**
     * @see KafkaConsumer#listTopics()
     */
    Map<String, List<PartitionInfo>> listTopics();

    /**
     * @see KafkaConsumer#listTopics(Duration)
     */
    Map<String, List<PartitionInfo>> listTopics(Duration timeout);

    /**
     * @see KafkaConsumer#paused()
     */
    Set<TopicPartition> paused();

    /**
     * @see KafkaConsumer#pause(Collection)
     */
    void pause(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#resume(Collection)
     */
    void resume(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#offsetsForTimes(Map)
     */
    Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch);

    /**
     * @see KafkaConsumer#offsetsForTimes(Map, Duration)
     */
    Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Duration timeout);

    /**
     * @see KafkaConsumer#beginningOffsets(Collection)
     */
    Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#beginningOffsets(Collection, Duration)
     */
    Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Duration timeout);

    /**
     * @see KafkaConsumer#endOffsets(Collection)
     */
    Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions);

    /**
     * @see KafkaConsumer#endOffsets(Collection, Duration)
     */
    Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Duration timeout);

    /**
     * @see KafkaConsumer#currentLag(TopicPartition)
     */
    OptionalLong currentLag(TopicPartition topicPartition);

    /**
     * @see KafkaConsumer#groupMetadata()
     */
    ConsumerGroupMetadata groupMetadata();

    /**
     * @see KafkaConsumer#enforceRebalance()
     */
    void enforceRebalance();

    /**
     * @see KafkaConsumer#enforceRebalance(String)
     */
    void enforceRebalance(final String reason);

    /**
     * @see KafkaConsumer#close()
     */
    void close();

    /**
     * @see KafkaConsumer#close(Duration)
     */
    void close(Duration timeout);

    /**
     * @see KafkaConsumer#wakeup()
     */
    void wakeup();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerConfig.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer; import org.apache.kafka.clients.ClientDnsLookup; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.Utils; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import static 
org.apache.kafka.clients.consumer.CooperativeStickyAssignor.COOPERATIVE_STICKY_ASSIGNOR_NAME; import static org.apache.kafka.clients.consumer.RangeAssignor.RANGE_ASSIGNOR_NAME; import static org.apache.kafka.clients.consumer.RoundRobinAssignor.ROUNDROBIN_ASSIGNOR_NAME; import static org.apache.kafka.clients.consumer.StickyAssignor.STICKY_ASSIGNOR_NAME; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.ValidString.in; /** * The consumer configuration keys */ public class ConsumerConfig extends AbstractConfig { private static final ConfigDef CONFIG; // a list contains all the assignor names that only assign subscribed topics to consumer. Should be updated when new assignor added. // This is to help optimize ConsumerCoordinator#performAssignment method public static final List<String> ASSIGN_FROM_SUBSCRIBED_ASSIGNORS = Collections.unmodifiableList(Arrays.asList( RANGE_ASSIGNOR_NAME, ROUNDROBIN_ASSIGNOR_NAME, STICKY_ASSIGNOR_NAME, COOPERATIVE_STICKY_ASSIGNOR_NAME )); /* * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS * THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ /** * <code>group.id</code> */ public static final String GROUP_ID_CONFIG = CommonClientConfigs.GROUP_ID_CONFIG; private static final String GROUP_ID_DOC = CommonClientConfigs.GROUP_ID_DOC; /** * <code>group.instance.id</code> */ public static final String GROUP_INSTANCE_ID_CONFIG = CommonClientConfigs.GROUP_INSTANCE_ID_CONFIG; private static final String GROUP_INSTANCE_ID_DOC = CommonClientConfigs.GROUP_INSTANCE_ID_DOC; /** <code>max.poll.records</code> */ public static final String MAX_POLL_RECORDS_CONFIG = "max.poll.records"; private static final String MAX_POLL_RECORDS_DOC = "The maximum number of records returned in a single call to poll()." + " Note, that <code>" + MAX_POLL_RECORDS_CONFIG + "</code> does not impact the underlying fetching behavior." 
+ " The consumer will cache the records from each fetch request and returns them incrementally from each poll."; public static final int DEFAULT_MAX_POLL_RECORDS = 500; /** <code>max.poll.interval.ms</code> */ public static final String MAX_POLL_INTERVAL_MS_CONFIG = CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG; private static final String MAX_POLL_INTERVAL_MS_DOC = CommonClientConfigs.MAX_POLL_INTERVAL_MS_DOC; /** * <code>session.timeout.ms</code> */ public static final String SESSION_TIMEOUT_MS_CONFIG = CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG; private static final String SESSION_TIMEOUT_MS_DOC = CommonClientConfigs.SESSION_TIMEOUT_MS_DOC; /** * <code>heartbeat.interval.ms</code> */ public static final String HEARTBEAT_INTERVAL_MS_CONFIG = CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG; private static final String HEARTBEAT_INTERVAL_MS_DOC = CommonClientConfigs.HEARTBEAT_INTERVAL_MS_DOC; /** * <code>bootstrap.servers</code> */ public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; /** <code>client.dns.lookup</code> */ public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; /** * <code>enable.auto.commit</code> */ public static final String ENABLE_AUTO_COMMIT_CONFIG = "enable.auto.commit"; private static final String ENABLE_AUTO_COMMIT_DOC = "If true the consumer's offset will be periodically committed in the background."; /** * <code>auto.commit.interval.ms</code> */ public static final String AUTO_COMMIT_INTERVAL_MS_CONFIG = "auto.commit.interval.ms"; private static final String AUTO_COMMIT_INTERVAL_MS_DOC = "The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if <code>enable.auto.commit</code> is set to <code>true</code>."; /** * <code>partition.assignment.strategy</code> */ public static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG = "partition.assignment.strategy"; private static final String PARTITION_ASSIGNMENT_STRATEGY_DOC 
= "A list of class names or class types, " + "ordered by preference, of supported partition assignment strategies that the client will use to distribute " + "partition ownership amongst consumer instances when group management is used. Available options are:" + "<ul>" + "<li><code>org.apache.kafka.clients.consumer.RangeAssignor</code>: Assigns partitions on a per-topic basis.</li>" + "<li><code>org.apache.kafka.clients.consumer.RoundRobinAssignor</code>: Assigns partitions to consumers in a round-robin fashion.</li>" + "<li><code>org.apache.kafka.clients.consumer.StickyAssignor</code>: Guarantees an assignment that is " + "maximally balanced while preserving as many existing partition assignments as possible.</li>" + "<li><code>org.apache.kafka.clients.consumer.CooperativeStickyAssignor</code>: Follows the same StickyAssignor " + "logic, but allows for cooperative rebalancing.</li>" + "</ul>" + "<p>The default assignor is [RangeAssignor, CooperativeStickyAssignor], which will use the RangeAssignor by default, " + "but allows upgrading to the CooperativeStickyAssignor with just a single rolling bounce that removes the RangeAssignor from the list.</p>" + "<p>Implementing the <code>org.apache.kafka.clients.consumer.ConsumerPartitionAssignor</code> " + "interface allows you to plug in a custom assignment strategy.</p>"; /** * <code>auto.offset.reset</code> */ public static final String AUTO_OFFSET_RESET_CONFIG = "auto.offset.reset"; public static final String AUTO_OFFSET_RESET_DOC = "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. 
because that data has been deleted): <ul><li>earliest: automatically reset the offset to the earliest offset<li>latest: automatically reset the offset to the latest offset</li><li>none: throw exception to the consumer if no previous offset is found for the consumer's group</li><li>anything else: throw exception to the consumer.</li></ul>"; /** * <code>fetch.min.bytes</code> */ public static final String FETCH_MIN_BYTES_CONFIG = "fetch.min.bytes"; public static final int DEFAULT_FETCH_MIN_BYTES = 1; private static final String FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of " + DEFAULT_FETCH_MIN_BYTES + " byte means that fetch requests are answered as soon as that many byte(s) of data is available or the fetch request times out waiting for data to arrive. Setting this to a larger value will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency."; /** * <code>fetch.max.bytes</code> */ public static final String FETCH_MAX_BYTES_CONFIG = "fetch.max.bytes"; private static final String FETCH_MAX_BYTES_DOC = "The maximum amount of data the server should return for a fetch request. " + "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than " + "this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. " + "The maximum record batch size accepted by the broker is defined via <code>message.max.bytes</code> (broker config) or " + "<code>max.message.bytes</code> (topic config). 
Note that the consumer performs multiple fetches in parallel."; public static final int DEFAULT_FETCH_MAX_BYTES = 50 * 1024 * 1024; /** * <code>fetch.max.wait.ms</code> */ public static final String FETCH_MAX_WAIT_MS_CONFIG = "fetch.max.wait.ms"; private static final String FETCH_MAX_WAIT_MS_DOC = "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes."; public static final int DEFAULT_FETCH_MAX_WAIT_MS = 500; /** <code>metadata.max.age.ms</code> */ public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG; /** * <code>max.partition.fetch.bytes</code> */ public static final String MAX_PARTITION_FETCH_BYTES_CONFIG = "max.partition.fetch.bytes"; private static final String MAX_PARTITION_FETCH_BYTES_DOC = "The maximum amount of data per-partition the server " + "will return. Records are fetched in batches by the consumer. If the first record batch in the first non-empty " + "partition of the fetch is larger than this limit, the " + "batch will still be returned to ensure that the consumer can make progress. The maximum record batch size " + "accepted by the broker is defined via <code>message.max.bytes</code> (broker config) or " + "<code>max.message.bytes</code> (topic config). 
See " + FETCH_MAX_BYTES_CONFIG + " for limiting the consumer request size."; public static final int DEFAULT_MAX_PARTITION_FETCH_BYTES = 1 * 1024 * 1024; /** <code>send.buffer.bytes</code> */ public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG; /** <code>receive.buffer.bytes</code> */ public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG; /** * <code>client.id</code> */ public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG; /** * <code>client.rack</code> */ public static final String CLIENT_RACK_CONFIG = CommonClientConfigs.CLIENT_RACK_CONFIG; public static final String DEFAULT_CLIENT_RACK = CommonClientConfigs.DEFAULT_CLIENT_RACK; /** * <code>reconnect.backoff.ms</code> */ public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG; /** * <code>reconnect.backoff.max.ms</code> */ public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG; /** * <code>retry.backoff.ms</code> */ public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG; /** * <code>metrics.sample.window.ms</code> */ public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG; /** * <code>metrics.num.samples</code> */ public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG; /** * <code>metrics.log.level</code> */ public static final String METRICS_RECORDING_LEVEL_CONFIG = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG; /** * <code>metric.reporters</code> */ public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG; /** * <code>auto.include.jmx.reporter</code> * */ @Deprecated public static final String AUTO_INCLUDE_JMX_REPORTER_CONFIG = CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_CONFIG; /** * 
<code>check.crcs</code> */ public static final String CHECK_CRCS_CONFIG = "check.crcs"; private static final String CHECK_CRCS_DOC = "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance."; /** <code>key.deserializer</code> */ public static final String KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer"; public static final String KEY_DESERIALIZER_CLASS_DOC = "Deserializer class for key that implements the <code>org.apache.kafka.common.serialization.Deserializer</code> interface."; /** <code>value.deserializer</code> */ public static final String VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"; public static final String VALUE_DESERIALIZER_CLASS_DOC = "Deserializer class for value that implements the <code>org.apache.kafka.common.serialization.Deserializer</code> interface."; /** <code>socket.connection.setup.timeout.ms</code> */ public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG; /** <code>socket.connection.setup.timeout.max.ms</code> */ public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG; /** <code>connections.max.idle.ms</code> */ public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; /** <code>request.timeout.ms</code> */ public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG; private static final String REQUEST_TIMEOUT_MS_DOC = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC; /** <code>default.api.timeout.ms</code> */ public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_CONFIG; /** <code>interceptor.classes</code> */ public static final String INTERCEPTOR_CLASSES_CONFIG = 
"interceptor.classes"; public static final String INTERCEPTOR_CLASSES_DOC = "A list of classes to use as interceptors. " + "Implementing the <code>org.apache.kafka.clients.consumer.ConsumerInterceptor</code> interface allows you to intercept (and possibly mutate) records " + "received by the consumer. By default, there are no interceptors."; /** <code>exclude.internal.topics</code> */ public static final String EXCLUDE_INTERNAL_TOPICS_CONFIG = "exclude.internal.topics"; private static final String EXCLUDE_INTERNAL_TOPICS_DOC = "Whether internal topics matching a subscribed pattern should " + "be excluded from the subscription. It is always possible to explicitly subscribe to an internal topic."; public static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS = true; /** * <code>internal.leave.group.on.close</code> * Whether or not the consumer should leave the group on close. If set to <code>false</code> then a rebalance * won't occur until <code>session.timeout.ms</code> expires. * * <p> * Note: this is an internal configuration and could be changed in the future in a backward incompatible way * */ static final String LEAVE_GROUP_ON_CLOSE_CONFIG = "internal.leave.group.on.close"; /** * <code>internal.throw.on.fetch.stable.offset.unsupported</code> * Whether or not the consumer should throw when the new stable offset feature is supported. * If set to <code>true</code> then the client shall crash upon hitting it. * The purpose of this flag is to prevent unexpected broker downgrade which makes * the offset fetch protection against pending commit invalid. The safest approach * is to fail fast to avoid introducing correctness issue. 
* * <p> * Note: this is an internal configuration and could be changed in the future in a backward incompatible way * */ static final String THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED = "internal.throw.on.fetch.stable.offset.unsupported"; /** <code>isolation.level</code> */ public static final String ISOLATION_LEVEL_CONFIG = "isolation.level"; public static final String ISOLATION_LEVEL_DOC = "Controls how to read messages written transactionally. If set to <code>read_committed</code>, consumer.poll() will only return" + " transactional messages which have been committed. If set to <code>read_uncommitted</code> (the default), consumer.poll() will return all messages, even transactional messages" + " which have been aborted. Non-transactional messages will be returned unconditionally in either mode. <p>Messages will always be returned in offset order. Hence, in " + " <code>read_committed</code> mode, consumer.poll() will only return messages up to the last stable offset (LSO), which is the one less than the offset of the first open transaction." + " In particular any messages appearing after messages belonging to ongoing transactions will be withheld until the relevant transaction has been completed. As a result, <code>read_committed</code>" + " consumers will not be able to read up to the high watermark when there are in flight transactions.</p><p> Further, when in <code>read_committed</code> the seekToEnd method will" + " return the LSO</p>"; public static final String DEFAULT_ISOLATION_LEVEL = IsolationLevel.READ_UNCOMMITTED.toString().toLowerCase(Locale.ROOT); /** <code>allow.auto.create.topics</code> */ public static final String ALLOW_AUTO_CREATE_TOPICS_CONFIG = "allow.auto.create.topics"; private static final String ALLOW_AUTO_CREATE_TOPICS_DOC = "Allow automatic topic creation on the broker when" + " subscribing to or assigning a topic. 
A topic being subscribed to will be automatically created only if the" + " broker allows for it using `auto.create.topics.enable` broker configuration. This configuration must" + " be set to `false` when using brokers older than 0.11.0"; public static final boolean DEFAULT_ALLOW_AUTO_CREATE_TOPICS = true; /** * <code>security.providers</code> */ public static final String SECURITY_PROVIDERS_CONFIG = SecurityConfig.SECURITY_PROVIDERS_CONFIG; private static final String SECURITY_PROVIDERS_DOC = SecurityConfig.SECURITY_PROVIDERS_DOC; private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, Type.STRING, ClientDnsLookup.USE_ALL_DNS_IPS.toString(), in(ClientDnsLookup.USE_ALL_DNS_IPS.toString(), ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY.toString()), Importance.MEDIUM, CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC) .define(GROUP_ID_CONFIG, Type.STRING, null, Importance.HIGH, GROUP_ID_DOC) .define(GROUP_INSTANCE_ID_CONFIG, Type.STRING, null, new ConfigDef.NonEmptyString(), Importance.MEDIUM, GROUP_INSTANCE_ID_DOC) .define(SESSION_TIMEOUT_MS_CONFIG, Type.INT, 45000, Importance.HIGH, SESSION_TIMEOUT_MS_DOC) .define(HEARTBEAT_INTERVAL_MS_CONFIG, Type.INT, 3000, Importance.HIGH, HEARTBEAT_INTERVAL_MS_DOC) .define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, Type.LIST, Arrays.asList(RangeAssignor.class, CooperativeStickyAssignor.class), new ConfigDef.NonNullValidator(), Importance.MEDIUM, PARTITION_ASSIGNMENT_STRATEGY_DOC) .define(METADATA_MAX_AGE_CONFIG, Type.LONG, 5 * 60 * 1000, atLeast(0), Importance.LOW, CommonClientConfigs.METADATA_MAX_AGE_DOC) .define(ENABLE_AUTO_COMMIT_CONFIG, Type.BOOLEAN, true, Importance.MEDIUM, ENABLE_AUTO_COMMIT_DOC) .define(AUTO_COMMIT_INTERVAL_MS_CONFIG, Type.INT, 5000, atLeast(0), 
Importance.LOW, AUTO_COMMIT_INTERVAL_MS_DOC) .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.LOW, CommonClientConfigs.CLIENT_ID_DOC) .define(CLIENT_RACK_CONFIG, Type.STRING, DEFAULT_CLIENT_RACK, Importance.LOW, CommonClientConfigs.CLIENT_RACK_DOC) .define(MAX_PARTITION_FETCH_BYTES_CONFIG, Type.INT, DEFAULT_MAX_PARTITION_FETCH_BYTES, atLeast(0), Importance.HIGH, MAX_PARTITION_FETCH_BYTES_DOC) .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(CommonClientConfigs.SEND_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC) .define(RECEIVE_BUFFER_CONFIG, Type.INT, 64 * 1024, atLeast(CommonClientConfigs.RECEIVE_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.RECEIVE_BUFFER_DOC) .define(FETCH_MIN_BYTES_CONFIG, Type.INT, DEFAULT_FETCH_MIN_BYTES, atLeast(0), Importance.HIGH, FETCH_MIN_BYTES_DOC) .define(FETCH_MAX_BYTES_CONFIG, Type.INT, DEFAULT_FETCH_MAX_BYTES, atLeast(0), Importance.MEDIUM, FETCH_MAX_BYTES_DOC) .define(FETCH_MAX_WAIT_MS_CONFIG, Type.INT, DEFAULT_FETCH_MAX_WAIT_MS, atLeast(0), Importance.LOW, FETCH_MAX_WAIT_MS_DOC) .define(RECONNECT_BACKOFF_MS_CONFIG, Type.LONG, 50L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC) .define(RECONNECT_BACKOFF_MAX_MS_CONFIG, Type.LONG, 1000L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC) .define(RETRY_BACKOFF_MS_CONFIG, Type.LONG, 100L, atLeast(0L), Importance.LOW, CommonClientConfigs.RETRY_BACKOFF_MS_DOC) .define(AUTO_OFFSET_RESET_CONFIG, Type.STRING, OffsetResetStrategy.LATEST.toString(), in(Utils.enumOptions(OffsetResetStrategy.class)), Importance.MEDIUM, AUTO_OFFSET_RESET_DOC) .define(CHECK_CRCS_CONFIG, Type.BOOLEAN, true, Importance.LOW, CHECK_CRCS_DOC) .define(METRICS_SAMPLE_WINDOW_MS_CONFIG, Type.LONG, 30000, atLeast(0), Importance.LOW, CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC) .define(METRICS_NUM_SAMPLES_CONFIG, Type.INT, 2, atLeast(1), Importance.LOW, CommonClientConfigs.METRICS_NUM_SAMPLES_DOC) 
.define(METRICS_RECORDING_LEVEL_CONFIG, Type.STRING, Sensor.RecordingLevel.INFO.toString(), in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString(), Sensor.RecordingLevel.TRACE.toString()), Importance.LOW, CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC) .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(AUTO_INCLUDE_JMX_REPORTER_CONFIG, Type.BOOLEAN, true, Importance.LOW, CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_DOC) .define(KEY_DESERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_DESERIALIZER_CLASS_DOC) .define(VALUE_DESERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_DESERIALIZER_CLASS_DOC) .define(REQUEST_TIMEOUT_MS_CONFIG, Type.INT, 30000, atLeast(0), Importance.MEDIUM, REQUEST_TIMEOUT_MS_DOC) .define(DEFAULT_API_TIMEOUT_MS_CONFIG, Type.INT, 60 * 1000, atLeast(0), Importance.MEDIUM, CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_DOC) .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, Type.LONG, CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS, Importance.MEDIUM, CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC) .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, Type.LONG, CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS, Importance.MEDIUM, CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC) /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */ .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, Type.LONG, 9 * 60 * 1000, Importance.MEDIUM, CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) .define(INTERCEPTOR_CLASSES_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.LOW, INTERCEPTOR_CLASSES_DOC) .define(MAX_POLL_RECORDS_CONFIG, Type.INT, DEFAULT_MAX_POLL_RECORDS, atLeast(1), Importance.MEDIUM, MAX_POLL_RECORDS_DOC) 
.define(MAX_POLL_INTERVAL_MS_CONFIG, Type.INT, 300000, atLeast(1), Importance.MEDIUM, MAX_POLL_INTERVAL_MS_DOC) .define(EXCLUDE_INTERNAL_TOPICS_CONFIG, Type.BOOLEAN, DEFAULT_EXCLUDE_INTERNAL_TOPICS, Importance.MEDIUM, EXCLUDE_INTERNAL_TOPICS_DOC) .defineInternal(LEAVE_GROUP_ON_CLOSE_CONFIG, Type.BOOLEAN, true, Importance.LOW) .defineInternal(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, Type.BOOLEAN, false, Importance.LOW) .define(ISOLATION_LEVEL_CONFIG, Type.STRING, DEFAULT_ISOLATION_LEVEL, in(IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT), IsolationLevel.READ_UNCOMMITTED.toString().toLowerCase(Locale.ROOT)), Importance.MEDIUM, ISOLATION_LEVEL_DOC) .define(ALLOW_AUTO_CREATE_TOPICS_CONFIG, Type.BOOLEAN, DEFAULT_ALLOW_AUTO_CREATE_TOPICS, Importance.MEDIUM, ALLOW_AUTO_CREATE_TOPICS_DOC) // security support .define(SECURITY_PROVIDERS_CONFIG, Type.STRING, null, Importance.LOW, SECURITY_PROVIDERS_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, ConfigDef.CaseInsensitiveValidString .in(Utils.enumOptions(SecurityProtocol.class)), Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .withClientSslSupport() .withClientSaslSupport(); } @Override protected Map<String, Object> postProcessParsedConfig(final Map<String, Object> parsedValues) { CommonClientConfigs.postValidateSaslMechanismConfig(this); Map<String, Object> refinedConfigs = CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues); maybeOverrideClientId(refinedConfigs); return refinedConfigs; } private void maybeOverrideClientId(Map<String, Object> configs) { final String clientId = this.getString(CLIENT_ID_CONFIG); if (clientId == null || clientId.isEmpty()) { final String groupId = this.getString(GROUP_ID_CONFIG); String groupInstanceId = this.getString(GROUP_INSTANCE_ID_CONFIG); if (groupInstanceId != null) JoinGroupRequest.validateGroupInstanceId(groupInstanceId); String groupInstanceIdPart = 
groupInstanceId != null ? groupInstanceId : CONSUMER_CLIENT_ID_SEQUENCE.getAndIncrement() + ""; String generatedClientId = String.format("consumer-%s-%s", groupId, groupInstanceIdPart); configs.put(CLIENT_ID_CONFIG, generatedClientId); } } protected static Map<String, Object> appendDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { // validate deserializer configuration, if the passed deserializer instance is null, the user must explicitly set a valid deserializer configuration value Map<String, Object> newConfigs = new HashMap<>(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); else if (newConfigs.get(KEY_DESERIALIZER_CLASS_CONFIG) == null) throw new ConfigException(KEY_DESERIALIZER_CLASS_CONFIG, null, "must be non-null."); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); else if (newConfigs.get(VALUE_DESERIALIZER_CLASS_CONFIG) == null) throw new ConfigException(VALUE_DESERIALIZER_CLASS_CONFIG, null, "must be non-null."); return newConfigs; } boolean maybeOverrideEnableAutoCommit() { Optional<String> groupId = Optional.ofNullable(getString(CommonClientConfigs.GROUP_ID_CONFIG)); boolean enableAutoCommit = getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); if (!groupId.isPresent()) { // overwrite in case of default group id where the config is not explicitly provided if (!originals().containsKey(ENABLE_AUTO_COMMIT_CONFIG)) { enableAutoCommit = false; } else if (enableAutoCommit) { throw new InvalidConfigurationException(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG + " cannot be set to true when default group id (null) is used."); } } return enableAutoCommit; } public ConsumerConfig(Properties props) { super(CONFIG, props, "consumer"); } public ConsumerConfig(Map<String, Object> props) { super(CONFIG, props, "consumer"); } protected ConsumerConfig(Map<?, ?> props, boolean 
doLog) { super(CONFIG, props, doLog, "consumer"); } public static Set<String> configNames() { return CONFIG.names(); } public static ConfigDef configDef() { return new ConfigDef(CONFIG); } public static void main(String[] args) { System.out.println(CONFIG.toHtml(4, config -> "consumerconfigs_" + config)); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerGroupMetadata.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer; import org.apache.kafka.common.requests.JoinGroupRequest; import java.util.Objects; import java.util.Optional; /** * A metadata struct containing the consumer group information. * Note: Any change to this class is considered public and requires a KIP. 
*/ public class ConsumerGroupMetadata { final private String groupId; final private int generationId; final private String memberId; final private Optional<String> groupInstanceId; public ConsumerGroupMetadata(String groupId, int generationId, String memberId, Optional<String> groupInstanceId) { this.groupId = Objects.requireNonNull(groupId, "group.id can't be null"); this.generationId = generationId; this.memberId = Objects.requireNonNull(memberId, "member.id can't be null"); this.groupInstanceId = Objects.requireNonNull(groupInstanceId, "group.instance.id can't be null"); } public ConsumerGroupMetadata(String groupId) { this(groupId, JoinGroupRequest.UNKNOWN_GENERATION_ID, JoinGroupRequest.UNKNOWN_MEMBER_ID, Optional.empty()); } public String groupId() { return groupId; } public int generationId() { return generationId; } public String memberId() { return memberId; } public Optional<String> groupInstanceId() { return groupInstanceId; } @Override public String toString() { return String.format("GroupMetadata(groupId = %s, generationId = %d, memberId = %s, groupInstanceId = %s)", groupId, generationId, memberId, groupInstanceId.orElse("")); } @Override public boolean equals(final Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final ConsumerGroupMetadata that = (ConsumerGroupMetadata) o; return generationId == that.generationId && Objects.equals(groupId, that.groupId) && Objects.equals(memberId, that.memberId) && Objects.equals(groupInstanceId, that.groupInstanceId); } @Override public int hashCode() { return Objects.hash(groupId, generationId, memberId, groupInstanceId); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerInterceptor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer; import org.apache.kafka.common.Configurable; import org.apache.kafka.common.TopicPartition; import java.util.Map; /** * A plugin interface that allows you to intercept (and possibly mutate) records received by the consumer. A primary use-case * is for third-party components to hook into the consumer applications for custom monitoring, logging, etc. * * <p> * This class will get consumer config properties via <code>configure()</code> method, including clientId assigned * by KafkaConsumer if not specified in the consumer config. The interceptor implementation needs to be aware that it will be * sharing consumer config namespace with other interceptors and serializers, and ensure that there are no conflicts. * <p> * Exceptions thrown by ConsumerInterceptor methods will be caught, logged, but not propagated further. As a result, if * the user configures the interceptor with the wrong key and value type parameters, the consumer will not throw an exception, * just log the errors. * <p> * ConsumerInterceptor callbacks are called from the same thread that invokes * {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)}. 
 * <p>
 * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available.
 * Please see the class documentation for ClusterResourceListener for more information.
 */
public interface ConsumerInterceptor<K, V> extends Configurable, AutoCloseable {

    /**
     * This is called just before the records are returned by
     * {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)}.
     * <p>
     * This method is allowed to modify consumer records, in which case the new records will be
     * returned. There is no limitation on the number of records that could be returned from this
     * method. I.e., the interceptor can filter the records or generate new records.
     * <p>
     * Any exception thrown by this method will be caught by the caller, logged, but not propagated to the client.
     * <p>
     * Since the consumer may run multiple interceptors, a particular interceptor's onConsume() callback will be called
     * in the order specified by {@link org.apache.kafka.clients.consumer.ConsumerConfig#INTERCEPTOR_CLASSES_CONFIG}.
     * The first interceptor in the list gets the consumed records, the following interceptor will be passed the records returned
     * by the previous interceptor, and so on. Since interceptors are allowed to modify records, interceptors may potentially get
     * the records already modified by other interceptors. However, building a pipeline of mutable interceptors that depend on the output
     * of the previous interceptor is discouraged, because of potential side-effects caused by interceptors potentially failing
     * to modify the record and throwing an exception. If one of the interceptors in the list throws an exception from onConsume(),
     * the exception is caught, logged, and the next interceptor is called with the records returned by the last successful interceptor
     * in the list, or otherwise the original consumed records.
     *
     * @param records records to be consumed by the client or records returned by the previous interceptors in the list
     * @return records that are either modified by the interceptor or same as records passed to this method
     */
    ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records);

    /**
     * This is called when offsets get committed.
     * <p>
     * Any exception thrown by this method will be ignored by the caller.
     *
     * @param offsets A map of offsets by partition with associated metadata
     */
    void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets);

    /**
     * This is called when the interceptor is closed.
     */
    void close();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Optional;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Utils;

import static org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor.DEFAULT_GENERATION;

/**
 * This interface is used to define custom partition assignment for use in
 * {@link org.apache.kafka.clients.consumer.KafkaConsumer}. Members of the consumer group subscribe
 * to the topics they are interested in and forward their subscriptions to a Kafka broker serving
 * as the group coordinator. The coordinator selects one member to perform the group assignment and
 * propagates the subscriptions of all members to it. Then {@link #assign(Cluster, GroupSubscription)} is called
 * to perform the assignment and the results are forwarded back to each respective member.
 *
 * In some cases, it is useful to forward additional metadata to the assignor in order to make
 * assignment decisions. For this, you can override {@link #subscriptionUserData(Set)} and provide custom
 * userData in the returned Subscription. For example, to have a rack-aware assignor, an implementation
 * can use this user data to forward the rackId belonging to each member.
 */
public interface ConsumerPartitionAssignor {

    /**
     * Return serialized data that will be included in the {@link Subscription} sent to the leader
     * and can be leveraged in {@link #assign(Cluster, GroupSubscription)} (e.g. local host/rack information).
     *
     * @param topics Topics subscribed to through {@link org.apache.kafka.clients.consumer.KafkaConsumer#subscribe(java.util.Collection)}
     *               and variants
     * @return nullable subscription user data
     */
    default ByteBuffer subscriptionUserData(Set<String> topics) {
        return null;
    }

    /**
     * Perform the group assignment given the member subscriptions and current cluster metadata.
     *
     * @param metadata Current topic/broker metadata known by consumer
     * @param groupSubscription Subscriptions from all members including metadata provided through {@link #subscriptionUserData(Set)}
     * @return A map from the members to their respective assignments. This should have one entry
     *         for each member in the input subscription map.
     */
    GroupAssignment assign(Cluster metadata, GroupSubscription groupSubscription);

    /**
     * Callback which is invoked when a group member receives its assignment from the leader.
     *
     * @param assignment The local member's assignment as provided by the leader in {@link #assign(Cluster, GroupSubscription)}
     * @param metadata Additional metadata on the consumer (optional)
     */
    default void onAssignment(Assignment assignment, ConsumerGroupMetadata metadata) {
    }

    /**
     * Indicate which rebalance protocol this assignor works with;
     * By default it should always work with {@link RebalanceProtocol#EAGER}.
     */
    default List<RebalanceProtocol> supportedProtocols() {
        return Collections.singletonList(RebalanceProtocol.EAGER);
    }

    /**
     * Return the version of the assignor which indicates how the user metadata encodings
     * and the assignment algorithm gets evolved.
     */
    default short version() {
        return (short) 0;
    }

    /**
     * Unique name for this assignor (e.g. "range" or "roundrobin" or "sticky"). Note, this is not required
     * to be the same as the class name specified in {@link ConsumerConfig#PARTITION_ASSIGNMENT_STRATEGY_CONFIG}
     *
     * @return non-null unique name
     */
    String name();

    /**
     * A member's subscription as forwarded to the group leader: the subscribed topics plus optional
     * user data, currently-owned partitions, generation id and rack id.
     */
    final class Subscription {
        private final List<String> topics;
        private final ByteBuffer userData;
        private final List<TopicPartition> ownedPartitions;
        private final Optional<String> rackId;
        // Mutable: filled in by the coordinator layer via setGroupInstanceId after deserialization.
        private Optional<String> groupInstanceId;
        private final Optional<Integer> generationId;

        public Subscription(List<String> topics, ByteBuffer userData, List<TopicPartition> ownedPartitions, int generationId, Optional<String> rackId) {
            this.topics = topics;
            this.userData = userData;
            this.ownedPartitions = ownedPartitions;
            this.groupInstanceId = Optional.empty();
            // A negative generation id is the sentinel for "unknown generation" and maps to empty.
            this.generationId = generationId < 0 ? Optional.empty() : Optional.of(generationId);
            this.rackId = rackId;
        }

        public Subscription(List<String> topics, ByteBuffer userData, List<TopicPartition> ownedPartitions) {
            this(topics, userData, ownedPartitions, DEFAULT_GENERATION, Optional.empty());
        }

        public Subscription(List<String> topics, ByteBuffer userData) {
            this(topics, userData, Collections.emptyList(), DEFAULT_GENERATION, Optional.empty());
        }

        public Subscription(List<String> topics) {
            this(topics, null, Collections.emptyList(), DEFAULT_GENERATION, Optional.empty());
        }

        /** The topics this member subscribed to. */
        public List<String> topics() {
            return topics;
        }

        /** Custom user data supplied via {@link ConsumerPartitionAssignor#subscriptionUserData(Set)}; may be null. */
        public ByteBuffer userData() {
            return userData;
        }

        /** Partitions this member currently owns (used by cooperative/sticky assignors). */
        public List<TopicPartition> ownedPartitions() {
            return ownedPartitions;
        }

        /** Rack of the member, if known. */
        public Optional<String> rackId() {
            return rackId;
        }

        public void setGroupInstanceId(Optional<String> groupInstanceId) {
            this.groupInstanceId = groupInstanceId;
        }

        public Optional<String> groupInstanceId() {
            return groupInstanceId;
        }

        public Optional<Integer> generationId() {
            return generationId;
        }

        @Override
        public String toString() {
            return "Subscription(" +
                "topics=" + topics +
                (userData == null ? "" : ", userDataSize=" + userData.remaining()) +
                ", ownedPartitions=" + ownedPartitions +
                ", groupInstanceId=" + groupInstanceId.map(String::toString).orElse("null") +
                ", generationId=" + generationId.orElse(-1) +
                ", rackId=" + (rackId.orElse("null")) +
                ")";
        }
    }

    /**
     * The partitions (and optional user data) assigned to a single member by the leader.
     */
    final class Assignment {
        private List<TopicPartition> partitions;
        private ByteBuffer userData;

        public Assignment(List<TopicPartition> partitions, ByteBuffer userData) {
            this.partitions = partitions;
            this.userData = userData;
        }

        public Assignment(List<TopicPartition> partitions) {
            this(partitions, null);
        }

        public List<TopicPartition> partitions() {
            return partitions;
        }

        public ByteBuffer userData() {
            return userData;
        }

        @Override
        public String toString() {
            return "Assignment(" +
                "partitions=" + partitions +
                (userData == null ? "" : ", userDataSize=" + userData.remaining()) +
                ')';
        }
    }

    /**
     * Wraps the subscriptions of all group members, keyed by member id, as passed to
     * {@link ConsumerPartitionAssignor#assign(Cluster, GroupSubscription)}.
     */
    final class GroupSubscription {
        private final Map<String, Subscription> subscriptions;

        public GroupSubscription(Map<String, Subscription> subscriptions) {
            this.subscriptions = subscriptions;
        }

        public Map<String, Subscription> groupSubscription() {
            return subscriptions;
        }

        @Override
        public String toString() {
            return "GroupSubscription(" +
                "subscriptions=" + subscriptions +
                ")";
        }
    }

    /**
     * Wraps the assignments of all group members, keyed by member id, as returned from
     * {@link ConsumerPartitionAssignor#assign(Cluster, GroupSubscription)}.
     */
    final class GroupAssignment {
        private final Map<String, Assignment> assignments;

        public GroupAssignment(Map<String, Assignment> assignments) {
            this.assignments = assignments;
        }

        public Map<String, Assignment> groupAssignment() {
            return assignments;
        }

        @Override
        public String toString() {
            return "GroupAssignment(" +
                "assignments=" + assignments +
                ")";
        }
    }

    /**
     * The rebalance protocol defines partition assignment and revocation semantics. The purpose is to establish a
     * consistent set of rules that all consumers in a group follow in order to transfer ownership of a partition.
     * {@link ConsumerPartitionAssignor} implementors can claim supporting one or more rebalance protocols via the
     * {@link ConsumerPartitionAssignor#supportedProtocols()}, and it is their responsibility to respect the rules
     * of those protocols in their {@link ConsumerPartitionAssignor#assign(Cluster, GroupSubscription)} implementations.
     * Failures to follow the rules of the supported protocols would lead to runtime error or undefined behavior.
     *
     * The {@link RebalanceProtocol#EAGER} rebalance protocol requires a consumer to always revoke all its owned
     * partitions before participating in a rebalance event. It therefore allows a complete reshuffling of the assignment.
     *
     * {@link RebalanceProtocol#COOPERATIVE} rebalance protocol allows a consumer to retain its currently owned
     * partitions before participating in a rebalance event. The assignor should not reassign any owned partitions
     * immediately, but instead may indicate consumers the need for partition revocation so that the revoked
     * partitions can be reassigned to other consumers in the next rebalance event. This is designed for sticky assignment
     * logic which attempts to minimize partition reassignment with cooperative adjustments.
     */
    enum RebalanceProtocol {
        EAGER((byte) 0), COOPERATIVE((byte) 1);

        // Wire-level id of the protocol; must stay stable across versions.
        private final byte id;

        RebalanceProtocol(byte id) {
            this.id = id;
        }

        public byte id() {
            return id;
        }

        public static RebalanceProtocol forId(byte id) {
            switch (id) {
                case 0:
                    return EAGER;
                case 1:
                    return COOPERATIVE;
                default:
                    throw new IllegalArgumentException("Unknown rebalance protocol id: " + id);
            }
        }
    }

    /**
     * Get a list of configured instances of {@link org.apache.kafka.clients.consumer.ConsumerPartitionAssignor}
     * based on the class names/types specified by {@link org.apache.kafka.clients.consumer.ConsumerConfig#PARTITION_ASSIGNMENT_STRATEGY_CONFIG}
     */
    static List<ConsumerPartitionAssignor> getAssignorInstances(List<String> assignorClasses, Map<String, Object> configs) {
        List<ConsumerPartitionAssignor> assignors = new ArrayList<>();
        // a map to store assignor name -> assignor class name, used to detect duplicate assignor names
        Map<String, String> assignorNameMap = new HashMap<>();

        if (assignorClasses == null)
            return assignors;

        // NOTE(review): the loop variable is deliberately typed Object even though the parameter is
        // List<String> — presumably a raw list containing Class objects may be passed in from config;
        // the Class branch below handles that case.
        for (Object klass : assignorClasses) {
            // first try to get the class if passed in as a string
            if (klass instanceof String) {
                try {
                    klass = Class.forName((String) klass, true, Utils.getContextOrKafkaClassLoader());
                } catch (ClassNotFoundException classNotFound) {
                    throw new KafkaException(klass + " ClassNotFoundException exception occurred", classNotFound);
                }
            }

            if (klass instanceof Class<?>) {
                Object assignor = Utils.newInstance((Class<?>) klass);
                if (assignor instanceof Configurable)
                    ((Configurable) assignor).configure(configs);

                if (assignor instanceof ConsumerPartitionAssignor) {
                    String assignorName = ((ConsumerPartitionAssignor) assignor).name();
                    // Two assignors reporting the same name() would be ambiguous at coordination time.
                    if (assignorNameMap.containsKey(assignorName)) {
                        throw new KafkaException("The assignor name: '" + assignorName + "' is used in more than one assignor: " +
                            assignorNameMap.get(assignorName) + ", " + assignor.getClass().getName());
                    }
                    assignorNameMap.put(assignorName, assignor.getClass().getName());
                    assignors.add((ConsumerPartitionAssignor) assignor);
                } else {
                    throw new KafkaException(klass + " is not an instance of " + ConsumerPartitionAssignor.class.getName());
                }
            } else {
                throw new KafkaException("List contains element of type " + klass.getClass().getName() + ", expected String or Class");
            }
        }
        return assignors;
    }

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import java.time.Duration;
import java.util.Collection;

import org.apache.kafka.common.TopicPartition;

/**
 * A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the
 * consumer changes.
 * <p>
 * This is applicable when the consumer is having Kafka auto-manage group membership. If the consumer directly assigns partitions,
 * those partitions will never be reassigned and this callback is not applicable.
 * <p>
 * When Kafka is managing the group membership, a partition re-assignment will be triggered any time the members of the group change or the subscription
 * of the members changes. This can occur when processes die, new process instances are added or old instances come back to life after failure.
 * Partition re-assignments can also be triggered by changes affecting the subscribed topics (e.g. when the number of partitions is
 * administratively adjusted).
 * <p>
 * There are many uses for this functionality. One common use is saving offsets in a custom store. By saving offsets in
 * the {@link #onPartitionsRevoked(Collection)} call we can ensure that any time partition assignment changes
 * the offset gets saved.
 * <p>
 * Another use is flushing out any kind of cache of intermediate results the consumer may be keeping. For example,
 * consider a case where the consumer is subscribed to a topic containing user page views, and the goal is to count the
 * number of page views per user for each five minute window. Let's say the topic is partitioned by the user id so that
 * all events for a particular user go to a single consumer instance. The consumer can keep in memory a running
 * tally of actions per user and only flush these out to a remote data store when its cache gets too big. However if a
 * partition is reassigned it may want to automatically trigger a flush of this cache, before the new owner takes over
 * consumption.
 * <p>
 * This callback will only execute in the user thread as part of the {@link Consumer#poll(java.time.Duration) poll(Duration)} call
 * whenever partition assignment changes.
 * <p>
 * Under normal conditions, if a partition is reassigned from one consumer to another, then the old consumer will
 * always invoke {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} for that partition prior to the new consumer
 * invoking {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} for the same partition. So if offsets or other state is saved in the
 * {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} call by one consumer member, it will be always accessible by the time the
 * other consumer member taking over that partition and triggering its {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} callback to load the state.
 * <p>
 * You can think of revocation as a graceful way to give up ownership of a partition. In some cases, the consumer may not have an opportunity to do so.
 * For example, if the session times out, then the partitions may be reassigned before we have a chance to revoke them gracefully.
 * For this case, we have a third callback {@link #onPartitionsLost(Collection)}. The difference between this function and
 * {@link #onPartitionsRevoked(Collection)} is that upon invocation of {@link #onPartitionsLost(Collection)}, the partitions
 * may already be owned by some other members in the group and therefore users would not be able to commit its consumed offsets for example.
 * Users could implement these two functions differently (by default,
 * {@link #onPartitionsLost(Collection)} will be calling {@link #onPartitionsRevoked(Collection)} directly); for example, in the
 * {@link #onPartitionsLost(Collection)} we should not need to store the offsets since we know these partitions are no longer owned by the consumer
 * at that time.
 * <p>
 * During a rebalance event, the {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} function will always be triggered exactly once when
 * the rebalance completes. That is, even if there is no newly assigned partitions for a consumer member, its {@link #onPartitionsAssigned(Collection) onPartitionsAssigned}
 * will still be triggered with an empty collection of partitions. As a result this function can be used also to notify when a rebalance event has happened.
 * With eager rebalancing, {@link #onPartitionsRevoked(Collection)} will always be called at the start of a rebalance. On the other hand, {@link #onPartitionsLost(Collection)}
 * will only be called when there were non-empty partitions that were lost.
 * With cooperative rebalancing, {@link #onPartitionsRevoked(Collection)} and {@link #onPartitionsLost(Collection)}
 * will only be triggered when there are non-empty partitions revoked or lost from this consumer member during a rebalance event.
 * <p>
 * It is possible
 * for a {@link org.apache.kafka.common.errors.WakeupException} or {@link org.apache.kafka.common.errors.InterruptException}
 * to be raised from one of these nested invocations. In this case, the exception will be propagated to the current
 * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not
 * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.
 * Also if the callback function implementation itself throws an exception, this exception will be propagated to the current
 * invocation of {@link KafkaConsumer#poll(java.time.Duration)} as well.
 * <p>
 * Note that callbacks only serve as notification of an assignment change.
 * They cannot be used to express acceptance of the change.
 * Hence throwing an exception from a callback does not affect the assignment in any way,
 * as it will be propagated all the way up to the {@link KafkaConsumer#poll(java.time.Duration)} call.
 * If user captures the exception in the caller, the callback is still assumed successful and no further retries will be attempted.
 * <p>
 *
 * Here is pseudo-code for a callback implementation for saving offsets:
 * <pre>
 * {@code
 *   public class SaveOffsetsOnRebalance implements ConsumerRebalanceListener {
 *       private Consumer<?,?> consumer;
 *
 *       public SaveOffsetsOnRebalance(Consumer<?,?> consumer) {
 *           this.consumer = consumer;
 *       }
 *
 *       public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
 *           // save the offsets in an external store using some custom code not described here
 *           for(TopicPartition partition: partitions)
 *              saveOffsetInExternalStore(consumer.position(partition));
 *       }
 *
 *       public void onPartitionsLost(Collection<TopicPartition> partitions) {
 *           // do not need to save the offsets since these partitions are probably owned by other consumers already
 *       }
 *
 *       public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
 *           // read the offsets from an external store using some custom code not described here
 *           for(TopicPartition partition: partitions)
 *              consumer.seek(partition, readOffsetFromExternalStore(partition));
 *       }
 *   }
 * }
 * </pre>
 */
public interface ConsumerRebalanceListener {

    /**
     * A callback method the user can implement to provide handling of offset commits to a customized store.
     * This method will be called during a rebalance operation when the consumer has to give up some partitions.
     * It can also be called when consumer is being closed ({@link KafkaConsumer#close(Duration)})
     * or is unsubscribing ({@link KafkaConsumer#unsubscribe()}).
     * It is recommended that offsets should be committed in this callback to either Kafka or a
     * custom offset store to prevent duplicate data.
     * <p>
     * In eager rebalancing, it will always be called at the start of a rebalance and after the consumer stops fetching data.
     * In cooperative rebalancing, it will be called at the end of a rebalance on the set of partitions being revoked iff the set is non-empty.
     * For examples on usage of this API, see Usage Examples section of {@link KafkaConsumer KafkaConsumer}.
     * <p>
     * It is common for the revocation callback to use the consumer instance in order to commit offsets. It is possible
     * for a {@link org.apache.kafka.common.errors.WakeupException} or {@link org.apache.kafka.common.errors.InterruptException}
     * to be raised from one of these nested invocations. In this case, the exception will be propagated to the current
     * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not
     * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.
     *
     * @param partitions The list of partitions that were assigned to the consumer and now need to be revoked (may not
     *                   include all currently assigned partitions, i.e. there may still be some partitions left)
     * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer}
     * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer}
     */
    void onPartitionsRevoked(Collection<TopicPartition> partitions);

    /**
     * A callback method the user can implement to provide handling of customized offsets on completion of a successful
     * partition re-assignment. This method will be called after the partition re-assignment completes and before the
     * consumer starts fetching data, and only as the result of a {@link Consumer#poll(java.time.Duration) poll(Duration)} call.
     * <p>
     * It is guaranteed that under normal conditions all the processes in a consumer group will execute their
     * {@link #onPartitionsRevoked(Collection)} callback before any instance executes its
     * {@link #onPartitionsAssigned(Collection)} callback. During exceptional scenarios, partitions may be migrated
     * without the old owner being notified (i.e. their {@link #onPartitionsRevoked(Collection)} callback not triggered),
     * and later when the old owner consumer realized this event, the {@link #onPartitionsLost(Collection)} callback
     * will be triggered by the consumer then.
     * <p>
     * It is common for the assignment callback to use the consumer instance in order to query offsets. It is possible
     * for a {@link org.apache.kafka.common.errors.WakeupException} or {@link org.apache.kafka.common.errors.InterruptException}
     * to be raised from one of these nested invocations. In this case, the exception will be propagated to the current
     * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not
     * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.
     *
     * @param partitions The list of partitions that are now assigned to the consumer (previously owned partitions will
     *                   NOT be included, i.e. this list will only include newly added partitions)
     * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer}
     * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer}
     */
    void onPartitionsAssigned(Collection<TopicPartition> partitions);

    /**
     * A callback method you can implement to provide handling of cleaning up resources for partitions that have already
     * been reassigned to other consumers. This method will not be called during normal execution as the owned partitions would
     * first be revoked by calling the {@link ConsumerRebalanceListener#onPartitionsRevoked}, before being reassigned
     * to other consumers during a rebalance event. However, during exceptional scenarios when the consumer realized that it
     * does not own this partition any longer, i.e. not revoked via a normal rebalance event, then this method would be invoked.
     * <p>
     * For example, this function is called if a consumer's session timeout has expired, or if a fatal error has been
     * received indicating the consumer is no longer part of the group.
     * <p>
     * By default it will just trigger {@link ConsumerRebalanceListener#onPartitionsRevoked}; for users who want to distinguish
     * the handling logic of revoked partitions v.s. lost partitions, they can override the default implementation.
     * <p>
     * It is possible
     * for a {@link org.apache.kafka.common.errors.WakeupException} or {@link org.apache.kafka.common.errors.InterruptException}
     * to be raised from one of these nested invocations. In this case, the exception will be propagated to the current
     * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not
     * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread.
     *
     * @param partitions The list of partitions that were assigned to the consumer and now have been reassigned
     *                   to other consumers. With the current protocol this will always include all of the consumer's
     *                   previously assigned partitions, but this may change in future protocols (ie there would still
     *                   be some partitions left)
     * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer}
     * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer}
     */
    default void onPartitionsLost(Collection<TopicPartition> partitions) {
        // Default behavior: treat lost partitions exactly like revoked ones.
        onPartitionsRevoked(partitions);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerRecord.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

import java.util.Optional;

/**
 * A key/value pair to be received from Kafka. This also consists of a topic name and
 * a partition number from which the record is being received, an offset that points
 * to the record in a Kafka partition, and a timestamp as marked by the corresponding ProducerRecord.
 */
public class ConsumerRecord<K, V> {
    /** Sentinel timestamp meaning "no timestamp available" (legacy message formats). */
    public static final long NO_TIMESTAMP = RecordBatch.NO_TIMESTAMP;
    /** Sentinel serialized size used when the key or value is null. */
    public static final int NULL_SIZE = -1;

    /**
     * @deprecated checksums are no longer exposed by this class, this constant will be removed in Apache Kafka 4.0
     *             (deprecated since 3.0).
     */
    @Deprecated
    public static final int NULL_CHECKSUM = -1;

    private final String topic;
    private final int partition;
    private final long offset;
    private final long timestamp;
    private final TimestampType timestampType;
    private final int serializedKeySize;
    private final int serializedValueSize;
    private final Headers headers;
    private final K key;
    private final V value;
    private final Optional<Integer> leaderEpoch;

    /**
     * Creates a record to be received from a specified topic and partition (provided for
     * compatibility with Kafka 0.9 before the message format supported timestamps and before
     * serialized metadata were exposed).
     *
     * @param topic The topic this record is received from
     * @param partition The partition of the topic this record is received from
     * @param offset The offset of this record in the corresponding Kafka partition
     * @param key The key of the record, if one exists (null is allowed)
     * @param value The record contents
     */
    public ConsumerRecord(String topic,
                          int partition,
                          long offset,
                          K key,
                          V value) {
        this(topic, partition, offset, NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE,
            NULL_SIZE, NULL_SIZE, key, value, new RecordHeaders(), Optional.empty());
    }

    /**
     * Creates a record to be received from a specified topic and partition.
     *
     * @param topic The topic this record is received from
     * @param partition The partition of the topic this record is received from
     * @param offset The offset of this record in the corresponding Kafka partition
     * @param timestamp The timestamp of the record.
     * @param timestampType The timestamp type
     * @param serializedKeySize The length of the serialized key
     * @param serializedValueSize The length of the serialized value
     * @param key The key of the record, if one exists (null is allowed)
     * @param value The record contents
     * @param headers The headers of the record
     * @param leaderEpoch Optional leader epoch of the record (may be empty for legacy record formats)
     * @throws IllegalArgumentException if topic or headers is null
     */
    public ConsumerRecord(String topic,
                          int partition,
                          long offset,
                          long timestamp,
                          TimestampType timestampType,
                          int serializedKeySize,
                          int serializedValueSize,
                          K key,
                          V value,
                          Headers headers,
                          Optional<Integer> leaderEpoch) {
        // Validate in this order so the topic check fires first when both are null.
        requireArgument(topic != null, "Topic cannot be null");
        requireArgument(headers != null, "Headers cannot be null");

        this.topic = topic;
        this.partition = partition;
        this.offset = offset;
        this.timestamp = timestamp;
        this.timestampType = timestampType;
        this.serializedKeySize = serializedKeySize;
        this.serializedValueSize = serializedValueSize;
        this.headers = headers;
        this.key = key;
        this.value = value;
        this.leaderEpoch = leaderEpoch;
    }

    /**
     * Creates a record to be received from a specified topic and partition (provided for
     * compatibility with Kafka 0.10 before the message format supported headers).
     * The {@code checksum} argument is ignored.
     *
     * @param topic The topic this record is received from
     * @param partition The partition of the topic this record is received from
     * @param offset The offset of this record in the corresponding Kafka partition
     * @param timestamp The timestamp of the record.
     * @param timestampType The timestamp type
     * @param serializedKeySize The length of the serialized key
     * @param serializedValueSize The length of the serialized value
     * @param key The key of the record, if one exists (null is allowed)
     * @param value The record contents
     *
     * @deprecated use one of the constructors without a `checksum` parameter. This constructor will be removed in
     *             Apache Kafka 4.0 (deprecated since 3.0).
     */
    @Deprecated
    public ConsumerRecord(String topic,
                          int partition,
                          long offset,
                          long timestamp,
                          TimestampType timestampType,
                          long checksum,
                          int serializedKeySize,
                          int serializedValueSize,
                          K key,
                          V value) {
        this(topic, partition, offset, timestamp, timestampType,
            serializedKeySize, serializedValueSize, key, value, new RecordHeaders(), Optional.empty());
    }

    /**
     * Creates a record to be received from a specified topic and partition.
     * The {@code checksum} argument is ignored.
     *
     * @param topic The topic this record is received from
     * @param partition The partition of the topic this record is received from
     * @param offset The offset of this record in the corresponding Kafka partition
     * @param timestamp The timestamp of the record.
     * @param timestampType The timestamp type
     * @param serializedKeySize The length of the serialized key
     * @param serializedValueSize The length of the serialized value
     * @param key The key of the record, if one exists (null is allowed)
     * @param value The record contents
     * @param headers The headers of the record.
     *
     * @deprecated use one of the constructors without a `checksum` parameter. This constructor will be removed in
     *             Apache Kafka 4.0 (deprecated since 3.0).
     */
    @Deprecated
    public ConsumerRecord(String topic,
                          int partition,
                          long offset,
                          long timestamp,
                          TimestampType timestampType,
                          Long checksum,
                          int serializedKeySize,
                          int serializedValueSize,
                          K key,
                          V value,
                          Headers headers) {
        this(topic, partition, offset, timestamp, timestampType,
            serializedKeySize, serializedValueSize, key, value, headers, Optional.empty());
    }

    /**
     * Creates a record to be received from a specified topic and partition.
     * The {@code checksum} argument is ignored.
     *
     * @param topic The topic this record is received from
     * @param partition The partition of the topic this record is received from
     * @param offset The offset of this record in the corresponding Kafka partition
     * @param timestamp The timestamp of the record.
     * @param timestampType The timestamp type
     * @param serializedKeySize The length of the serialized key
     * @param serializedValueSize The length of the serialized value
     * @param key The key of the record, if one exists (null is allowed)
     * @param value The record contents
     * @param headers The headers of the record
     * @param leaderEpoch Optional leader epoch of the record (may be empty for legacy record formats)
     *
     * @deprecated use one of the constructors without a `checksum` parameter. This constructor will be removed in
     *             Apache Kafka 4.0 (deprecated since 3.0).
     */
    @Deprecated
    public ConsumerRecord(String topic,
                          int partition,
                          long offset,
                          long timestamp,
                          TimestampType timestampType,
                          Long checksum,
                          int serializedKeySize,
                          int serializedValueSize,
                          K key,
                          V value,
                          Headers headers,
                          Optional<Integer> leaderEpoch) {
        this(topic, partition, offset, timestamp, timestampType,
            serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch);
    }

    /** The topic this record is received from (never null) */
    public String topic() {
        return topic;
    }

    /** The partition from which this record is received */
    public int partition() {
        return partition;
    }

    /** The headers (never null) */
    public Headers headers() {
        return headers;
    }

    /** The key (or null if no key is specified) */
    public K key() {
        return key;
    }

    /** The value */
    public V value() {
        return value;
    }

    /** The position of this record in the corresponding Kafka partition. */
    public long offset() {
        return offset;
    }

    /** The timestamp of this record */
    public long timestamp() {
        return timestamp;
    }

    /** The timestamp type of this record */
    public TimestampType timestampType() {
        return timestampType;
    }

    /**
     * The size of the serialized, uncompressed key in bytes. If key is null, the returned size
     * is -1.
     */
    public int serializedKeySize() {
        return serializedKeySize;
    }

    /**
     * The size of the serialized, uncompressed value in bytes. If value is null, the
     * returned size is -1.
     */
    public int serializedValueSize() {
        return serializedValueSize;
    }

    /**
     * Get the leader epoch for the record if available
     *
     * @return the leader epoch or empty for legacy record formats
     */
    public Optional<Integer> leaderEpoch() {
        return leaderEpoch;
    }

    @Override
    public String toString() {
        // Built with a StringBuilder; the rendered text is identical to the historical
        // concatenation form (a missing leader epoch renders as "null").
        StringBuilder result = new StringBuilder("ConsumerRecord(topic = ");
        result.append(topic)
              .append(", partition = ").append(partition)
              .append(", leaderEpoch = ").append(leaderEpoch.orElse(null))
              .append(", offset = ").append(offset)
              .append(", ").append(timestampType).append(" = ").append(timestamp)
              .append(", serialized key size = ").append(serializedKeySize)
              .append(", serialized value size = ").append(serializedValueSize)
              .append(", headers = ").append(headers)
              .append(", key = ").append(key)
              .append(", value = ").append(value)
              .append(")");
        return result.toString();
    }

    // Throws IllegalArgumentException with the given message when the condition does not hold.
    private static void requireArgument(boolean condition, String message) {
        if (!condition)
            throw new IllegalArgumentException(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/ConsumerRecords.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.utils.AbstractIterator; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; /** * A container that holds the list {@link ConsumerRecord} per partition for a * particular topic. There is one {@link ConsumerRecord} list for every topic * partition returned by a {@link Consumer#poll(java.time.Duration)} operation. 
*/ public class ConsumerRecords<K, V> implements Iterable<ConsumerRecord<K, V>> { public static final ConsumerRecords<Object, Object> EMPTY = new ConsumerRecords<>(Collections.emptyMap()); private final Map<TopicPartition, List<ConsumerRecord<K, V>>> records; public ConsumerRecords(Map<TopicPartition, List<ConsumerRecord<K, V>>> records) { this.records = records; } /** * Get just the records for the given partition * * @param partition The partition to get records for */ public List<ConsumerRecord<K, V>> records(TopicPartition partition) { List<ConsumerRecord<K, V>> recs = this.records.get(partition); if (recs == null) return Collections.emptyList(); else return Collections.unmodifiableList(recs); } /** * Get just the records for the given topic */ public Iterable<ConsumerRecord<K, V>> records(String topic) { if (topic == null) throw new IllegalArgumentException("Topic must be non-null."); List<List<ConsumerRecord<K, V>>> recs = new ArrayList<>(); for (Map.Entry<TopicPartition, List<ConsumerRecord<K, V>>> entry : records.entrySet()) { if (entry.getKey().topic().equals(topic)) recs.add(entry.getValue()); } return new ConcatenatedIterable<>(recs); } /** * Get the partitions which have records contained in this record set. * @return the set of partitions with data in this record set (may be empty if no data was returned) */ public Set<TopicPartition> partitions() { return Collections.unmodifiableSet(records.keySet()); } @Override public Iterator<ConsumerRecord<K, V>> iterator() { return new ConcatenatedIterable<>(records.values()).iterator(); } /** * The number of records for all topics */ public int count() { int count = 0; for (List<ConsumerRecord<K, V>> recs: this.records.values()) count += recs.size(); return count; } private static class ConcatenatedIterable<K, V> implements Iterable<ConsumerRecord<K, V>> { private final Iterable<? extends Iterable<ConsumerRecord<K, V>>> iterables; public ConcatenatedIterable(Iterable<? 
extends Iterable<ConsumerRecord<K, V>>> iterables) { this.iterables = iterables; } @Override public Iterator<ConsumerRecord<K, V>> iterator() { return new AbstractIterator<ConsumerRecord<K, V>>() { Iterator<? extends Iterable<ConsumerRecord<K, V>>> iters = iterables.iterator(); Iterator<ConsumerRecord<K, V>> current; public ConsumerRecord<K, V> makeNext() { while (current == null || !current.hasNext()) { if (iters.hasNext()) current = iters.next().iterator(); else return allDone(); } return current.next(); } }; } } public boolean isEmpty() { return records.isEmpty(); } @SuppressWarnings("unchecked") public static <K, V> ConsumerRecords<K, V> empty() { return (ConsumerRecords<K, V>) EMPTY; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/CooperativeStickyAssignor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.protocol.types.Type; /** * A cooperative version of the {@link AbstractStickyAssignor AbstractStickyAssignor}. This follows the same (sticky) * assignment logic as {@link StickyAssignor StickyAssignor} but allows for cooperative rebalancing while the * {@link StickyAssignor StickyAssignor} follows the eager rebalancing protocol. See * {@link ConsumerPartitionAssignor.RebalanceProtocol} for an explanation of the rebalancing protocols. * <p> * Users should prefer this assignor for newer clusters. 
* <p> * To turn on cooperative rebalancing you must set all your consumers to use this {@code PartitionAssignor}, * or implement a custom one that returns {@code RebalanceProtocol.COOPERATIVE} in * {@link CooperativeStickyAssignor#supportedProtocols supportedProtocols()}. * <p> * IMPORTANT: if upgrading from 2.3 or earlier, you must follow a specific upgrade path in order to safely turn on * cooperative rebalancing. See the <a href="https://kafka.apache.org/documentation/#upgrade_240_notable">upgrade guide</a> for details. */ public class CooperativeStickyAssignor extends AbstractStickyAssignor { public static final String COOPERATIVE_STICKY_ASSIGNOR_NAME = "cooperative-sticky"; // these schemas are used for preserving useful metadata for the assignment, such as the last stable generation private static final String GENERATION_KEY_NAME = "generation"; private static final Schema COOPERATIVE_STICKY_ASSIGNOR_USER_DATA_V0 = new Schema( new Field(GENERATION_KEY_NAME, Type.INT32)); private int generation = DEFAULT_GENERATION; // consumer group generation @Override public String name() { return COOPERATIVE_STICKY_ASSIGNOR_NAME; } @Override public List<RebalanceProtocol> supportedProtocols() { return Arrays.asList(RebalanceProtocol.COOPERATIVE, RebalanceProtocol.EAGER); } @Override public void onAssignment(Assignment assignment, ConsumerGroupMetadata metadata) { this.generation = metadata.generationId(); } @Override public ByteBuffer subscriptionUserData(Set<String> topics) { Struct struct = new Struct(COOPERATIVE_STICKY_ASSIGNOR_USER_DATA_V0); struct.set(GENERATION_KEY_NAME, generation); ByteBuffer buffer = ByteBuffer.allocate(COOPERATIVE_STICKY_ASSIGNOR_USER_DATA_V0.sizeOf(struct)); COOPERATIVE_STICKY_ASSIGNOR_USER_DATA_V0.write(buffer, struct); buffer.flip(); return buffer; } @Override protected MemberData memberData(Subscription subscription) { // In ConsumerProtocolSubscription v2 or higher, we can take member data from fields directly if 
(subscription.generationId().isPresent()) { return new MemberData(subscription.ownedPartitions(), subscription.generationId()); } ByteBuffer buffer = subscription.userData(); Optional<Integer> encodedGeneration; if (buffer == null) { encodedGeneration = Optional.empty(); } else { try { Struct struct = COOPERATIVE_STICKY_ASSIGNOR_USER_DATA_V0.read(buffer); encodedGeneration = Optional.of(struct.getInt(GENERATION_KEY_NAME)); } catch (Exception e) { encodedGeneration = Optional.of(DEFAULT_GENERATION); } } return new MemberData(subscription.ownedPartitions(), encodedGeneration, subscription.rackId()); } @Override public Map<String, List<TopicPartition>> assignPartitions(Map<String, List<PartitionInfo>> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignments = super.assignPartitions(partitionsPerTopic, subscriptions); Map<TopicPartition, String> partitionsTransferringOwnership = super.partitionsTransferringOwnership == null ? computePartitionsTransferringOwnership(subscriptions, assignments) : super.partitionsTransferringOwnership; adjustAssignment(assignments, partitionsTransferringOwnership); return assignments; } // Following the cooperative rebalancing protocol requires removing partitions that must first be revoked from the assignment private void adjustAssignment(Map<String, List<TopicPartition>> assignments, Map<TopicPartition, String> partitionsTransferringOwnership) { for (Map.Entry<TopicPartition, String> partitionEntry : partitionsTransferringOwnership.entrySet()) { assignments.get(partitionEntry.getValue()).remove(partitionEntry.getKey()); } } private Map<TopicPartition, String> computePartitionsTransferringOwnership(Map<String, Subscription> subscriptions, Map<String, List<TopicPartition>> assignments) { Map<TopicPartition, String> allAddedPartitions = new HashMap<>(); Set<TopicPartition> allRevokedPartitions = new HashSet<>(); for (final Map.Entry<String, List<TopicPartition>> entry : 
assignments.entrySet()) { String consumer = entry.getKey(); List<TopicPartition> ownedPartitions = subscriptions.get(consumer).ownedPartitions(); List<TopicPartition> assignedPartitions = entry.getValue(); Set<TopicPartition> ownedPartitionsSet = new HashSet<>(ownedPartitions); for (TopicPartition tp : assignedPartitions) { if (!ownedPartitionsSet.contains(tp)) allAddedPartitions.put(tp, consumer); } Set<TopicPartition> assignedPartitionsSet = new HashSet<>(assignedPartitions); for (TopicPartition tp : ownedPartitions) { if (!assignedPartitionsSet.contains(tp)) allRevokedPartitions.add(tp); } } allAddedPartitions.keySet().retainAll(allRevokedPartitions); return allAddedPartitions; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/InvalidOffsetException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;

import java.util.Set;

/**
 * Thrown when the offset for a set of partitions is invalid (either undefined or out of range),
 * and no reset policy has been configured.
 *
 * @see NoOffsetForPartitionException
 * @see OffsetOutOfRangeException
 */
public abstract class InvalidOffsetException extends KafkaException {

    public InvalidOffsetException(String message) {
        super(message);
    }

    /**
     * @return the partitions whose offsets were invalid
     */
    public abstract Set<TopicPartition> partitions();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/KafkaConsumer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NetworkClient; import org.apache.kafka.clients.consumer.internals.ConsumerCoordinator; import org.apache.kafka.clients.consumer.internals.ConsumerInterceptors; import org.apache.kafka.clients.consumer.internals.ConsumerMetadata; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient; import org.apache.kafka.clients.consumer.internals.Fetch; import org.apache.kafka.clients.consumer.internals.FetchConfig; import org.apache.kafka.clients.consumer.internals.FetchMetricsManager; import org.apache.kafka.clients.consumer.internals.Fetcher; import org.apache.kafka.clients.consumer.internals.FetchMetricsRegistry; import org.apache.kafka.clients.consumer.internals.KafkaConsumerMetrics; import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener; import org.apache.kafka.clients.consumer.internals.OffsetFetcher; import 
org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.clients.consumer.internals.TopicMetadataFetcher; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsContext; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.network.Selector; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.superstream.Consts; import org.apache.kafka.common.superstream.Superstream; import org.apache.kafka.common.utils.AppInfoParser; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.event.Level; import java.net.InetSocketAddress; import java.time.Duration; import java.util.Collection; import java.util.Collections; import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Properties; import java.util.Set; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; /** * A client that consumes records from a Kafka cluster. * <p> * This client transparently handles the failure of Kafka brokers, and transparently adapts as topic partitions * it fetches migrate within the cluster. This client also interacts with the broker to allow groups of * consumers to load balance consumption using <a href="#consumergroups">consumer groups</a>. * <p> * The consumer maintains TCP connections to the necessary brokers to fetch data. * Failure to close the consumer after use will leak these connections. * The consumer is not thread-safe. See <a href="#multithreaded">Multi-threaded Processing</a> for more details. * * <h3>Cross-Version Compatibility</h3> * This client can communicate with brokers that are version 0.10.0 or newer. Older or newer brokers may not support * certain features. For example, 0.10.0 brokers do not support offsetsForTimes, because this feature was added * in version 0.10.1. You will receive an {@link org.apache.kafka.common.errors.UnsupportedVersionException} * when invoking an API that is not available on the running broker version. * <p> * * <h3>Offsets and Consumer Position</h3> * Kafka maintains a numerical offset for each record in a partition. This offset acts as a unique identifier of * a record within that partition, and also denotes the position of the consumer in the partition. For example, a consumer * which is at position 5 has consumed records with offsets 0 through 4 and will next receive the record with offset 5. There * are actually two notions of position relevant to the user of the consumer: * <p> * The {@link #position(TopicPartition) position} of the consumer gives the offset of the next record that will be given * out. 
It will be one larger than the highest offset the consumer has seen in that partition. It automatically advances * every time the consumer receives messages in a call to {@link #poll(Duration)}. * <p> * The {@link #commitSync() committed position} is the last offset that has been stored securely. Should the * process fail and restart, this is the offset that the consumer will recover to. The consumer can either automatically commit * offsets periodically; or it can choose to control this committed position manually by calling one of the commit APIs * (e.g. {@link #commitSync() commitSync} and {@link #commitAsync(OffsetCommitCallback) commitAsync}). * <p> * This distinction gives the consumer control over when a record is considered consumed. It is discussed in further * detail below. * * <h3><a name="consumergroups">Consumer Groups and Topic Subscriptions</a></h3> * * Kafka uses the concept of <i>consumer groups</i> to allow a pool of processes to divide the work of consuming and * processing records. These processes can either be running on the same machine or they can be * distributed over many machines to provide scalability and fault tolerance for processing. All consumer instances * sharing the same {@code group.id} will be part of the same consumer group. * <p> * Each consumer in a group can dynamically set the list of topics it wants to subscribe to through one of the * {@link #subscribe(Collection, ConsumerRebalanceListener) subscribe} APIs. Kafka will deliver each message in the * subscribed topics to one process in each consumer group. This is achieved by balancing the partitions between all * members in the consumer group so that each partition is assigned to exactly one consumer in the group. So if there * is a topic with four partitions, and a consumer group with two processes, each process would consume from two partitions. 
* <p> * Membership in a consumer group is maintained dynamically: if a process fails, the partitions assigned to it will * be reassigned to other consumers in the same group. Similarly, if a new consumer joins the group, partitions will be moved * from existing consumers to the new one. This is known as <i>rebalancing</i> the group and is discussed in more * detail <a href="#failuredetection">below</a>. Group rebalancing is also used when new partitions are added * to one of the subscribed topics or when a new topic matching a {@link #subscribe(Pattern, ConsumerRebalanceListener) subscribed regex} * is created. The group will automatically detect the new partitions through periodic metadata refreshes and * assign them to members of the group. * <p> * Conceptually you can think of a consumer group as being a single logical subscriber that happens to be made up of * multiple processes. As a multi-subscriber system, Kafka naturally supports having any number of consumer groups for a * given topic without duplicating data (additional consumers are actually quite cheap). * <p> * This is a slight generalization of the functionality that is common in messaging systems. To get semantics similar to * a queue in a traditional messaging system all processes would be part of a single consumer group and hence record * delivery would be balanced over the group like with a queue. Unlike a traditional messaging system, though, you can * have multiple such groups. To get semantics similar to pub-sub in a traditional messaging system each process would * have its own consumer group, so each process would subscribe to all the records published to the topic. * <p> * In addition, when group reassignment happens automatically, consumers can be notified through a {@link ConsumerRebalanceListener}, * which allows them to finish necessary application-level logic such as state cleanup, manual offset * commits, etc. 
See <a href="#rebalancecallback">Storing Offsets Outside Kafka</a> for more details. * <p> * It is also possible for the consumer to <a href="#manualassignment">manually assign</a> specific partitions * (similar to the older "simple" consumer) using {@link #assign(Collection)}. In this case, dynamic partition * assignment and consumer group coordination will be disabled. * * <h3><a name="failuredetection">Detecting Consumer Failures</a></h3> * * After subscribing to a set of topics, the consumer will automatically join the group when {@link #poll(Duration)} is * invoked. The poll API is designed to ensure consumer liveness. As long as you continue to call poll, the consumer * will stay in the group and continue to receive messages from the partitions it was assigned. Underneath the covers, * the consumer sends periodic heartbeats to the server. If the consumer crashes or is unable to send heartbeats for * a duration of {@code session.timeout.ms}, then the consumer will be considered dead and its partitions will * be reassigned. * <p> * It is also possible that the consumer could encounter a "livelock" situation where it is continuing * to send heartbeats, but no progress is being made. To prevent the consumer from holding onto its partitions * indefinitely in this case, we provide a liveness detection mechanism using the {@code max.poll.interval.ms} * setting. Basically if you don't call poll at least as frequently as the configured max interval, * then the client will proactively leave the group so that another consumer can take over its partitions. When this happens, * you may see an offset commit failure (as indicated by a {@link CommitFailedException} thrown from a call to {@link #commitSync()}). * This is a safety mechanism which guarantees that only active members of the group are able to commit offsets. * So to stay in the group, you must continue to call poll. 
* <p> * The consumer provides two configuration settings to control the behavior of the poll loop: * <ol> * <li><code>max.poll.interval.ms</code>: By increasing the interval between expected polls, you can give * the consumer more time to handle a batch of records returned from {@link #poll(Duration)}. The drawback * is that increasing this value may delay a group rebalance since the consumer will only join the rebalance * inside the call to poll. You can use this setting to bound the time to finish a rebalance, but * you risk slower progress if the consumer cannot actually call {@link #poll(Duration) poll} often enough.</li> * <li><code>max.poll.records</code>: Use this setting to limit the total records returned from a single * call to poll. This can make it easier to predict the maximum that must be handled within each poll * interval. By tuning this value, you may be able to reduce the poll interval, which will reduce the * impact of group rebalancing.</li> * </ol> * <p> * For use cases where message processing time varies unpredictably, neither of these options may be sufficient. * The recommended way to handle these cases is to move message processing to another thread, which allows * the consumer to continue calling {@link #poll(Duration) poll} while the processor is still working. * Some care must be taken to ensure that committed offsets do not get ahead of the actual position. * Typically, you must disable automatic commits and manually commit processed offsets for records only after the * thread has finished handling them (depending on the delivery semantics you need). * Note also that you will need to {@link #pause(Collection) pause} the partition so that no new records are received * from poll until after thread has finished handling those previously returned. * * <h3>Usage Examples</h3> * The consumer APIs offer flexibility to cover a variety of consumption use cases. Here are some examples to * demonstrate how to use them. 
* * <h4>Automatic Offset Committing</h4> * This example demonstrates a simple usage of Kafka's consumer api that relies on automatic offset committing. * <p> * <pre> * Properties props = new Properties(); * props.setProperty(&quot;bootstrap.servers&quot;, &quot;localhost:9092&quot;); * props.setProperty(&quot;group.id&quot;, &quot;test&quot;); * props.setProperty(&quot;enable.auto.commit&quot;, &quot;true&quot;); * props.setProperty(&quot;auto.commit.interval.ms&quot;, &quot;1000&quot;); * props.setProperty(&quot;key.deserializer&quot;, &quot;org.apache.kafka.common.serialization.StringDeserializer&quot;); * props.setProperty(&quot;value.deserializer&quot;, &quot;org.apache.kafka.common.serialization.StringDeserializer&quot;); * KafkaConsumer&lt;String, String&gt; consumer = new KafkaConsumer&lt;&gt;(props); * consumer.subscribe(Arrays.asList(&quot;foo&quot;, &quot;bar&quot;)); * while (true) { * ConsumerRecords&lt;String, String&gt; records = consumer.poll(Duration.ofMillis(100)); * for (ConsumerRecord&lt;String, String&gt; record : records) * System.out.printf(&quot;offset = %d, key = %s, value = %s%n&quot;, record.offset(), record.key(), record.value()); * } * </pre> * * The connection to the cluster is bootstrapped by specifying a list of one or more brokers to contact using the * configuration {@code bootstrap.servers}. This list is just used to discover the rest of the brokers in the * cluster and need not be an exhaustive list of servers in the cluster (though you may want to specify more than one in * case there are servers down when the client is connecting). * <p> * Setting {@code enable.auto.commit} means that offsets are committed automatically with a frequency controlled by * the config {@code auto.commit.interval.ms}. * <p> * In this example the consumer is subscribing to the topics <i>foo</i> and <i>bar</i> as part of a group of consumers * called <i>test</i> as configured with {@code group.id}. 
* <p> * The deserializer settings specify how to turn bytes into objects. For example, by specifying string deserializers, we * are saying that our record's key and value will just be simple strings. * * <h4>Manual Offset Control</h4> * * Instead of relying on the consumer to periodically commit consumed offsets, users can also control when records * should be considered as consumed and hence commit their offsets. This is useful when the consumption of the messages * is coupled with some processing logic and hence a message should not be considered as consumed until it is completed processing. * <p> * <pre> * Properties props = new Properties(); * props.setProperty(&quot;bootstrap.servers&quot;, &quot;localhost:9092&quot;); * props.setProperty(&quot;group.id&quot;, &quot;test&quot;); * props.setProperty(&quot;enable.auto.commit&quot;, &quot;false&quot;); * props.setProperty(&quot;key.deserializer&quot;, &quot;org.apache.kafka.common.serialization.StringDeserializer&quot;); * props.setProperty(&quot;value.deserializer&quot;, &quot;org.apache.kafka.common.serialization.StringDeserializer&quot;); * KafkaConsumer&lt;String, String&gt; consumer = new KafkaConsumer&lt;&gt;(props); * consumer.subscribe(Arrays.asList(&quot;foo&quot;, &quot;bar&quot;)); * final int minBatchSize = 200; * List&lt;ConsumerRecord&lt;String, String&gt;&gt; buffer = new ArrayList&lt;&gt;(); * while (true) { * ConsumerRecords&lt;String, String&gt; records = consumer.poll(Duration.ofMillis(100)); * for (ConsumerRecord&lt;String, String&gt; record : records) { * buffer.add(record); * } * if (buffer.size() &gt;= minBatchSize) { * insertIntoDb(buffer); * consumer.commitSync(); * buffer.clear(); * } * } * </pre> * * In this example we will consume a batch of records and batch them up in memory. When we have enough records * batched, we will insert them into a database. 
If we allowed offsets to auto commit as in the previous example, records * would be considered consumed after they were returned to the user in {@link #poll(Duration) poll}. It would then be * possible * for our process to fail after batching the records, but before they had been inserted into the database. * <p> * To avoid this, we will manually commit the offsets only after the corresponding records have been inserted into the * database. This gives us exact control of when a record is considered consumed. This raises the opposite possibility: * the process could fail in the interval after the insert into the database but before the commit (even though this * would likely just be a few milliseconds, it is a possibility). In this case the process that took over consumption * would consume from last committed offset and would repeat the insert of the last batch of data. Used in this way * Kafka provides what is often called "at-least-once" delivery guarantees, as each record will likely be delivered one * time but in failure cases could be duplicated. * <p> * <b>Note: Using automatic offset commits can also give you "at-least-once" delivery, but the requirement is that * you must consume all data returned from each call to {@link #poll(Duration)} before any subsequent calls, or before * {@link #close() closing} the consumer. If you fail to do either of these, it is possible for the committed offset * to get ahead of the consumed position, which results in missing records. The advantage of using manual offset * control is that you have direct control over when a record is considered "consumed."</b> * <p> * The above example uses {@link #commitSync() commitSync} to mark all received records as committed. In some cases * you may wish to have even finer control over which records have been committed by specifying an offset explicitly. * In the example below we commit offset after we finish handling the records in each partition. 
* <p> * <pre> * try { * while(running) { * ConsumerRecords&lt;String, String&gt; records = consumer.poll(Duration.ofMillis(Long.MAX_VALUE)); * for (TopicPartition partition : records.partitions()) { * List&lt;ConsumerRecord&lt;String, String&gt;&gt; partitionRecords = records.records(partition); * for (ConsumerRecord&lt;String, String&gt; record : partitionRecords) { * System.out.println(record.offset() + &quot;: &quot; + record.value()); * } * long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset(); * consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1))); * } * } * } finally { * consumer.close(); * } * </pre> * * <b>Note: The committed offset should always be the offset of the next message that your application will read.</b> * Thus, when calling {@link #commitSync(Map) commitSync(offsets)} you should add one to the offset of the last message processed. * * <h4><a name="manualassignment">Manual Partition Assignment</a></h4> * * In the previous examples, we subscribed to the topics we were interested in and let Kafka dynamically assign a * fair share of the partitions for those topics based on the active consumers in the group. However, in * some cases you may need finer control over the specific partitions that are assigned. For example: * <p> * <ul> * <li>If the process is maintaining some kind of local state associated with that partition (like a * local on-disk key-value store), then it should only get records for the partition it is maintaining on disk. * <li>If the process itself is highly available and will be restarted if it fails (perhaps using a * cluster management framework like YARN, Mesos, or AWS facilities, or as part of a stream processing framework). In * this case there is no need for Kafka to detect the failure and reassign the partition since the consuming process * will be restarted on another machine. 
* </ul>
 * <p>
 * To use this mode, instead of subscribing to the topic using {@link #subscribe(Collection) subscribe}, you just call
 * {@link #assign(Collection)} with the full list of partitions that you want to consume.
 *
 * <pre>
 *     String topic = &quot;foo&quot;;
 *     TopicPartition partition0 = new TopicPartition(topic, 0);
 *     TopicPartition partition1 = new TopicPartition(topic, 1);
 *     consumer.assign(Arrays.asList(partition0, partition1));
 * </pre>
 *
 * Once assigned, you can call {@link #poll(Duration) poll} in a loop, just as in the preceding examples to consume
 * records. The group that the consumer specifies is still used for committing offsets, but now the set of partitions
 * will only change with another call to {@link #assign(Collection) assign}. Manual partition assignment does
 * not use group coordination, so consumer failures will not cause assigned partitions to be rebalanced. Each consumer
 * acts independently even if it shares a groupId with another consumer. To avoid offset commit conflicts, you should
 * usually ensure that the groupId is unique for each consumer instance.
 * <p>
 * Note that it isn't possible to mix manual partition assignment (i.e. using {@link #assign(Collection) assign})
 * with dynamic partition assignment through topic subscription (i.e. using {@link #subscribe(Collection) subscribe}).
 *
 * <h4><a name="rebalancecallback">Storing Offsets Outside Kafka</a></h4>
 *
 * The consumer application need not use Kafka's built-in offset storage; it can store offsets in a store of its own
 * choosing. The primary use case for this is allowing the application to store both the offset and the results of the
 * consumption in the same system in a way that both the results and offsets are stored atomically. This is not always
 * possible, but when it is it will make the consumption fully atomic and give "exactly once" semantics that are
 * stronger than the default "at-least once" semantics you get with Kafka's offset commit functionality.
* <p> * Here are a couple of examples of this type of usage: * <ul> * <li>If the results of the consumption are being stored in a relational database, storing the offset in the database * as well can allow committing both the results and offset in a single transaction. Thus either the transaction will * succeed and the offset will be updated based on what was consumed or the result will not be stored and the offset * won't be updated. * <li>If the results are being stored in a local store it may be possible to store the offset there as well. For * example a search index could be built by subscribing to a particular partition and storing both the offset and the * indexed data together. If this is done in a way that is atomic, it is often possible to have it be the case that even * if a crash occurs that causes unsync'd data to be lost, whatever is left has the corresponding offset stored as well. * This means that in this case the indexing process that comes back having lost recent updates just resumes indexing * from what it has ensuring that no updates are lost. * </ul> * <p> * Each record comes with its own offset, so to manage your own offset you just need to do the following: * * <ul> * <li>Configure <code>enable.auto.commit=false</code> * <li>Use the offset provided with each {@link ConsumerRecord} to save your position. * <li>On restart restore the position of the consumer using {@link #seek(TopicPartition, long)}. * </ul> * * <p> * This type of usage is simplest when the partition assignment is also done manually (this would be likely in the * search index use case described above). If the partition assignment is done automatically special care is * needed to handle the case where partition assignments change. This can be done by providing a * {@link ConsumerRebalanceListener} instance in the call to {@link #subscribe(Collection, ConsumerRebalanceListener)} * and {@link #subscribe(Pattern, ConsumerRebalanceListener)}. 
* For example, when partitions are taken from a consumer the consumer will want to commit its offset for those partitions by * implementing {@link ConsumerRebalanceListener#onPartitionsRevoked(Collection)}. When partitions are assigned to a * consumer, the consumer will want to look up the offset for those new partitions and correctly initialize the consumer * to that position by implementing {@link ConsumerRebalanceListener#onPartitionsAssigned(Collection)}. * <p> * Another common use for {@link ConsumerRebalanceListener} is to flush any caches the application maintains for * partitions that are moved elsewhere. * * <h4>Controlling The Consumer's Position</h4> * * In most use cases the consumer will simply consume records from beginning to end, periodically committing its * position (either automatically or manually). However Kafka allows the consumer to manually control its position, * moving forward or backwards in a partition at will. This means a consumer can re-consume older records, or skip to * the most recent records without actually consuming the intermediate records. * <p> * There are several instances where manually controlling the consumer's position can be useful. * <p> * One case is for time-sensitive record processing it may make sense for a consumer that falls far enough behind to not * attempt to catch up processing all records, but rather just skip to the most recent records. * <p> * Another use case is for a system that maintains local state as described in the previous section. In such a system * the consumer will want to initialize its position on start-up to whatever is contained in the local store. Likewise * if the local state is destroyed (say because the disk is lost) the state may be recreated on a new machine by * re-consuming all the data and recreating the state (assuming that Kafka is retaining sufficient history). * <p> * Kafka allows specifying the position using {@link #seek(TopicPartition, long)} to specify the new position. 
Special
 * methods for seeking to the earliest and latest offset the server maintains are also available (
 * {@link #seekToBeginning(Collection)} and {@link #seekToEnd(Collection)} respectively).
 *
 * <h4>Consumption Flow Control</h4>
 *
 * If a consumer is assigned multiple partitions to fetch data from, it will try to consume from all of them at the same time,
 * effectively giving these partitions the same priority for consumption. However in some cases consumers may want to
 * first focus on fetching from some subset of the assigned partitions at full speed, and only start fetching other partitions
 * when these partitions have little or no data to consume.
 *
 * <p>
 * One such case is stream processing, where a processor fetches from two topics and performs the join on these two streams.
 * When one of the topics is lagging far behind the other, the processor would like to pause fetching from the ahead topic
 * in order to get the lagging stream to catch up. Another example is bootstrapping upon consumer start-up where there is
 * a lot of historical data to catch up on; the application usually wants to get the latest data on some of the topics before considering
 * fetching other topics.
 *
 * <p>
 * Kafka supports dynamic controlling of consumption flows by using {@link #pause(Collection)} and {@link #resume(Collection)}
 * to pause the consumption on the specified assigned partitions and resume the consumption
 * on the specified paused partitions respectively in the future {@link #poll(Duration)} calls.
 *
 * <h3>Reading Transactional Messages</h3>
 *
 * <p>
 * Transactions were introduced in Kafka 0.11.0 wherein applications can write to multiple topics and partitions atomically.
 * In order for this to work, consumers reading from these partitions should be configured to only read committed data.
 * This can be achieved by setting the {@code isolation.level=read_committed} in the consumer's configuration.
*
 * <p>
 * In <code>read_committed</code> mode, the consumer will read only those transactional messages which have been
 * successfully committed. It will continue to read non-transactional messages as before. There is no client-side
 * buffering in <code>read_committed</code> mode. Instead, the end offset of a partition for a <code>read_committed</code>
 * consumer would be the offset of the first message in the partition belonging to an open transaction. This offset
 * is known as the 'Last Stable Offset' (LSO).</p>
 *
 * <p>
 * A {@code read_committed} consumer will only read up to the LSO and filter out any transactional
 * messages which have been aborted. The LSO also affects the behavior of {@link #seekToEnd(Collection)} and
 * {@link #endOffsets(Collection)} for {@code read_committed} consumers, details of which are in each method's documentation.
 * Finally, the fetch lag metrics are also adjusted to be relative to the LSO for {@code read_committed} consumers.
 *
 * <p>
 * Partitions with transactional messages will include commit or abort markers which indicate the result of a transaction.
 * These markers are not returned to applications, yet have an offset in the log. As a result, applications reading from
 * topics with transactional messages will see gaps in the consumed offsets. These missing messages would be the transaction
 * markers, and they are filtered out for consumers in both isolation levels. Additionally, applications using
 * {@code read_committed} consumers may also see gaps due to aborted transactions, since those messages would not
 * be returned by the consumer and yet would have valid offsets.
 *
 * <h3><a name="multithreaded">Multi-threaded Processing</a></h3>
 *
 * The Kafka consumer is NOT thread-safe. All network I/O happens in the thread of the application
 * making the call. It is the responsibility of the user to ensure that multi-threaded access
 * is properly synchronized.
Un-synchronized access will result in {@link ConcurrentModificationException}. * * <p> * The only exception to this rule is {@link #wakeup()}, which can safely be used from an external thread to * interrupt an active operation. In this case, a {@link org.apache.kafka.common.errors.WakeupException} will be * thrown from the thread blocking on the operation. This can be used to shutdown the consumer from another thread. * The following snippet shows the typical pattern: * * <pre> * public class KafkaConsumerRunner implements Runnable { * private final AtomicBoolean closed = new AtomicBoolean(false); * private final KafkaConsumer consumer; * * public KafkaConsumerRunner(KafkaConsumer consumer) { * this.consumer = consumer; * } * * {@literal}@Override * public void run() { * try { * consumer.subscribe(Arrays.asList("topic")); * while (!closed.get()) { * ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); * // Handle new records * } * } catch (WakeupException e) { * // Ignore exception if closing * if (!closed.get()) throw e; * } finally { * consumer.close(); * } * } * * // Shutdown hook which can be called from a separate thread * public void shutdown() { * closed.set(true); * consumer.wakeup(); * } * } * </pre> * * Then in a separate thread, the consumer can be shutdown by setting the closed flag and waking up the consumer. * * <p> * <pre> * closed.set(true); * consumer.wakeup(); * </pre> * * <p> * Note that while it is possible to use thread interrupts instead of {@link #wakeup()} to abort a blocking operation * (in which case, {@link InterruptException} will be raised), we discourage their use since they may cause a clean * shutdown of the consumer to be aborted. Interrupts are mainly supported for those cases where using {@link #wakeup()} * is impossible, e.g. when a consumer thread is managed by code that is unaware of the Kafka client. * * <p> * We have intentionally avoided implementing a particular threading model for processing. 
This leaves several
 * options for implementing multi-threaded processing of records.
 *
 * <h4>1. One Consumer Per Thread</h4>
 *
 * A simple option is to give each thread its own consumer instance. Here are the pros and cons of this approach:
 * <ul>
 * <li><b>PRO</b>: It is the easiest to implement
 * <li><b>PRO</b>: It is often the fastest as no inter-thread co-ordination is needed
 * <li><b>PRO</b>: It makes in-order processing on a per-partition basis very easy to implement (each thread just
 * processes messages in the order it receives them).
 * <li><b>CON</b>: More consumers means more TCP connections to the cluster (one per thread). In general Kafka handles
 * connections very efficiently so this is generally a small cost.
 * <li><b>CON</b>: Multiple consumers means more requests being sent to the server and slightly less batching of data
 * which can cause some drop in I/O throughput.
 * <li><b>CON</b>: The number of total threads across all processes will be limited by the total number of partitions.
 * </ul>
 *
 * <h4>2. Decouple Consumption and Processing</h4>
 *
 * Another alternative is to have one or more consumer threads that do all data consumption and hand off
 * {@link ConsumerRecords} instances to a blocking queue consumed by a pool of processor threads that actually handle
 * the record processing.
 *
 * This option likewise has pros and cons:
 * <ul>
 * <li><b>PRO</b>: This option allows independently scaling the number of consumers and processors. This makes it
 * possible to have a single consumer that feeds many processor threads, avoiding any limitation on partitions.
 * <li><b>CON</b>: Guaranteeing order across the processors requires particular care as the threads will execute
 * independently: an earlier chunk of data may actually be processed after a later chunk of data just due to the luck of
 * thread execution timing. For processing that has no ordering requirements this is not a problem.
* <li><b>CON</b>: Manually committing the position becomes harder as it requires that all threads co-ordinate to ensure
 * that processing is complete for that partition.
 * </ul>
 *
 * There are many possible variations on this approach. For example each processor thread can have its own queue, and
 * the consumer threads can hash into these queues using the TopicPartition to ensure in-order consumption and simplify
 * commit.
 */
public class KafkaConsumer<K, V> implements Consumer<K, V> {

    private static final String CLIENT_ID_METRIC_TAG = "client-id";
    // Sentinel value for currentThread meaning "no thread currently owns this consumer".
    private static final long NO_CURRENT_THREAD = -1L;
    private static final String JMX_PREFIX = "kafka.consumer";
    static final long DEFAULT_CLOSE_TIMEOUT_MS = 30 * 1000;
    static final String DEFAULT_REASON = "rebalance enforced by user";

    // Visible for testing
    final Metrics metrics;
    final KafkaConsumerMetrics kafkaConsumerMetrics;

    // Not final: assigned inside the constructor's try block; the catch path checks it for null
    // to decide whether any internal objects were constructed before failing.
    private Logger log;
    private final String clientId;
    private final Optional<String> groupId;
    private final ConsumerCoordinator coordinator;
    private final Deserializer<K> keyDeserializer;
    private final Deserializer<V> valueDeserializer;
    private final Fetcher<K, V> fetcher;
    private final OffsetFetcher offsetFetcher;
    private final TopicMetadataFetcher topicMetadataFetcher;
    private final ConsumerInterceptors<K, V> interceptors;
    private final IsolationLevel isolationLevel;
    private final Time time;
    private final ConsumerNetworkClient client;
    private final SubscriptionState subscriptions;
    private final ConsumerMetadata metadata;
    private final long retryBackoffMs;
    private final long requestTimeoutMs;
    private final int defaultApiTimeoutMs;
    // Flipped when the consumer is closed; volatile so other threads observe shutdown promptly.
    private volatile boolean closed = false;
    private final List<ConsumerPartitionAssignor> assignors;

    // currentThread holds the threadId of the current thread accessing KafkaConsumer
    // and is used to prevent multi-threaded access
    private final AtomicLong currentThread = new AtomicLong(NO_CURRENT_THREAD);
    // refcount is used to allow reentrant access by the thread who has acquired currentThread
    private final AtomicInteger refcount = new AtomicInteger(0);
    // to keep from repeatedly scanning subscriptions in poll(), cache the result during metadata updates
    private boolean cachedSubscriptionHasAllFetchPositions;
    //** added by Superstream
    // NOTE(review): Superstream integration handle; remains null when the client is not
    // Superstream-enabled (see the null check in the main constructor).
    Superstream superstreamConnection;
    // added by Superstream **

    /**
     * A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings
     * are documented <a href="http://kafka.apache.org/documentation.html#consumerconfigs" >here</a>. Values can be
     * either strings or objects of the appropriate type (for example a numeric configuration would accept either the
     * string "42" or the integer 42).
     * <p>
     * Valid configuration strings are documented at {@link ConsumerConfig}.
     * <p>
     * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks.
     *
     * @param configs The consumer configs
     */
    public KafkaConsumer(Map<String, Object> configs) {
        this(configs, null, null);
    }

    /**
     * A consumer is instantiated by providing a {@link java.util.Properties} object as configuration.
     * <p>
     * Valid configuration strings are documented at {@link ConsumerConfig}.
     * <p>
     * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks.
     *
     * @param properties The consumer configuration properties
     */
    public KafkaConsumer(Properties properties) {
        this(properties, null, null);
    }

    /**
     * A consumer is instantiated by providing a {@link java.util.Properties} object as configuration, and a
     * key and a value {@link Deserializer}.
     * <p>
     * Valid configuration strings are documented at {@link ConsumerConfig}.
     * <p>
     * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks.
     *
     * @param properties The consumer configuration properties
     * @param keyDeserializer The deserializer for key that implements {@link Deserializer}. The configure() method
     *            won't be called in the consumer when the deserializer is passed in directly.
     * @param valueDeserializer The deserializer for value that implements {@link Deserializer}. The configure() method
     *            won't be called in the consumer when the deserializer is passed in directly.
     */
    public KafkaConsumer(Properties properties,
                         Deserializer<K> keyDeserializer,
                         Deserializer<V> valueDeserializer) {
        this(Utils.propsToMap(properties), keyDeserializer, valueDeserializer);
    }

    /**
     * A consumer is instantiated by providing a set of key-value pairs as configuration, and a key and a value {@link Deserializer}.
     * <p>
     * Valid configuration strings are documented at {@link ConsumerConfig}.
     * <p>
     * Note: after creating a {@code KafkaConsumer} you must always {@link #close()} it to avoid resource leaks.
     *
     * @param configs The consumer configs
     * @param keyDeserializer The deserializer for key that implements {@link Deserializer}. The configure() method
     *            won't be called in the consumer when the deserializer is passed in directly.
     * @param valueDeserializer The deserializer for value that implements {@link Deserializer}. The configure() method
     *            won't be called in the consumer when the deserializer is passed in directly.
*/ public KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) { this(new ConsumerConfig(ConsumerConfig.appendDeserializerToConfig(configs, keyDeserializer, valueDeserializer)), keyDeserializer, valueDeserializer); } @SuppressWarnings("unchecked") KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) { try { GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig(config, GroupRebalanceConfig.ProtocolType.CONSUMER); this.groupId = Optional.ofNullable(groupRebalanceConfig.groupId); this.clientId = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG); LogContext logContext; // If group.instance.id is set, we will append it to the log context. if (groupRebalanceConfig.groupInstanceId.isPresent()) { logContext = new LogContext("[Consumer instanceId=" + groupRebalanceConfig.groupInstanceId.get() + ", clientId=" + clientId + ", groupId=" + groupId.orElse("null") + "] "); } else { logContext = new LogContext("[Consumer clientId=" + clientId + ", groupId=" + groupId.orElse("null") + "] "); } this.log = logContext.logger(getClass()); boolean enableAutoCommit = config.maybeOverrideEnableAutoCommit(); groupId.ifPresent(groupIdStr -> { if (groupIdStr.isEmpty()) { log.warn("Support for using the empty group id by consumers is deprecated and will be removed in the next major release."); } }); log.debug("Initializing the Kafka consumer"); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); this.time = Time.SYSTEM; this.metrics = buildMetrics(config, time, clientId); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); // ** Added by Superstream Map<String, Object> originalsMap = config.originals(); Superstream superstreamConn = (Superstream) originalsMap.get(Consts.superstreamConnectionKey); if (superstreamConn != null) 
{ this.superstreamConnection = superstreamConn; this.superstreamConnection.clientCounters.setMetrics(this.metrics); this.superstreamConnection.setFullClientConfigs(config.values()); try { this.superstreamConnection.waitForSuperstreamConfigs(config); }catch (InterruptedException e) { this.superstreamConnection.getSuperstreamPrintStream().println("Error while waiting for consumer superstream configs"); } } // Added by Superstream ** List<ConsumerInterceptor<K, V>> interceptorList = (List) config.getConfiguredInstances( ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptor.class, Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)); this.interceptors = new ConsumerInterceptors<>(interceptorList); if (keyDeserializer == null) { this.keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); this.keyDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), true); } else { config.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); this.keyDeserializer = keyDeserializer; } if (valueDeserializer == null) { this.valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); this.valueDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), false); } else { config.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); this.valueDeserializer = valueDeserializer; } OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.valueOf(config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT)); this.subscriptions = new SubscriptionState(logContext, offsetResetStrategy); ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(this.keyDeserializer, this.valueDeserializer, metrics.reporters(), interceptorList); this.metadata = new ConsumerMetadata(retryBackoffMs, 
config.getLong(ConsumerConfig.METADATA_MAX_AGE_CONFIG), !config.getBoolean(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG), config.getBoolean(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG), subscriptions, logContext, clusterResourceListeners); List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses( config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG), config.getString(ConsumerConfig.CLIENT_DNS_LOOKUP_CONFIG)); this.metadata.bootstrap(addresses); String metricGrpPrefix = "consumer"; FetchMetricsRegistry metricsRegistry = new FetchMetricsRegistry(Collections.singleton(CLIENT_ID_METRIC_TAG), metricGrpPrefix); FetchMetricsManager fetchMetricsManager = new FetchMetricsManager(metrics, metricsRegistry); ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext); this.isolationLevel = IsolationLevel.valueOf( config.getString(ConsumerConfig.ISOLATION_LEVEL_CONFIG).toUpperCase(Locale.ROOT)); int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG); ApiVersions apiVersions = new ApiVersions(); NetworkClient netClient = new NetworkClient( new Selector(config.getLong(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder, logContext), this.metadata, clientId, 100, // a fixed large enough value will suffice for max in-flight requests config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG), config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), config.getInt(ConsumerConfig.SEND_BUFFER_CONFIG), config.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG), config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), config.getLong(ConsumerConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG), config.getLong(ConsumerConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG), time, true, apiVersions, fetchMetricsManager.throttleTimeSensor(), logContext); this.client = new ConsumerNetworkClient( logContext, netClient, metadata, time, retryBackoffMs, 
config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), heartbeatIntervalMs); //Will avoid blocking an extended period of time to prevent heartbeat thread starvation this.assignors = ConsumerPartitionAssignor.getAssignorInstances( config.getList(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG), config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)) ); // no coordinator will be constructed for the default (null) group id if (!groupId.isPresent()) { config.ignore(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG); config.ignore(ConsumerConfig.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED); this.coordinator = null; } else { this.coordinator = new ConsumerCoordinator(groupRebalanceConfig, logContext, this.client, assignors, this.metadata, this.subscriptions, metrics, metricGrpPrefix, this.time, enableAutoCommit, config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG), this.interceptors, config.getBoolean(ConsumerConfig.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED), config.getString(ConsumerConfig.CLIENT_RACK_CONFIG)); } FetchConfig<K, V> fetchConfig = new FetchConfig<>(config, this.keyDeserializer, this.valueDeserializer, isolationLevel); this.fetcher = new Fetcher<>( logContext, this.client, this.metadata, this.subscriptions, fetchConfig, fetchMetricsManager, this.time); this.offsetFetcher = new OffsetFetcher(logContext, client, metadata, subscriptions, time, retryBackoffMs, requestTimeoutMs, isolationLevel, apiVersions); this.topicMetadataFetcher = new TopicMetadataFetcher(logContext, client, retryBackoffMs); this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, metricGrpPrefix); config.logUnused(); AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); log.debug("Kafka consumer initialized"); } catch (Throwable t) { // call close methods if internal objects are already constructed; this is to prevent resource leak. 
see KAFKA-2121 // we do not need to call `close` at all when `log` is null, which means no internal objects were initialized. if (this.log != null) { close(Duration.ZERO, true); } // now propagate the exception throw new KafkaException("Failed to construct kafka consumer", t); } } // visible for testing KafkaConsumer(LogContext logContext, String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, OffsetFetcher offsetFetcher, TopicMetadataFetcher topicMetadataFetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, ConsumerMetadata metadata, long retryBackoffMs, long requestTimeoutMs, int defaultApiTimeoutMs, List<ConsumerPartitionAssignor> assignors, String groupId) { this.log = logContext.logger(getClass()); this.clientId = clientId; this.coordinator = coordinator; this.keyDeserializer = keyDeserializer; this.valueDeserializer = valueDeserializer; this.fetcher = fetcher; this.offsetFetcher = offsetFetcher; this.topicMetadataFetcher = topicMetadataFetcher; this.isolationLevel = IsolationLevel.READ_UNCOMMITTED; this.interceptors = Objects.requireNonNull(interceptors); this.time = time; this.client = client; this.metrics = metrics; this.subscriptions = subscriptions; this.metadata = metadata; this.retryBackoffMs = retryBackoffMs; this.requestTimeoutMs = requestTimeoutMs; this.defaultApiTimeoutMs = defaultApiTimeoutMs; this.assignors = assignors; this.groupId = Optional.ofNullable(groupId); this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, "consumer"); } private static Metrics buildMetrics(ConsumerConfig config, Time time, String clientId) { Map<String, String> metricsTags = Collections.singletonMap(CLIENT_ID_METRIC_TAG, clientId); MetricConfig metricConfig = new MetricConfig().samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG)) 
.timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG)))
                .tags(metricsTags);
        List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config);
        MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX,
                config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
        return new Metrics(metricConfig, reporters, time, metricsContext);
    }

    /**
     * Get the set of partitions currently assigned to this consumer. If subscription happened by directly assigning
     * partitions using {@link #assign(Collection)} then this will simply return the same partitions that
     * were assigned. If topic subscription was used, then this will give the set of topic partitions currently assigned
     * to the consumer (which may be none if the assignment hasn't happened yet, or the partitions are in the
     * process of getting reassigned).
     * @return The set of partitions currently assigned to this consumer
     */
    public Set<TopicPartition> assignment() {
        acquireAndEnsureOpen();
        try {
            // NOTE(review): unlike subscription() below, no defensive copy is taken here —
            // confirm assignedPartitions() already returns a snapshot rather than a live view.
            return Collections.unmodifiableSet(this.subscriptions.assignedPartitions());
        } finally {
            release();
        }
    }

    /**
     * Get the current subscription. Will return the same topics used in the most recent call to
     * {@link #subscribe(Collection, ConsumerRebalanceListener)}, or an empty set if no such call has been made.
     * @return The set of topics currently subscribed to
     */
    public Set<String> subscription() {
        acquireAndEnsureOpen();
        try {
            // Copy into a fresh HashSet so later subscription changes cannot leak through the returned set.
            return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription()));
        } finally {
            release();
        }
    }

    /**
     * Subscribe to the given list of topics to get dynamically
     * assigned partitions. <b>Topic subscriptions are not incremental.
This list will replace the current
     * assignment (if there is one).</b> Note that it is not possible to combine topic subscription with group management
     * with manual partition assignment through {@link #assign(Collection)}.
     *
     * If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}.
     *
     * <p>
     * As part of group management, the consumer will keep track of the list of consumers that belong to a particular
     * group and will trigger a rebalance operation if any one of the following events are triggered:
     * <ul>
     * <li>Number of partitions change for any of the subscribed topics
     * <li>A subscribed topic is created or deleted
     * <li>An existing member of the consumer group is shutdown or fails
     * <li>A new member is added to the consumer group
     * </ul>
     * <p>
     * When any of these events are triggered, the provided listener will be invoked first to indicate that
     * the consumer's assignment has been revoked, and then again when the new assignment has been received.
     * Note that rebalances will only occur during an active call to {@link #poll(Duration)}, so callbacks will
     * also only be invoked during that time.
     *
     * The provided listener will immediately override any listener set in a previous call to subscribe.
     * It is guaranteed, however, that the partitions revoked/assigned through this interface are from topics
     * subscribed in this call. See {@link ConsumerRebalanceListener} for more details.
     *
     * @param topics The list of topics to subscribe to
     * @param listener Non-null listener instance to get notifications on partition assignment/revocation for the
     *                 subscribed topics
     * @throws IllegalArgumentException If topics is null or contains null or empty elements, or if listener is null
     * @throws IllegalStateException If {@code subscribe()} is called previously with pattern, or assign is called
     *                               previously (without a subsequent call to {@link #unsubscribe()}), or if not
     *                               configured at least one partition assignment strategy
     */
    @Override
    public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
        acquireAndEnsureOpen();
        try {
            maybeThrowInvalidGroupIdException();
            if (topics == null)
                throw new IllegalArgumentException("Topic collection to subscribe to cannot be null");
            if (topics.isEmpty()) {
                // treat subscribing to empty topic list as the same as unsubscribing
                this.unsubscribe();
            } else {
                for (String topic : topics) {
                    if (Utils.isBlank(topic))
                        throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic");
                }

                throwIfNoAssignorsConfigured();
                // presumably discards records already fetched for topics outside the new
                // subscription — TODO(review): confirm against Fetcher's contract
                fetcher.clearBufferedDataForUnassignedTopics(topics);
                log.info("Subscribed to topic(s): {}", Utils.join(topics, ", "));
                // only request a metadata refresh when the subscription actually changed
                // (assumes SubscriptionState.subscribe returns true in that case — confirm)
                if (this.subscriptions.subscribe(new HashSet<>(topics), listener))
                    metadata.requestUpdateForNewTopics();
            }
        } finally {
            release();
        }
    }

    /**
     * Subscribe to the given list of topics to get dynamically assigned partitions.
     * <b>Topic subscriptions are not incremental. This list will replace the current
     * assignment (if there is one).</b> It is not possible to combine topic subscription with group management
     * with manual partition assignment through {@link #assign(Collection)}.
     *
     * If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}.
     *
     * <p>
     * This is a short-hand for {@link #subscribe(Collection, ConsumerRebalanceListener)}, which
     * uses a no-op listener.
If you need the ability to seek to particular offsets, you should prefer * {@link #subscribe(Collection, ConsumerRebalanceListener)}, since group rebalances will cause partition offsets * to be reset. You should also provide your own listener if you are doing your own offset * management since the listener gives you an opportunity to commit offsets before a rebalance finishes. * * @param topics The list of topics to subscribe to * @throws IllegalArgumentException If topics is null or contains null or empty elements * @throws IllegalStateException If {@code subscribe()} is called previously with pattern, or assign is called * previously (without a subsequent call to {@link #unsubscribe()}), or if not * configured at-least one partition assignment strategy */ @Override public void subscribe(Collection<String> topics) { subscribe(topics, new NoOpConsumerRebalanceListener()); } /** * Subscribe to all topics matching specified pattern to get dynamically assigned partitions. * The pattern matching will be done periodically against all topics existing at the time of check. * This can be controlled through the {@code metadata.max.age.ms} configuration: by lowering * the max metadata age, the consumer will refresh metadata more often and check for matching topics. * <p> * See {@link #subscribe(Collection, ConsumerRebalanceListener)} for details on the * use of the {@link ConsumerRebalanceListener}. Generally rebalances are triggered when there * is a change to the topics matching the provided pattern and when consumer group membership changes. * Group rebalances only take place during an active call to {@link #poll(Duration)}. 
     *
     * @param pattern Pattern to subscribe to
     * @param listener Non-null listener instance to get notifications on partition assignment/revocation for the
     *                 subscribed topics
     * @throws IllegalArgumentException If pattern or listener is null
     * @throws IllegalStateException If {@code subscribe()} is called previously with topics, or assign is called
     *                               previously (without a subsequent call to {@link #unsubscribe()}), or if not
     *                               configured at least one partition assignment strategy
     */
    @Override
    public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) {
        // fail fast, before taking the consumer lock, if no valid group id was configured
        maybeThrowInvalidGroupIdException();
        if (pattern == null || pattern.toString().equals(""))
            throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ?
                    "null" : "empty"));

        acquireAndEnsureOpen();
        try {
            throwIfNoAssignorsConfigured();
            log.info("Subscribed to pattern: '{}'", pattern);
            this.subscriptions.subscribe(pattern, listener);
            // evaluate the pattern against the currently known topics right away,
            // then request a metadata refresh so newly matching topics are picked up
            this.coordinator.updatePatternSubscription(metadata.fetch());
            this.metadata.requestUpdateForNewTopics();
        } finally {
            release();
        }
    }

    /**
     * Subscribe to all topics matching specified pattern to get dynamically assigned partitions.
     * The pattern matching will be done periodically against topics existing at the time of check.
     * <p>
     * This is a short-hand for {@link #subscribe(Pattern, ConsumerRebalanceListener)}, which
     * uses a no-op listener. If you need the ability to seek to particular offsets, you should prefer
     * {@link #subscribe(Pattern, ConsumerRebalanceListener)}, since group rebalances will cause partition offsets
     * to be reset. You should also provide your own listener if you are doing your own offset
     * management since the listener gives you an opportunity to commit offsets before a rebalance finishes.
     *
     * @param pattern Pattern to subscribe to
     * @throws IllegalArgumentException If pattern is null
     * @throws IllegalStateException If {@code subscribe()} is called previously with topics, or assign is called
     *                               previously (without a subsequent call to {@link #unsubscribe()}), or if not
     *                               configured at least one partition assignment strategy
     */
    @Override
    public void subscribe(Pattern pattern) {
        subscribe(pattern, new NoOpConsumerRebalanceListener());
    }

    /**
     * Unsubscribe from topics currently subscribed with {@link #subscribe(Collection)} or {@link #subscribe(Pattern)}.
     * This also clears any partitions directly assigned through {@link #assign(Collection)}.
     *
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. rebalance callback errors)
     */
    public void unsubscribe() {
        acquireAndEnsureOpen();
        try {
            // no partitions remain assigned after this call, so all buffered fetch data can be dropped
            fetcher.clearBufferedDataForUnassignedPartitions(Collections.emptySet());
            if (this.coordinator != null) {
                // prepare for leaving the group (e.g. callbacks/cleanup) before requesting to leave it
                this.coordinator.onLeavePrepare();
                this.coordinator.maybeLeaveGroup("the consumer unsubscribed from all topics");
            }
            this.subscriptions.unsubscribe();
            log.info("Unsubscribed all topics or patterns and assigned partitions");
        } finally {
            release();
        }
    }

    /**
     * Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment
     * and will replace the previous assignment (if there is one).
     * <p>
     * If the given list of topic partitions is empty, it is treated the same as {@link #unsubscribe()}.
     * <p>
     * Manual topic assignment through this method does not use the consumer's group management
     * functionality. As such, there will be no rebalance operation triggered when group membership or cluster and topic
     * metadata change. Note that it is not possible to use both manual partition assignment with {@link #assign(Collection)}
     * and group assignment with {@link #subscribe(Collection, ConsumerRebalanceListener)}.
     * <p>
     * If auto-commit is enabled, an async commit (based on the old assignment) will be triggered before the new
     * assignment replaces the old one.
     *
     * @param partitions The list of partitions to assign this consumer
     * @throws IllegalArgumentException If partitions is null or contains null or empty topics
     * @throws IllegalStateException If {@code subscribe()} is called previously with topics or pattern
     *                               (without a subsequent call to {@link #unsubscribe()})
     */
    @Override
    public void assign(Collection<TopicPartition> partitions) {
        acquireAndEnsureOpen();
        try {
            if (partitions == null) {
                throw new IllegalArgumentException("Topic partition collection to assign to cannot be null");
            } else if (partitions.isEmpty()) {
                // empty assignment is treated the same as unsubscribing
                this.unsubscribe();
            } else {
                // validate every partition (and its topic name) before mutating any state
                for (TopicPartition tp : partitions) {
                    String topic = (tp != null) ? tp.topic() : null;
                    if (Utils.isBlank(topic))
                        throw new IllegalArgumentException("Topic partitions to assign to cannot have null or empty topic");
                }
                fetcher.clearBufferedDataForUnassignedPartitions(partitions);

                // make sure the offsets of topic partitions the consumer is unsubscribing from
                // are committed since there will be no following rebalance
                if (coordinator != null)
                    this.coordinator.maybeAutoCommitOffsetsAsync(time.milliseconds());

                log.info("Assigned to partition(s): {}", Utils.join(partitions, ", "));
                // the new assignment may include topics unknown to the cached metadata
                if (this.subscriptions.assignFromUser(new HashSet<>(partitions)))
                    metadata.requestUpdateForNewTopics();
            }
        } finally {
            release();
        }
    }

    /**
     * Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. It is an error to not have
     * subscribed to any topics or partitions before polling for data.
     * <p>
     * On each poll, consumer will try to use the last consumed offset as the starting offset and fetch sequentially.
The last * consumed offset can be manually set through {@link #seek(TopicPartition, long)} or automatically set as the last committed * offset for the subscribed list of partitions * * * @param timeoutMs The time, in milliseconds, spent waiting in poll if data is not available in the buffer. * If 0, returns immediately with any records that are available currently in the buffer, else returns empty. * Must not be negative. * @return map of topic to records since the last fetch for the subscribed list of topics and partitions * * @throws org.apache.kafka.clients.consumer.InvalidOffsetException if the offset for a partition or set of * partitions is undefined or out of range and no offset reset policy has been configured * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if caller lacks Read access to any of the subscribed * topics or to the configured groupId. See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. invalid groupId or * session timeout, errors deserializing key/value pairs, or any new error cases in future versions) * @throws java.lang.IllegalArgumentException if the timeout value is negative * @throws java.lang.IllegalStateException if the consumer is not subscribed to any topics or manually assigned any * partitions to consume from * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker. * * @deprecated Since 2.0. Use {@link #poll(Duration)}, which does not block beyond the timeout awaiting partition * assignment. 
See <a href="https://cwiki.apache.org/confluence/x/5kiHB">KIP-266</a> for more information. */ @Deprecated @Override public ConsumerRecords<K, V> poll(final long timeoutMs) { return poll(time.timer(timeoutMs), false); } /** * Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. It is an error to not have * subscribed to any topics or partitions before polling for data. * <p> * On each poll, consumer will try to use the last consumed offset as the starting offset and fetch sequentially. The last * consumed offset can be manually set through {@link #seek(TopicPartition, long)} or automatically set as the last committed * offset for the subscribed list of partitions * * <p> * This method returns immediately if there are records available or if the position advances past control records * or aborted transactions when isolation.level=read_committed. * Otherwise, it will await the passed timeout. If the timeout expires, an empty record set will be returned. * Note that this method may block beyond the timeout in order to execute custom * {@link ConsumerRebalanceListener} callbacks. * * * @param timeout The maximum time to block (must not be greater than {@link Long#MAX_VALUE} milliseconds) * * @return map of topic to records since the last fetch for the subscribed list of topics and partitions * * @throws org.apache.kafka.clients.consumer.InvalidOffsetException if the offset for a partition or set of * partitions is undefined or out of range and no offset reset policy has been configured * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. 
     * See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if caller lacks Read access to any of the subscribed
     *             topics or to the configured groupId. See the exception for more details
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. invalid groupId or
     *             session timeout, errors deserializing key/value pairs, your rebalance callback thrown exceptions,
     *             or any new error cases in future versions)
     * @throws java.lang.IllegalArgumentException if the timeout value is negative
     * @throws java.lang.IllegalStateException if the consumer is not subscribed to any topics or manually assigned any
     *             partitions to consume from
     * @throws java.lang.ArithmeticException if the timeout is greater than {@link Long#MAX_VALUE} milliseconds.
     * @throws org.apache.kafka.common.errors.InvalidTopicException if the current subscription contains any invalid
     *             topic (per {@link org.apache.kafka.common.internals.Topic#validate(String)})
     * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the consumer attempts to fetch stable offsets
     *             when the broker doesn't support this feature
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public ConsumerRecords<K, V> poll(final Duration timeout) {
        return poll(time.timer(timeout), true);
    }

    /**
     * Core poll loop shared by the public {@code poll} overloads: repeatedly updates
     * assignment metadata and collects fetched records until data arrives or the timer expires.
     *
     * @param timer bounds the total time spent in this call
     * @param includeMetadataInTimeout when true, metadata/join-group work is bounded by the timer;
     *                                 when false (deprecated {@code poll(long)} path), it may block indefinitely
     * @throws KafkaException if the rebalance callback throws exception
     */
    private ConsumerRecords<K, V> poll(final Timer timer, final boolean includeMetadataInTimeout) {
        acquireAndEnsureOpen();
        try {
            this.kafkaConsumerMetrics.recordPollStart(timer.currentTimeMs());

            if (this.subscriptions.hasNoSubscriptionOrUserAssignment()) {
                throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions");
            }

            do {
                client.maybeTriggerWakeup();

                if (includeMetadataInTimeout) {
                    // try to update assignment metadata BUT do not need to block on the timer for join group
                    updateAssignmentMetadataIfNeeded(timer, false);
                } else {
                    // legacy path: block (indefinitely) until the assignment metadata is up to date
                    while (!updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE), true)) {
                        log.warn("Still waiting for metadata");
                    }
                }

                final Fetch<K, V> fetch = pollForFetches(timer);
                if (!fetch.isEmpty()) {
                    // before returning the fetched records, we can send off the next round of fetches
                    // and avoid block waiting for their responses to enable pipelining while the user
                    // is handling the fetched records.
                    //
                    // NOTE: since the consumed position has already been updated, we must not allow
                    // wakeups or any other errors to be triggered prior to returning the fetched records.
                    if (sendFetches() > 0 || client.hasPendingRequests()) {
                        client.transmitSends();
                    }

                    if (fetch.records().isEmpty()) {
                        log.trace("Returning empty records from `poll()` "
                                + "since the consumer's position has advanced for at least one topic partition");
                    }

                    // interceptors get a chance to observe/mutate the records before they reach the caller
                    return this.interceptors.onConsume(new ConsumerRecords<>(fetch.records()));
                }
            } while (timer.notExpired());

            return ConsumerRecords.empty();
        } finally {
            release();
            this.kafkaConsumerMetrics.recordPollEnd(timer.currentTimeMs());
        }
    }

    // Re-validate positions that may have been invalidated by a metadata change,
    // then hand off to the fetcher; returns the number of fetch requests sent.
    private int sendFetches() {
        offsetFetcher.validatePositionsOnMetadataChange();
        return fetcher.sendFetches();
    }

    // Returns false if the coordinator poll did not complete (e.g. still joining the group);
    // otherwise ensures every assigned partition has a valid fetch position.
    boolean updateAssignmentMetadataIfNeeded(final Timer timer, final boolean waitForJoinGroup) {
        if (coordinator != null && !coordinator.poll(timer, waitForJoinGroup)) {
            return false;
        }

        return updateFetchPositions(timer);
    }

    /**
     * Send new fetch requests if needed and block (up to the timer / next coordinator
     * deadline) waiting for fetched data to become available.
     *
     * @throws KafkaException if the rebalance callback throws exception
     */
    private Fetch<K, V> pollForFetches(Timer timer) {
        // never block past the point where the coordinator needs servicing again
        long pollTimeout = coordinator == null ? timer.remainingMs() :
                Math.min(coordinator.timeToNextPoll(timer.currentTimeMs()), timer.remainingMs());

        // if data is available already, return it immediately
        final Fetch<K, V> fetch = fetcher.collectFetch();
        if (!fetch.isEmpty()) {
            return fetch;
        }

        // send any new fetches (won't resend pending fetches)
        sendFetches();

        // We do not want to be stuck blocking in poll if we are missing some positions
        // since the offset lookup may be backing off after a failure

        // NOTE: the use of cachedSubscriptionHasAllFetchPositions means we MUST call
        // updateAssignmentMetadataIfNeeded before this method.
        if (!cachedSubscriptionHasAllFetchPositions && pollTimeout > retryBackoffMs) {
            pollTimeout = retryBackoffMs;
        }

        log.trace("Polling for fetches with timeout {}", pollTimeout);

        Timer pollTimer = time.timer(pollTimeout);
        client.poll(pollTimer, () -> {
            // since a fetch might be completed by the background thread, we need this poll condition
            // to ensure that we do not block unnecessarily in poll()
            return !fetcher.hasAvailableFetches();
        });
        timer.update(pollTimer.currentTimeMs());

        return fetcher.collectFetch();
    }

    /**
     * Commit offsets returned on the last {@link #poll(Duration) poll()} for all the subscribed list of topics and
     * partitions.
     * <p>
     * This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after
     * every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API
     * should not be used.
     * <p>
     * This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is
     * encountered (in which case it is thrown to the caller), or the timeout specified by {@code default.api.timeout.ms} expires
     * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller).
     * <p>
     * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)}
     * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method.
     *
     * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried.
     *             This fatal error can only occur if you are using automatic group management with {@link #subscribe(Collection)},
     *             or if there is an active group with the same <code>group.id</code> which is using group management. In such cases,
     *             when you are trying to commit to partitions that are no longer assigned to this consumer because the
     *             consumer is for example no longer part of the group this exception would be thrown.
* @throws org.apache.kafka.common.errors.RebalanceInProgressException if the consumer instance is in the middle of a rebalance * so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first * complete the rebalance by calling {@link #poll(Duration)} and commit can be reconsidered afterwards. * NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed, * and also for those partitions that are still assigned their fetch positions may have changed too * if more records are returned from the {@link #poll(Duration)} call. * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata * is too large or if the topic does not exist). * @throws org.apache.kafka.common.errors.TimeoutException if the timeout specified by {@code default.api.timeout.ms} expires * before successful completion of the offset commit * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker. */ @Override public void commitSync() { commitSync(Duration.ofMillis(defaultApiTimeoutMs)); } /** * Commit offsets returned on the last {@link #poll(Duration) poll()} for all the subscribed list of topics and * partitions. * <p> * This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after * every rebalance and also on startup. 
     * As such, if you need to store offsets in anything other than Kafka, this API
     * should not be used.
     * <p>
     * This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is
     * encountered (in which case it is thrown to the caller), or the passed timeout expires.
     * <p>
     * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)}
     * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method.
     *
     * @param timeout The maximum amount of time to await completion of the offset commit
     * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried.
     *             This can only occur if you are using automatic group management with {@link #subscribe(Collection)},
     *             or if there is an active group with the same <code>group.id</code> which is using group management. In such cases,
     *             when you are trying to commit to partitions that are no longer assigned to this consumer because the
     *             consumer is for example no longer part of the group this exception would be thrown.
     * @throws org.apache.kafka.common.errors.RebalanceInProgressException if the consumer instance is in the middle of a rebalance
     *            so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first
     *            complete the rebalance by calling {@link #poll(Duration)} and commit can be reconsidered afterwards.
     *            NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed,
     *            and also for those partitions that are still assigned their fetch positions may have changed too
     *            if more records are returned from the {@link #poll(Duration)} call.
     * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
     *             configured groupId. See the exception for more details
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata
     *             is too large or if the topic does not exist).
     * @throws org.apache.kafka.common.errors.TimeoutException if the timeout expires before successful completion
     *            of the offset commit
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public void commitSync(Duration timeout) {
        // commit every offset consumed so far, across all assigned partitions
        commitSync(subscriptions.allConsumed(), timeout);
    }

    /**
     * Commit the specified offsets for the specified list of topics and partitions.
     * <p>
     * This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every
     * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API
     * should not be used. The committed offset should be the next message your application will consume,
     * i.e. lastProcessedMessageOffset + 1. If automatic group management with {@link #subscribe(Collection)} is used,
     * then the committed offsets must belong to the currently auto-assigned partitions.
     * <p>
     * This is a synchronous commit and will block until either the commit succeeds or an unrecoverable error is
     * encountered (in which case it is thrown to the caller), or the timeout specified by {@code default.api.timeout.ms} expires
     * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller).
     * <p>
     * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)}
     * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method.
     *
     * @param offsets A map of offsets by partition with associated metadata
     * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried.
     *             This can only occur if you are using automatic group management with {@link #subscribe(Collection)},
     *             or if there is an active group with the same <code>group.id</code> which is using group management. In such cases,
     *             when you are trying to commit to partitions that are no longer assigned to this consumer because the
     *             consumer is for example no longer part of the group this exception would be thrown.
     * @throws org.apache.kafka.common.errors.RebalanceInProgressException if the consumer instance is in the middle of a rebalance
     *            so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first
     *            complete the rebalance by calling {@link #poll(Duration)} and commit can be reconsidered afterwards.
     *            NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed,
     *            and also for those partitions that are still assigned their fetch positions may have changed too
     *            if more records are returned from the {@link #poll(Duration)} call, so when you retry committing
     *            you should consider updating the passed in {@code offset} parameter.
     * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
     *             configured groupId. See the exception for more details
     * @throws java.lang.IllegalArgumentException if the committed offset is negative
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata
     *             is too large or if the topic does not exist).
     * @throws org.apache.kafka.common.errors.TimeoutException if the timeout expires before successful completion
     *            of the offset commit
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        // delegate to the bounded variant using the configured default API timeout
        commitSync(offsets, Duration.ofMillis(defaultApiTimeoutMs));
    }

    /**
     * Commit the specified offsets for the specified list of topics and partitions.
     * <p>
     * This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every
     * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API
     * should not be used. The committed offset should be the next message your application will consume,
     * i.e. lastProcessedMessageOffset + 1. If automatic group management with {@link #subscribe(Collection)} is used,
     * then the committed offsets must belong to the currently auto-assigned partitions.
     * <p>
     * This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is
     * encountered (in which case it is thrown to the caller), or the timeout expires.
     * <p>
     * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)}
     * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method.
     *
     * @param offsets A map of offsets by partition with associated metadata
     * @param timeout The maximum amount of time to await completion of the offset commit
     * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried.
     *             This can only occur if you are using automatic group management with {@link #subscribe(Collection)},
     *             or if there is an active group with the same <code>group.id</code> which is using group management. In such cases,
     *             when you are trying to commit to partitions that are no longer assigned to this consumer because the
     *             consumer is for example no longer part of the group this exception would be thrown.
     * @throws org.apache.kafka.common.errors.RebalanceInProgressException if the consumer instance is in the middle of a rebalance
     *            so it is not yet determined which partitions would be assigned to the consumer. In such cases you can first
     *            complete the rebalance by calling {@link #poll(Duration)} and commit can be reconsidered afterwards.
     *            NOTE when you reconsider committing after the rebalance, the assigned partitions may have changed,
     *            and also for those partitions that are still assigned their fetch positions may have changed too
     *            if more records are returned from the {@link #poll(Duration)} call, so when you retry committing
     *            you should consider updating the passed in {@code offset} parameter.
     * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
     *             configured groupId. See the exception for more details
     * @throws java.lang.IllegalArgumentException if the committed offset is negative
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata
     *             is too large or if the topic does not exist).
     * @throws org.apache.kafka.common.errors.TimeoutException if the timeout expires before successful completion
     *            of the offset commit
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets, final Duration timeout) {
        acquireAndEnsureOpen();
        long commitStart = time.nanoseconds();
        try {
            maybeThrowInvalidGroupIdException();
            // propagate any newer leader epochs from the committed offsets to the local metadata
            offsets.forEach(this::updateLastSeenEpochIfNewer);
            // copy the map so later caller-side mutation cannot affect the in-flight commit
            if (!coordinator.commitOffsetsSync(new HashMap<>(offsets), time.timer(timeout))) {
                throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before successfully " +
                        "committing offsets " + offsets);
            }
        } finally {
            kafkaConsumerMetrics.recordCommitSync(time.nanoseconds() - commitStart);
            release();
        }
    }

    /**
     * Commit offsets returned on the last {@link #poll(Duration)} for all the subscribed list of topics and partition.
     * Same as {@link #commitAsync(OffsetCommitCallback) commitAsync(null)}
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public void commitAsync() {
        commitAsync(null);
    }

    /**
     * Commit offsets returned on the last {@link #poll(Duration) poll()} for the subscribed list of topics and partitions.
     * <p>
     * This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after
     * every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API
     * should not be used.
     * <p>
     * This is an asynchronous call and will not block. Any errors encountered are either passed to the callback
     * (if provided) or discarded.
     * <p>
     * Offsets committed through multiple calls to this API are guaranteed to be sent in the same order as
     * the invocations. Corresponding commit callbacks are also invoked in the same order. Additionally note that
     * offsets committed through this API are guaranteed to complete before a subsequent call to {@link #commitSync()}
     * (and variants) returns.
     *
     * @param callback Callback to invoke when the commit completes
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public void commitAsync(OffsetCommitCallback callback) {
        // asynchronously commit every offset consumed so far
        commitAsync(subscriptions.allConsumed(), callback);
    }

    /**
     * Commit the specified offsets for the specified list of topics and partitions to Kafka.
     * <p>
     * This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every
     * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API
     * should not be used. The committed offset should be the next message your application will consume,
     * i.e. lastProcessedMessageOffset + 1. If automatic group management with {@link #subscribe(Collection)} is used,
     * then the committed offsets must belong to the currently auto-assigned partitions.
     * <p>
     * This is an asynchronous call and will not block.
     * Any errors encountered are either passed to the callback
     * (if provided) or discarded.
     * <p>
     * Offsets committed through multiple calls to this API are guaranteed to be sent in the same order as
     * the invocations. Corresponding commit callbacks are also invoked in the same order. Additionally note that
     * offsets committed through this API are guaranteed to complete before a subsequent call to {@link #commitSync()}
     * (and variants) returns.
     *
     * @param offsets A map of offsets by partition with associate metadata. This map will be copied internally, so it
     *                is safe to mutate the map after returning.
     * @param callback Callback to invoke when the commit completes
     * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker.
     */
    @Override
    public void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
        acquireAndEnsureOpen();
        try {
            maybeThrowInvalidGroupIdException();
            log.debug("Committing offsets: {}", offsets);
            // propagate any newer leader epochs from the committed offsets to the local metadata
            offsets.forEach(this::updateLastSeenEpochIfNewer);
            // copy the map so later caller-side mutation cannot affect the in-flight commit
            coordinator.commitOffsetsAsync(new HashMap<>(offsets), callback);
        } finally {
            release();
        }
    }

    /**
     * Overrides the fetch offsets that the consumer will use on the next {@link #poll(Duration) poll(timeout)}. If this API
     * is invoked for the same partition more than once, the latest offset will be used on the next poll(). Note that
     * you may lose data if this API is arbitrarily used in the middle of consumption, to reset the fetch offsets
     * <p>
     * The next Consumer Record which will be retrieved when poll() is invoked will have the offset specified, given that
     * a record with that offset exists (i.e. it is a valid offset).
     * <p>
     * {@link #seekToBeginning(Collection)} will go to the first offset in the topic.
     * seek(0) is equivalent to seekToBeginning for a TopicPartition with beginning offset 0,
     * assuming that there is a record at offset 0 still available.
* {@link #seekToEnd(Collection)} is equivalent to seeking to the last offset of the partition, but behavior depends on * {@code isolation.level}, so see {@link #seekToEnd(Collection)} documentation for more details. * <p> * Seeking to the offset smaller than the log start offset or larger than the log end offset * means an invalid offset is reached. * Invalid offset behaviour is controlled by the {@code auto.offset.reset} property. * If this is set to "earliest", the next poll will return records from the starting offset. * If it is set to "latest", it will seek to the last offset (similar to seekToEnd()). * If it is set to "none", an {@code OffsetOutOfRangeException} will be thrown. * <p> * Note that, the seek offset won't change to the in-flight fetch request, it will take effect in next fetch request. * So, the consumer might wait for {@code fetch.max.wait.ms} before starting to fetch the records from desired offset. * * @param partition the TopicPartition on which the seek will be performed. * @param offset the next offset returned by poll(). * @throws IllegalArgumentException if the provided offset is negative * @throws IllegalStateException if the provided TopicPartition is not assigned to this consumer */ @Override public void seek(TopicPartition partition, long offset) { if (offset < 0) throw new IllegalArgumentException("seek offset must not be a negative number"); acquireAndEnsureOpen(); try { log.info("Seeking to offset {} for partition {}", offset, partition); SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition( offset, Optional.empty(), // This will ensure we skip validation this.metadata.currentLeader(partition)); this.subscriptions.seekUnvalidated(partition, newPosition); } finally { release(); } } /** * Overrides the fetch offsets that the consumer will use on the next {@link #poll(Duration) poll(timeout)}. If this API * is invoked for the same partition more than once, the latest offset will be used on the next poll(). 
Note that * you may lose data if this API is arbitrarily used in the middle of consumption, to reset the fetch offsets. This * method allows for setting the leaderEpoch along with the desired offset. * * @throws IllegalArgumentException if the provided offset is negative * @throws IllegalStateException if the provided TopicPartition is not assigned to this consumer */ @Override public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) { long offset = offsetAndMetadata.offset(); if (offset < 0) { throw new IllegalArgumentException("seek offset must not be a negative number"); } acquireAndEnsureOpen(); try { if (offsetAndMetadata.leaderEpoch().isPresent()) { log.info("Seeking to offset {} for partition {} with epoch {}", offset, partition, offsetAndMetadata.leaderEpoch().get()); } else { log.info("Seeking to offset {} for partition {}", offset, partition); } Metadata.LeaderAndEpoch currentLeaderAndEpoch = this.metadata.currentLeader(partition); SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition( offsetAndMetadata.offset(), offsetAndMetadata.leaderEpoch(), currentLeaderAndEpoch); this.updateLastSeenEpochIfNewer(partition, offsetAndMetadata); this.subscriptions.seekUnvalidated(partition, newPosition); } finally { release(); } } /** * Seek to the first offset for each of the given partitions. This function evaluates lazily, seeking to the * first offset in all partitions only when {@link #poll(Duration)} or {@link #position(TopicPartition)} are called. * If no partitions are provided, seek to the first offset for all of the currently assigned partitions. 
* * @throws IllegalArgumentException if {@code partitions} is {@code null} * @throws IllegalStateException if any of the provided partitions are not currently assigned to this consumer */ @Override public void seekToBeginning(Collection<TopicPartition> partitions) { if (partitions == null) throw new IllegalArgumentException("Partitions collection cannot be null"); acquireAndEnsureOpen(); try { Collection<TopicPartition> parts = partitions.size() == 0 ? this.subscriptions.assignedPartitions() : partitions; subscriptions.requestOffsetReset(parts, OffsetResetStrategy.EARLIEST); } finally { release(); } } /** * Seek to the last offset for each of the given partitions. This function evaluates lazily, seeking to the * final offset in all partitions only when {@link #poll(Duration)} or {@link #position(TopicPartition)} are called. * If no partitions are provided, seek to the final offset for all of the currently assigned partitions. * <p> * If {@code isolation.level=read_committed}, the end offset will be the Last Stable Offset, i.e., the offset * of the first message with an open transaction. * * @throws IllegalArgumentException if {@code partitions} is {@code null} * @throws IllegalStateException if any of the provided partitions are not currently assigned to this consumer */ @Override public void seekToEnd(Collection<TopicPartition> partitions) { if (partitions == null) throw new IllegalArgumentException("Partitions collection cannot be null"); acquireAndEnsureOpen(); try { Collection<TopicPartition> parts = partitions.size() == 0 ? this.subscriptions.assignedPartitions() : partitions; subscriptions.requestOffsetReset(parts, OffsetResetStrategy.LATEST); } finally { release(); } } /** * Get the offset of the <i>next record</i> that will be fetched (if a record with that offset exists). * This method may issue a remote call to the server if there is no current position for the given partition. 
* <p> * This call will block until either the position could be determined or an unrecoverable error is * encountered (in which case it is thrown to the caller), or the timeout specified by {@code default.api.timeout.ms} expires * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). * * @param partition The partition to get the position for * @return The current position of the consumer (that is, the offset of the next record to be fetched) * @throws IllegalStateException if the provided TopicPartition is not assigned to this consumer * @throws org.apache.kafka.clients.consumer.InvalidOffsetException if no offset is currently defined for * the partition * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. See the exception for more details * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the consumer attempts to fetch stable offsets * when the broker doesn't support this feature * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the position cannot be determined before the * timeout specified by {@code default.api.timeout.ms} expires */ @Override public long position(TopicPartition partition) { return position(partition, Duration.ofMillis(defaultApiTimeoutMs)); } /** * Get the offset of the <i>next record</i> that will be fetched (if a record with that offset exists). 
* This method may issue a remote call to the server if there is no current position * for the given partition. * <p> * This call will block until the position can be determined, an unrecoverable error is * encountered (in which case it is thrown to the caller), or the timeout expires. * * @param partition The partition to get the position for * @param timeout The maximum amount of time to await determination of the current position * @return The current position of the consumer (that is, the offset of the next record to be fetched) * @throws IllegalStateException if the provided TopicPartition is not assigned to this consumer * @throws org.apache.kafka.clients.consumer.InvalidOffsetException if no offset is currently defined for * the partition * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.TimeoutException if the position cannot be determined before the * passed timeout expires * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. 
See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors */ @Override public long position(TopicPartition partition, final Duration timeout) { acquireAndEnsureOpen(); try { if (!this.subscriptions.isAssigned(partition)) throw new IllegalStateException("You can only check the position for partitions assigned to this consumer."); Timer timer = time.timer(timeout); do { SubscriptionState.FetchPosition position = this.subscriptions.validPosition(partition); if (position != null) return position.offset; updateFetchPositions(timer); client.poll(timer); } while (timer.notExpired()); throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before the position " + "for partition " + partition + " could be determined"); } finally { release(); } } /** * Get the last committed offset for the given partition (whether the commit happened by this process or * another). This offset will be used as the position for the consumer in the event of a failure. * <p> * This call will do a remote call to get the latest committed offset from the server, and will block until the * committed offset is gotten successfully, an unrecoverable error is encountered (in which case it is thrown to * the caller), or the timeout specified by {@code default.api.timeout.ms} expires (in which case a * {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). * * @param partition The partition to check * @return The last committed offset and metadata or null if there was no prior commit * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. 
See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the committed offset cannot be found before * the timeout specified by {@code default.api.timeout.ms} expires. * * @deprecated since 2.4 Use {@link #committed(Set)} instead */ @Deprecated @Override public OffsetAndMetadata committed(TopicPartition partition) { return committed(partition, Duration.ofMillis(defaultApiTimeoutMs)); } /** * Get the last committed offset for the given partition (whether the commit happened by this process or * another). This offset will be used as the position for the consumer in the event of a failure. * <p> * This call will block until the position can be determined, an unrecoverable error is * encountered (in which case it is thrown to the caller), or the timeout expires. * * @param partition The partition to check * @param timeout The maximum amount of time to await the current committed offset * @return The last committed offset and metadata or null if there was no prior commit * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. 
See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the committed offset cannot be found before * expiration of the timeout * * @deprecated since 2.4 Use {@link #committed(Set, Duration)} instead */ @Deprecated @Override public OffsetAndMetadata committed(TopicPartition partition, final Duration timeout) { return committed(Collections.singleton(partition), timeout).get(partition); } /** * Get the last committed offsets for the given partitions (whether the commit happened by this process or * another). The returned offsets will be used as the position for the consumer in the event of a failure. * <p> * If any of the partitions requested do not exist, an exception would be thrown. * <p> * This call will do a remote call to get the latest committed offsets from the server, and will block until the * committed offsets are gotten successfully, an unrecoverable error is encountered (in which case it is thrown to * the caller), or the timeout specified by {@code default.api.timeout.ms} expires (in which case a * {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). * * @param partitions The partitions to check * @return The latest committed offsets for the given partitions; {@code null} will be returned for the * partition if there is no such message. * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. 
See the exception for more details * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the consumer attempts to fetch stable offsets * when the broker doesn't support this feature * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the committed offset cannot be found before * the timeout specified by {@code default.api.timeout.ms} expires. */ @Override public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) { return committed(partitions, Duration.ofMillis(defaultApiTimeoutMs)); } /** * Get the last committed offsets for the given partitions (whether the commit happened by this process or * another). The returned offsets will be used as the position for the consumer in the event of a failure. * <p> * If any of the partitions requested do not exist, an exception would be thrown. * <p> * This call will block to do a remote call to get the latest committed offsets from the server. * * @param partitions The partitions to check * @param timeout The maximum amount of time to await the latest committed offsets * @return The latest committed offsets for the given partitions; {@code null} will be returned for the * partition if there is no such message. * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the * configured groupId. 
See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the committed offset cannot be found before * expiration of the timeout */ @Override public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions, final Duration timeout) { acquireAndEnsureOpen(); long start = time.nanoseconds(); try { maybeThrowInvalidGroupIdException(); final Map<TopicPartition, OffsetAndMetadata> offsets; offsets = coordinator.fetchCommittedOffsets(partitions, time.timer(timeout)); if (offsets == null) { throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before the last " + "committed offset for partitions " + partitions + " could be determined. Try tuning default.api.timeout.ms " + "larger to relax the threshold."); } else { offsets.forEach(this::updateLastSeenEpochIfNewer); return offsets; } } finally { kafkaConsumerMetrics.recordCommitted(time.nanoseconds() - start); release(); } } /** * Get the metrics kept by the consumer */ @Override public Map<MetricName, ? extends Metric> metrics() { return Collections.unmodifiableMap(this.metrics.metrics()); } /** * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it * does not already have any metadata about the given topic. * * @param topic The topic to get partition metadata for * * @return The list of partitions, which will be empty when the given topic is not found * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. 
See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic. See the exception for more details * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before * the amount of time allocated by {@code default.api.timeout.ms} expires. */ @Override public List<PartitionInfo> partitionsFor(String topic) { return partitionsFor(topic, Duration.ofMillis(defaultApiTimeoutMs)); } /** * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it * does not already have any metadata about the given topic. * * @param topic The topic to get partition metadata for * @param timeout The maximum of time to await topic metadata * * @return The list of partitions, which will be empty when the given topic is not found * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic. 
See * the exception for more details * @throws org.apache.kafka.common.errors.TimeoutException if topic metadata cannot be fetched before expiration * of the passed timeout * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors */ @Override public List<PartitionInfo> partitionsFor(String topic, Duration timeout) { acquireAndEnsureOpen(); try { Cluster cluster = this.metadata.fetch(); List<PartitionInfo> parts = cluster.partitionsForTopic(topic); if (!parts.isEmpty()) return parts; Timer timer = time.timer(timeout); List<PartitionInfo> topicMetadata = topicMetadataFetcher.getTopicMetadata(topic, metadata.allowAutoTopicCreation(), timer); return topicMetadata != null ? topicMetadata : Collections.emptyList(); } finally { release(); } } /** * Get metadata about partitions for all topics that the user is authorized to view. This method will issue a * remote call to the server. * @return The map of topics and its partitions * * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before * the amount of time allocated by {@code default.api.timeout.ms} expires. */ @Override public Map<String, List<PartitionInfo>> listTopics() { return listTopics(Duration.ofMillis(defaultApiTimeoutMs)); } /** * Get metadata about partitions for all topics that the user is authorized to view. This method will issue a * remote call to the server. 
* * @param timeout The maximum time this operation will block to fetch topic metadata * * @return The map of topics and its partitions * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this * function is called * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while * this function is called * @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before * expiration of the passed timeout * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors */ @Override public Map<String, List<PartitionInfo>> listTopics(Duration timeout) { acquireAndEnsureOpen(); try { return topicMetadataFetcher.getAllTopicMetadata(time.timer(timeout)); } finally { release(); } } /** * Suspend fetching from the requested partitions. Future calls to {@link #poll(Duration)} will not return * any records from these partitions until they have been resumed using {@link #resume(Collection)}. * Note that this method does not affect partition subscription. In particular, it does not cause a group * rebalance when automatic assignment is used. * * Note: Rebalance will not preserve the pause/resume state. * @param partitions The partitions which should be paused * @throws IllegalStateException if any of the provided partitions are not currently assigned to this consumer */ @Override public void pause(Collection<TopicPartition> partitions) { acquireAndEnsureOpen(); try { log.debug("Pausing partitions {}", partitions); for (TopicPartition partition: partitions) { subscriptions.pause(partition); } } finally { release(); } } /** * Resume specified partitions which have been paused with {@link #pause(Collection)}. New calls to * {@link #poll(Duration)} will return records from these partitions if there are any to be fetched. * If the partitions were not previously paused, this method is a no-op. 
* @param partitions The partitions which should be resumed * @throws IllegalStateException if any of the provided partitions are not currently assigned to this consumer */ @Override public void resume(Collection<TopicPartition> partitions) { acquireAndEnsureOpen(); try { log.debug("Resuming partitions {}", partitions); for (TopicPartition partition: partitions) { subscriptions.resume(partition); } } finally { release(); } } /** * Get the set of partitions that were previously paused by a call to {@link #pause(Collection)}. * * @return The set of paused partitions */ @Override public Set<TopicPartition> paused() { acquireAndEnsureOpen(); try { return Collections.unmodifiableSet(subscriptions.pausedPartitions()); } finally { release(); } } /** * Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the * earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. * * This is a blocking call. The consumer does not have to be assigned the partitions. * If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, null * will be returned for that partition. * * @param timestampsToSearch the mapping from partition to the timestamp to look up. * * @return a mapping from partition to the timestamp and offset of the first message with timestamp greater * than or equal to the target timestamp. {@code null} will be returned for the partition if there is no * such message. * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). 
See the exception for more details * @throws IllegalArgumentException if the target timestamp is negative * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before * the amount of time allocated by {@code default.api.timeout.ms} expires. * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the broker does not support looking up * the offsets by timestamp */ @Override public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) { return offsetsForTimes(timestampsToSearch, Duration.ofMillis(defaultApiTimeoutMs)); } /** * Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the * earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. * * This is a blocking call. The consumer does not have to be assigned the partitions. * If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, null * will be returned for that partition. * * @param timestampsToSearch the mapping from partition to the timestamp to look up. * @param timeout The maximum amount of time to await retrieval of the offsets * * @return a mapping from partition to the timestamp and offset of the first message with timestamp greater * than or equal to the target timestamp. {@code null} will be returned for the partition if there is no * such message. * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). 
See the exception for more details * @throws IllegalArgumentException if the target timestamp is negative * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before * expiration of the passed timeout * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the broker does not support looking up * the offsets by timestamp */ @Override public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Duration timeout) { acquireAndEnsureOpen(); try { for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) { // we explicitly exclude the earliest and latest offset here so the timestamp in the returned // OffsetAndTimestamp is always positive. if (entry.getValue() < 0) throw new IllegalArgumentException("The target time for partition " + entry.getKey() + " is " + entry.getValue() + ". The target time cannot be negative."); } return offsetFetcher.offsetsForTimes(timestampsToSearch, time.timer(timeout)); } finally { release(); } } /** * Get the first offset for the given partitions. * <p> * This method does not change the current consumer position of the partitions. * * @see #seekToBeginning(Collection) * * @param partitions the partitions to get the earliest offsets. * @return The earliest available offsets for the given partitions * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). 
See the exception for more details
     * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
     *         expiration of the configured {@code default.api.timeout.ms}
     */
    @Override
    public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
        return beginningOffsets(partitions, Duration.ofMillis(defaultApiTimeoutMs));
    }

    /**
     * Get the first offset for the given partitions.
     * <p>
     * This method does not change the current consumer position of the partitions.
     *
     * @see #seekToBeginning(Collection)
     *
     * @param partitions the partitions to get the earliest offsets
     * @param timeout The maximum amount of time to await retrieval of the beginning offsets
     *
     * @return The earliest available offsets for the given partitions
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
     * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
     *         expiration of the passed timeout
     */
    @Override
    public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Duration timeout) {
        acquireAndEnsureOpen();
        try {
            return offsetFetcher.beginningOffsets(partitions, time.timer(timeout));
        } finally {
            release();
        }
    }

    /**
     * Get the end offsets for the given partitions. In the default {@code read_uncommitted} isolation level, the end
     * offset is the high watermark (that is, the offset of the last successfully replicated message plus one). For
     * {@code read_committed} consumers, the end offset is the last stable offset (LSO), which is the minimum of
     * the high watermark and the smallest offset of any open transaction. Finally, if the partition has never been
     * written to, the end offset is 0.
     * <p>
     * This method does not change the current consumer position of the partitions.
     *
     * @see #seekToEnd(Collection)
     *
     * @param partitions the partitions to get the end offsets.
     * @return The end offsets for the given partitions.
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
     * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
     *         the amount of time allocated by {@code default.api.timeout.ms} expires
     */
    @Override
    public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
        return endOffsets(partitions, Duration.ofMillis(defaultApiTimeoutMs));
    }

    /**
     * Get the end offsets for the given partitions. In the default {@code read_uncommitted} isolation level, the end
     * offset is the high watermark (that is, the offset of the last successfully replicated message plus one). For
     * {@code read_committed} consumers, the end offset is the last stable offset (LSO), which is the minimum of
     * the high watermark and the smallest offset of any open transaction. Finally, if the partition has never been
     * written to, the end offset is 0.
     * <p>
     * This method does not change the current consumer position of the partitions.
     *
     * @see #seekToEnd(Collection)
     *
     * @param partitions the partitions to get the end offsets.
     * @param timeout The maximum amount of time to await retrieval of the end offsets
     *
     * @return The end offsets for the given partitions.
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
     * @throws org.apache.kafka.common.errors.TimeoutException if the offsets could not be fetched before
     *         expiration of the passed timeout
     */
    @Override
    public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Duration timeout) {
        acquireAndEnsureOpen();
        try {
            return offsetFetcher.endOffsets(partitions, time.timer(timeout));
        } finally {
            release();
        }
    }

    /**
     * Get the consumer's current lag on the partition. Returns an "empty" {@link OptionalLong} if the lag is not
     * known, for example if there is no position yet, or if the end offset is not known yet.
     * <p>
     * This method uses locally cached metadata and never makes a remote call.
     *
     * @param topicPartition The partition to get the lag for.
     *
     * @return This {@code Consumer} instance's current lag for the given partition.
     *
     * @throws IllegalStateException if the {@code topicPartition} is not assigned
     */
    @Override
    public OptionalLong currentLag(TopicPartition topicPartition) {
        acquireAndEnsureOpen();
        try {
            final Long lag = subscriptions.partitionLag(topicPartition, isolationLevel);

            // If the log end offset is not known and hence we cannot return the lag, and there is no in-flight
            // list-offset request yet, issue one for that partition so a later call may get an answer. There is
            // no need to block on the result: we never poll the network client synchronously here.
            if (lag == null) {
                if (subscriptions.partitionEndOffset(topicPartition, isolationLevel) == null &&
                        !subscriptions.partitionEndOffsetRequested(topicPartition)) {
                    log.info("Requesting the log end offset for {} in order to compute lag", topicPartition);
                    subscriptions.requestPartitionEndOffset(topicPartition);
                    offsetFetcher.endOffsets(Collections.singleton(topicPartition), time.timer(0L));
                }

                return OptionalLong.empty();
            }

            return OptionalLong.of(lag);
        } finally {
            release();
        }
    }

    /**
     * Return the current group metadata associated with this consumer.
     *
     * @return consumer group metadata
     * @throws org.apache.kafka.common.errors.InvalidGroupIdException if consumer does not have a group
     */
    @Override
    public ConsumerGroupMetadata groupMetadata() {
        acquireAndEnsureOpen();
        try {
            maybeThrowInvalidGroupIdException();
            return coordinator.groupMetadata();
        } finally {
            release();
        }
    }

    /**
     * Alert the consumer to trigger a new rebalance by rejoining the group. This is a nonblocking call that forces
     * the consumer to trigger a new rebalance on the next {@link #poll(Duration)} call. Note that this API does not
     * itself initiate the rebalance, so you must still call {@link #poll(Duration)}. If a rebalance is already in
     * progress this call will be a no-op. If you wish to force an additional rebalance you must complete the current
     * one by calling poll before retrying this API.
     * <p>
     * You do not need to call this during normal processing, as the consumer group will manage itself
     * automatically and rebalance when necessary. However there may be situations where the application wishes to
     * trigger a rebalance that would otherwise not occur. For example, if some condition external and invisible to
     * the Consumer and its group changes in a way that would affect the userdata encoded in the
     * {@link org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription Subscription}, the Consumer
     * will not be notified and no rebalance will occur. This API can be used to force the group to rebalance so that
     * the assignor can perform a partition reassignment based on the latest userdata. If your assignor does not use
     * this userdata, or you do not use a custom
     * {@link org.apache.kafka.clients.consumer.ConsumerPartitionAssignor ConsumerPartitionAssignor}, you should not
     * use this API.
     *
     * @param reason The reason why the new rebalance is needed.
     *
     * @throws java.lang.IllegalStateException if the consumer does not use group subscription
     */
    @Override
    public void enforceRebalance(final String reason) {
        acquireAndEnsureOpen();
        try {
            if (coordinator == null) {
                throw new IllegalStateException("Tried to force a rebalance but consumer does not have a group.");
            }
            coordinator.requestRejoin(reason == null || reason.isEmpty() ? DEFAULT_REASON : reason);
        } finally {
            release();
        }
    }

    /**
     * @see #enforceRebalance(String)
     */
    @Override
    public void enforceRebalance() {
        enforceRebalance(null);
    }

    /**
     * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup.
     * If auto-commit is enabled, this will commit the current offsets if possible within the default
     * timeout. See {@link #close(Duration)} for details. Note that {@link #wakeup()}
     * cannot be used to interrupt close.
     *
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted
     *             before or while this function is called
     * @throws org.apache.kafka.common.KafkaException for any other error during close
     */
    @Override
    public void close() {
        close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS));
    }

    /**
     * Tries to close the consumer cleanly within the specified timeout. This method waits up to
     * {@code timeout} for the consumer to complete pending commits and leave the group.
     * If auto-commit is enabled, this will commit the current offsets if possible within the
     * timeout. If the consumer is unable to complete offset commits and gracefully leave the group
     * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be
     * used to interrupt close.
     *
     * @param timeout The maximum time to wait for consumer to close gracefully. The value must be
     *                non-negative. Specifying a timeout of zero means do not wait for pending requests to complete.
     *
     * @throws IllegalArgumentException If the {@code timeout} is negative.
     * @throws InterruptException If the thread is interrupted before or while this function is called
     * @throws org.apache.kafka.common.KafkaException for any other error during close
     */
    @Override
    public void close(Duration timeout) {
        if (timeout.toMillis() < 0)
            throw new IllegalArgumentException("The timeout cannot be negative.");
        acquire();
        try {
            if (!closed) {
                // Need to close before setting the flag since the close function itself may trigger a rebalance
                // callback that needs the consumer to still be open.
                close(timeout, false);
            }
        } finally {
            closed = true;
            release();
        }
    }

    /**
     * Wakeup the consumer. This method is thread-safe and is useful in particular to abort a long poll.
     * The thread which is blocking in an operation will throw {@link org.apache.kafka.common.errors.WakeupException}.
     * If no thread is blocking in a method which can throw {@link org.apache.kafka.common.errors.WakeupException},
     * the next call to such a method will raise it instead.
     */
    @Override
    public void wakeup() {
        this.client.wakeup();
    }

    // Collects cluster-resource listeners from the deserializers plus any candidate lists
    // (e.g. interceptors, metrics reporters) into a single registry.
    private ClusterResourceListeners configureClusterResourceListeners(Deserializer<K> keyDeserializer,
                                                                       Deserializer<V> valueDeserializer,
                                                                       List<?>... candidateLists) {
        ClusterResourceListeners listeners = new ClusterResourceListeners();
        for (List<?> candidates : candidateLists)
            listeners.maybeAddAll(candidates);
        listeners.maybeAdd(keyDeserializer);
        listeners.maybeAdd(valueDeserializer);
        return listeners;
    }

    // Creates a timer bounded by both the requested duration and the request timeout.
    private Timer createTimerForRequest(final Duration timeout) {
        // this.time could be null if an exception occurs in the constructor prior to setting the this.time field
        final Time localTime = (time == null) ? Time.SYSTEM : time;
        return localTime.timer(Math.min(timeout.toMillis(), requestTimeoutMs));
    }

    private void close(Duration timeout, boolean swallowException) {
        log.trace("Closing the Kafka consumer");
        AtomicReference<Throwable> firstException = new AtomicReference<>();

        final Timer closeTimer = createTimerForRequest(timeout);
        // Close objects with a timeout. The timeout is required because the coordinator & the fetcher send requests
        // to the server in the process of closing which may not respect the overall timeout defined for closing the
        // consumer.
        if (coordinator != null) {
            // This is a blocking call bound by the time remaining in closeTimer
            Utils.swallow(log, Level.ERROR, "Failed to close coordinator with a timeout(ms)=" + closeTimer.timeoutMs(),
                    () -> coordinator.close(closeTimer), firstException);
        }

        if (fetcher != null) {
            // the timeout for the session close is at-most the requestTimeoutMs
            long remainingDurationInTimeout = Math.max(0, timeout.toMillis() - closeTimer.elapsedMs());
            if (remainingDurationInTimeout > 0) {
                remainingDurationInTimeout = Math.min(requestTimeoutMs, remainingDurationInTimeout);
            }

            closeTimer.reset(remainingDurationInTimeout);

            // This is a blocking call bound by the time remaining in closeTimer
            Utils.swallow(log, Level.ERROR, "Failed to close fetcher with a timeout(ms)=" + closeTimer.timeoutMs(),
                    () -> fetcher.close(closeTimer), firstException);
        }

        Utils.closeQuietly(interceptors, "consumer interceptors", firstException);
        Utils.closeQuietly(kafkaConsumerMetrics, "kafka consumer metrics", firstException);
        Utils.closeQuietly(metrics, "consumer metrics", firstException);
        Utils.closeQuietly(client, "consumer network client", firstException);
        Utils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException);
        Utils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException);
        AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
        log.debug("Kafka consumer has been closed");
        Throwable exception = firstException.get();
        if (exception != null && !swallowException) {
            if (exception instanceof InterruptException) {
                throw (InterruptException) exception;
            }
            throw new KafkaException("Failed to close kafka consumer", exception);
        }
    }

    /**
     * Set the fetch position to the committed position (if there is one)
     * or reset it using the offset reset policy the user has configured.
     *
     * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
     * @throws NoOffsetForPartitionException If no offset is stored for a given partition and no offset reset policy is
     *             defined
     * @return true iff the operation completed without timing out
     */
    private boolean updateFetchPositions(final Timer timer) {
        // If any partitions have been truncated due to a leader change, we need to validate the offsets
        offsetFetcher.validatePositionsIfNeeded();

        cachedSubscriptionHasAllFetchPositions = subscriptions.hasAllFetchPositions();
        if (cachedSubscriptionHasAllFetchPositions)
            return true;

        // If there are any partitions which do not have a valid position and are not
        // awaiting reset, then we need to fetch committed offsets. We will only do a
        // coordinator lookup if there are partitions which have missing positions, so
        // a consumer with manually assigned partitions can avoid a coordinator dependence
        // by always ensuring that assigned partitions have an initial position.
        if (coordinator != null && !coordinator.refreshCommittedOffsetsIfNeeded(timer))
            return false;

        // If there are partitions still needing a position and a reset policy is defined,
        // request reset using the default policy. If no reset strategy is defined and there
        // are partitions with a missing position, then we will raise an exception.
        subscriptions.resetInitializingPositions();

        // Finally send an asynchronous request to look up and update the positions of any
        // partitions which are awaiting reset.
        offsetFetcher.resetPositionsIfNeeded();

        return true;
    }

    /**
     * Acquire the light lock and ensure that the consumer hasn't been closed.
     * @throws IllegalStateException If the consumer has been closed
     */
    private void acquireAndEnsureOpen() {
        acquire();
        if (this.closed) {
            release();
            throw new IllegalStateException("This consumer has already been closed.");
        }
    }

    /**
     * Acquire the light lock protecting this consumer from multi-threaded access. Instead of blocking
     * when the lock is not available, however, we just throw an exception (since multi-threaded usage is not
     * supported).
     * @throws ConcurrentModificationException if another thread already has the lock
     */
    private void acquire() {
        final Thread thread = Thread.currentThread();
        final long threadId = thread.getId();
        if (threadId != currentThread.get() && !currentThread.compareAndSet(NO_CURRENT_THREAD, threadId))
            throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access. " +
                    "currentThread(name: " + thread.getName() + ", id: " + threadId + ")" +
                    " otherThread(id: " + currentThread.get() + ")"
            );
        refcount.incrementAndGet();
    }

    /**
     * Release the light lock protecting the consumer from multi-threaded access.
     */
    private void release() {
        if (refcount.decrementAndGet() == 0)
            currentThread.set(NO_CURRENT_THREAD);
    }

    private void throwIfNoAssignorsConfigured() {
        if (assignors.isEmpty())
            throw new IllegalStateException("Must configure at least one partition assigner class name to " +
                ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " configuration property");
    }

    private void maybeThrowInvalidGroupIdException() {
        if (!groupId.isPresent())
            throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must " +
                "provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.");
    }

    // Propagates a committed offset's leader epoch into the cluster metadata cache when present.
    private void updateLastSeenEpochIfNewer(TopicPartition topicPartition, OffsetAndMetadata offsetAndMetadata) {
        if (offsetAndMetadata != null)
            offsetAndMetadata.leaderEpoch().ifPresent(epoch -> metadata.updateLastSeenEpochIfNewer(topicPartition, epoch));
    }

    // Functions below are for testing only
    String getClientId() {
        return clientId;
    }

    boolean updateAssignmentMetadataIfNeeded(final Timer timer) {
        return updateAssignmentMetadataIfNeeded(timer, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/LogTruncationException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Map;

/**
 * In the event of an unclean leader election, the log will be truncated,
 * previously committed data will be lost, and new data will be written
 * over these offsets. When this happens, the consumer will detect the
 * truncation and raise this exception (if no automatic reset policy
 * has been defined) with the first offset known to diverge from what the
 * consumer previously read.
 */
public class LogTruncationException extends OffsetOutOfRangeException {

    // Immutable view of the first offset known to diverge, per truncated partition.
    private final Map<TopicPartition, OffsetAndMetadata> divergentOffsets;

    public LogTruncationException(String message,
                                  Map<TopicPartition, Long> fetchOffsets,
                                  Map<TopicPartition, OffsetAndMetadata> divergentOffsets) {
        super(message, fetchOffsets);
        this.divergentOffsets = Collections.unmodifiableMap(divergentOffsets);
    }

    /**
     * Get the divergent offsets for the partitions which were truncated. For each
     * partition, this is the first offset which is known to diverge from what the
     * consumer read.
     *
     * Note that there is no guarantee that this offset will be known. It is necessary
     * to use {@link #partitions()} to see the set of partitions that were truncated
     * and then check for the presence of a divergent offset in this map.
     */
    public Map<TopicPartition, OffsetAndMetadata> divergentOffsets() {
        return divergentOffsets;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/MockConsumer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.clients.Metadata;
import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.internals.SubscriptionState;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.utils.LogContext;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static java.util.Collections.singleton;
import static org.apache.kafka.clients.consumer.KafkaConsumer.DEFAULT_CLOSE_TIMEOUT_MS;

/**
 * A mock of the {@link Consumer} interface you can use for testing code that uses Kafka. This class is
 * <i> not threadsafe </i>. However, you can use the {@link #schedulePollTask(Runnable)} method to write
 * multithreaded tests where a driver thread waits for {@link #poll(Duration)} to be called by a background
 * thread and then can safely perform operations during a callback.
 */
public class MockConsumer<K, V> implements Consumer<K, V> {

    private final Map<String, List<PartitionInfo>> partitions;
    private final SubscriptionState subscriptions;
    private final Map<TopicPartition, Long> beginningOffsets;
    private final Map<TopicPartition, Long> endOffsets;
    private final Map<TopicPartition, OffsetAndMetadata> committed;
    private final Queue<Runnable> pollTasks;
    private final Set<TopicPartition> paused;

    private Map<TopicPartition, List<ConsumerRecord<K, V>>> records;
    private KafkaException pollException;
    private KafkaException offsetsException;
    private AtomicBoolean wakeup;
    private Duration lastPollTimeout;
    private boolean closed;
    private boolean shouldRebalance;

    public MockConsumer(OffsetResetStrategy offsetResetStrategy) {
        this.subscriptions = new SubscriptionState(new LogContext(), offsetResetStrategy);
        this.partitions = new HashMap<>();
        this.records = new HashMap<>();
        this.paused = new HashSet<>();
        this.closed = false;
        this.beginningOffsets = new HashMap<>();
        this.endOffsets = new HashMap<>();
        this.pollTasks = new LinkedList<>();
        this.pollException = null;
        this.wakeup = new AtomicBoolean(false);
        this.committed = new HashMap<>();
        this.shouldRebalance = false;
    }

    @Override
    public synchronized Set<TopicPartition> assignment() {
        return this.subscriptions.assignedPartitions();
    }

    /** Simulate a rebalance event. */
    public synchronized void rebalance(Collection<TopicPartition> newAssignment) {
        // TODO: Rebalance callbacks
        this.records.clear();
        this.subscriptions.assignFromSubscribed(newAssignment);
    }

    @Override
    public synchronized Set<String> subscription() {
        return this.subscriptions.subscription();
    }

    @Override
    public synchronized void subscribe(Collection<String> topics) {
        subscribe(topics, new NoOpConsumerRebalanceListener());
    }

    @Override
    public synchronized void subscribe(Pattern pattern, final ConsumerRebalanceListener listener) {
        ensureNotClosed();
        committed.clear();
        this.subscriptions.subscribe(pattern, listener);
        Set<String> topicsToSubscribe = new HashSet<>();
        for (String topic : partitions.keySet()) {
            if (pattern.matcher(topic).matches() && !subscriptions.subscription().contains(topic))
                topicsToSubscribe.add(topic);
        }
        ensureNotClosed();
        this.subscriptions.subscribeFromPattern(topicsToSubscribe);
        final Set<TopicPartition> assignedPartitions = new HashSet<>();
        for (final String topic : topicsToSubscribe) {
            for (final PartitionInfo info : this.partitions.get(topic)) {
                assignedPartitions.add(new TopicPartition(topic, info.partition()));
            }
        }
        subscriptions.assignFromSubscribed(assignedPartitions);
    }

    @Override
    public synchronized void subscribe(Pattern pattern) {
        subscribe(pattern, new NoOpConsumerRebalanceListener());
    }

    @Override
    public synchronized void subscribe(Collection<String> topics, final ConsumerRebalanceListener listener) {
        ensureNotClosed();
        committed.clear();
        this.subscriptions.subscribe(new HashSet<>(topics), listener);
    }

    @Override
    public synchronized void assign(Collection<TopicPartition> partitions) {
        ensureNotClosed();
        committed.clear();
        this.subscriptions.assignFromUser(new HashSet<>(partitions));
    }

    @Override
    public synchronized void unsubscribe() {
        ensureNotClosed();
        committed.clear();
        subscriptions.unsubscribe();
    }

    @Deprecated
    @Override
    public synchronized ConsumerRecords<K, V> poll(long timeout) {
        return poll(Duration.ofMillis(timeout));
    }

    @Override
    public synchronized ConsumerRecords<K, V> poll(final Duration timeout) {
        ensureNotClosed();

        lastPollTimeout = timeout;

        // Synchronize around the entire execution so new tasks to be triggered on subsequent poll calls
        // can be added in the callback
        synchronized (pollTasks) {
            Runnable task = pollTasks.poll();
            if (task != null)
                task.run();
        }

        if (wakeup.get()) {
            wakeup.set(false);
            throw new WakeupException();
        }

        if (pollException != null) {
            RuntimeException exception = this.pollException;
            this.pollException = null;
            throw exception;
        }

        // Handle seeks that need to wait for a poll() call to be processed
        for (TopicPartition tp : subscriptions.assignedPartitions())
            if (!subscriptions.hasValidPosition(tp))
                updateFetchPosition(tp);

        // update the consumed offset
        final Map<TopicPartition, List<ConsumerRecord<K, V>>> results = new HashMap<>();
        final List<TopicPartition> toClear = new ArrayList<>();

        for (Map.Entry<TopicPartition, List<ConsumerRecord<K, V>>> entry : this.records.entrySet()) {
            if (!subscriptions.isPaused(entry.getKey())) {
                final List<ConsumerRecord<K, V>> recs = entry.getValue();
                for (final ConsumerRecord<K, V> rec : recs) {
                    long position = subscriptions.position(entry.getKey()).offset;

                    if (beginningOffsets.get(entry.getKey()) != null && beginningOffsets.get(entry.getKey()) > position) {
                        throw new OffsetOutOfRangeException(Collections.singletonMap(entry.getKey(), position));
                    }

                    if (assignment().contains(entry.getKey()) && rec.offset() >= position) {
                        results.computeIfAbsent(entry.getKey(), partition -> new ArrayList<>()).add(rec);
                        Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(Optional.empty(), rec.leaderEpoch());
                        SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition(
                                rec.offset() + 1, rec.leaderEpoch(), leaderAndEpoch);
                        subscriptions.position(entry.getKey(), newPosition);
                    }
                }
                toClear.add(entry.getKey());
            }
        }

        toClear.forEach(p -> this.records.remove(p));
        return new ConsumerRecords<>(results);
    }

    public synchronized void addRecord(ConsumerRecord<K, V> record) {
        ensureNotClosed();
        TopicPartition tp = new TopicPartition(record.topic(), record.partition());
        Set<TopicPartition> currentAssigned = this.subscriptions.assignedPartitions();
        if (!currentAssigned.contains(tp))
            throw new IllegalStateException("Cannot add records for a partition that is not assigned to the consumer");
        List<ConsumerRecord<K, V>> recs = this.records.computeIfAbsent(tp, k -> new ArrayList<>());
        recs.add(record);
    }

    /**
     * @deprecated Use {@link #setPollException(KafkaException)} instead
     */
    @Deprecated
    public synchronized void setException(KafkaException exception) {
        setPollException(exception);
    }

    public synchronized void setPollException(KafkaException exception) {
        this.pollException = exception;
    }

    public synchronized void setOffsetsException(KafkaException exception) {
        this.offsetsException = exception;
    }

    @Override
    public synchronized void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
        ensureNotClosed();
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet())
            committed.put(entry.getKey(), entry.getValue());
        if (callback != null) {
            callback.onComplete(offsets, null);
        }
    }

    @Override
    public synchronized void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) {
        commitAsync(offsets, null);
    }

    @Override
    public synchronized void commitAsync() {
        commitAsync(null);
    }

    @Override
    public synchronized void commitAsync(OffsetCommitCallback callback) {
        ensureNotClosed();
        commitAsync(this.subscriptions.allConsumed(), callback);
    }

    @Override
    public synchronized void commitSync() {
        commitSync(this.subscriptions.allConsumed());
    }

    @Override
    public synchronized void commitSync(Duration timeout) {
        commitSync(this.subscriptions.allConsumed());
    }

    @Override
    public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, final Duration timeout) {
        commitSync(offsets);
    }

    @Override
    public synchronized void seek(TopicPartition partition, long offset) {
        ensureNotClosed();
        subscriptions.seek(partition, offset);
    }

    @Override
    public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) {
        ensureNotClosed();
        subscriptions.seek(partition, offsetAndMetadata.offset());
    }

    @Deprecated
    @Override
    public synchronized OffsetAndMetadata committed(final TopicPartition partition) {
        return committed(singleton(partition)).get(partition);
    }

    @Deprecated
    @Override
    public OffsetAndMetadata committed(final TopicPartition partition, final Duration timeout) {
        return committed(partition);
    }

    @Override
    public synchronized Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
        ensureNotClosed();

        return partitions.stream()
                .filter(committed::containsKey)
                .collect(Collectors.toMap(tp -> tp,
                        tp -> subscriptions.isAssigned(tp) ? committed.get(tp) : new OffsetAndMetadata(0)));
    }

    @Override
    public synchronized Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions, final Duration timeout) {
        return committed(partitions);
    }

    @Override
    public synchronized long position(TopicPartition partition) {
        ensureNotClosed();
        if (!this.subscriptions.isAssigned(partition))
            throw new IllegalArgumentException("You can only check the position for partitions assigned to this consumer.");
        SubscriptionState.FetchPosition position = this.subscriptions.position(partition);
        if (position == null) {
            updateFetchPosition(partition);
            position = this.subscriptions.position(partition);
        }
        return position.offset;
    }

    @Override
    public synchronized long position(TopicPartition partition, final Duration timeout) {
        return position(partition);
    }

    @Override
    public synchronized void seekToBeginning(Collection<TopicPartition> partitions) {
        ensureNotClosed();
        subscriptions.requestOffsetReset(partitions, OffsetResetStrategy.EARLIEST);
    }

    public synchronized void updateBeginningOffsets(Map<TopicPartition, Long> newOffsets) {
        beginningOffsets.putAll(newOffsets);
    }

    @Override
    public synchronized void seekToEnd(Collection<TopicPartition> partitions) {
        ensureNotClosed();
        subscriptions.requestOffsetReset(partitions, OffsetResetStrategy.LATEST);
    }

    public synchronized void updateEndOffsets(final Map<TopicPartition, Long> newOffsets) {
        endOffsets.putAll(newOffsets);
    }

    @Override
    public synchronized Map<MetricName, ? extends Metric> metrics() {
        ensureNotClosed();
        return Collections.emptyMap();
    }

    @Override
    public synchronized List<PartitionInfo> partitionsFor(String topic) {
        ensureNotClosed();
        return this.partitions.getOrDefault(topic, Collections.emptyList());
    }

    @Override
    public synchronized Map<String, List<PartitionInfo>> listTopics() {
        ensureNotClosed();
        return partitions;
    }

    public synchronized void updatePartitions(String topic, List<PartitionInfo> partitions) {
        ensureNotClosed();
        this.partitions.put(topic, partitions);
    }

    @Override
    public synchronized void pause(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
            subscriptions.pause(partition);
            paused.add(partition);
        }
    }

    @Override
    public synchronized void resume(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
            subscriptions.resume(partition);
            paused.remove(partition);
        }
    }

    @Override
    public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
        throw new UnsupportedOperationException("Not implemented yet.");
    }

    @Override
    public synchronized Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
        if (offsetsException != null) {
            RuntimeException exception = this.offsetsException;
            this.offsetsException = null;
            throw exception;
        }
        Map<TopicPartition, Long> result = new HashMap<>();
        for (TopicPartition tp : partitions) {
            Long beginningOffset = beginningOffsets.get(tp);
            if (beginningOffset == null)
                throw new IllegalStateException("The partition " + tp + " does not have a beginning offset.");
            result.put(tp, beginningOffset);
        }
        return result;
    }

    @Override
    public synchronized Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
        if (offsetsException != null) {
            RuntimeException exception = this.offsetsException;
            this.offsetsException = null;
            throw exception;
        }
        Map<TopicPartition, Long> result = new HashMap<>();
        for (TopicPartition tp : partitions) {
            Long endOffset = endOffsets.get(tp);
            if (endOffset == null)
                throw new IllegalStateException("The partition " + tp + " does not have an end offset.");
            result.put(tp, endOffset);
        }
        return result;
    }

    @Override
    public void close() {
        close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS));
    }

    @Override
    public synchronized void close(Duration timeout) {
        this.closed = true;
    }

    public synchronized boolean closed() {
        return this.closed;
    }

    @Override
    public synchronized void wakeup() {
        wakeup.set(true);
    }

    /**
     * Schedule a task to be executed during a poll(). One enqueued task will be executed per {@link #poll(Duration)}
     * invocation. You can use this repeatedly to mock out multiple responses to poll invocations.
     * @param task the task to be executed
     */
    public synchronized void schedulePollTask(Runnable task) {
        synchronized (pollTasks) {
            pollTasks.add(task);
        }
    }

    public synchronized void scheduleNopPollTask() {
        schedulePollTask(() -> { });
    }

    public synchronized Set<TopicPartition> paused() {
        return Collections.unmodifiableSet(new HashSet<>(paused));
    }

    private void ensureNotClosed() {
        if (this.closed)
            throw new IllegalStateException("This consumer has already been closed.");
    }

    // Resolves a missing fetch position: reset if a reset is pending, reset if nothing was
    // ever committed, otherwise seek to the committed offset.
    private void updateFetchPosition(TopicPartition tp) {
        if (subscriptions.isOffsetResetNeeded(tp)) {
            resetOffsetPosition(tp);
        } else if (!committed.containsKey(tp)) {
            subscriptions.requestOffsetReset(tp);
            resetOffsetPosition(tp);
        } else {
            subscriptions.seek(tp, committed.get(tp).offset());
        }
    }

    // Applies the configured reset strategy using the mock's beginning/end offsets.
    private void resetOffsetPosition(TopicPartition tp) {
        OffsetResetStrategy strategy = subscriptions.resetStrategy(tp);
        Long offset;
        if (strategy == OffsetResetStrategy.EARLIEST) {
            offset = beginningOffsets.get(tp);
            if (offset == null)
                throw new IllegalStateException("MockConsumer didn't have beginning offset specified, but tried to seek to beginning");
        } else if (strategy == OffsetResetStrategy.LATEST) {
            offset = endOffsets.get(tp);
            if (offset == null)
                throw new IllegalStateException("MockConsumer didn't have end offset specified, but tried to seek to end");
        } else {
            throw new NoOffsetForPartitionException(tp);
        }
        seek(tp, offset);
    }

    @Override
    public List<PartitionInfo> partitionsFor(String topic, Duration timeout) {
        return partitionsFor(topic);
    }

    @Override
    public Map<String, List<PartitionInfo>> listTopics(Duration timeout) {
        return listTopics();
    }

    @Override
    public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Duration timeout) {
        return offsetsForTimes(timestampsToSearch);
    }

    @Override
    public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Duration timeout) {
        return beginningOffsets(partitions);
    }

    @Override
    public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Duration timeout) {
        return endOffsets(partitions);
    }

    @Override
    public OptionalLong currentLag(TopicPartition topicPartition) {
        if (endOffsets.containsKey(topicPartition)) {
            return OptionalLong.of(endOffsets.get(topicPartition) - position(topicPartition));
        } else {
            // if the test doesn't bother to set an end offset, we assume it wants to model being caught up.
            return OptionalLong.of(0L);
        }
    }

    @Override
    public ConsumerGroupMetadata groupMetadata() {
        return new ConsumerGroupMetadata("dummy.group.id", 1, "1", Optional.empty());
    }

    @Override
    public void enforceRebalance() {
        enforceRebalance(null);
    }

    @Override
    public void enforceRebalance(final String reason) {
        shouldRebalance = true;
    }

    public boolean shouldRebalance() {
        return shouldRebalance;
    }

    public void resetShouldRebalance() {
        shouldRebalance = false;
    }

    public Duration lastPollTimeout() {
        return lastPollTimeout;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

/**
 * Indicates that there is no stored offset for a partition and no defined offset
 * reset policy.
 */
public class NoOffsetForPartitionException extends InvalidOffsetException {

    private static final long serialVersionUID = 1L;

    // The affected partitions; immutable once constructed.
    private final Set<TopicPartition> partitions;

    /**
     * @param partition the single partition that has no stored offset and no reset policy
     */
    public NoOffsetForPartitionException(TopicPartition partition) {
        super("Undefined offset with no reset policy for partition: " + partition);
        this.partitions = Collections.singleton(partition);
    }

    /**
     * @param partitions the partitions that have no stored offset and no reset policy
     */
    public NoOffsetForPartitionException(Collection<TopicPartition> partitions) {
        super("Undefined offset with no reset policy for partitions: " + partitions);
        this.partitions = Collections.unmodifiableSet(new HashSet<>(partitions));
    }

    /**
     * Returns all partitions for which no offsets are defined.
     * @return all partitions without offsets
     */
    public Set<TopicPartition> partitions() {
        return partitions;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/OffsetAndMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.requests.OffsetFetchResponse;

import java.io.Serializable;
import java.util.Objects;
import java.util.Optional;

/**
 * The Kafka offset commit API allows users to provide additional metadata (in the form of a string)
 * when an offset is committed. This can be useful (for example) to store information about which
 * node made the commit, what time the commit was made, etc.
 */
public class OffsetAndMetadata implements Serializable {
    private static final long serialVersionUID = 2019555404968089681L;

    private final long offset;
    private final String metadata;

    // We use null to represent the absence of a leader epoch to simplify serialization.
    // I.e., older serializations of this class which do not have this field will automatically
    // initialize its value to null.
    private final Integer leaderEpoch;

    /**
     * Construct a new OffsetAndMetadata object for committing through {@link KafkaConsumer}.
     *
     * @param offset      The offset to be committed
     * @param leaderEpoch Optional leader epoch of the last consumed record
     * @param metadata    Non-null metadata
     */
    public OffsetAndMetadata(long offset, Optional<Integer> leaderEpoch, String metadata) {
        if (offset < 0)
            throw new IllegalArgumentException("Invalid negative offset");

        this.offset = offset;
        this.leaderEpoch = leaderEpoch.orElse(null);

        // The server converts null metadata to an empty string, so the client stores it
        // the same way to stay consistent.
        this.metadata = (metadata == null) ? OffsetFetchResponse.NO_METADATA : metadata;
    }

    /**
     * Construct a new OffsetAndMetadata object for committing through {@link KafkaConsumer}.
     *
     * @param offset   The offset to be committed
     * @param metadata Non-null metadata
     */
    public OffsetAndMetadata(long offset, String metadata) {
        this(offset, Optional.empty(), metadata);
    }

    /**
     * Construct a new OffsetAndMetadata object for committing through {@link KafkaConsumer}.
     * The metadata associated with the commit will be empty.
     *
     * @param offset The offset to be committed
     */
    public OffsetAndMetadata(long offset) {
        this(offset, "");
    }

    /** @return the offset to be committed */
    public long offset() {
        return offset;
    }

    /** @return the metadata associated with this commit (never null) */
    public String metadata() {
        return metadata;
    }

    /**
     * Get the leader epoch of the previously consumed record (if one is known). Log truncation is detected
     * if there exists a leader epoch which is larger than this epoch and begins at an offset earlier than
     * the committed offset.
     *
     * @return the leader epoch or empty if not known
     */
    public Optional<Integer> leaderEpoch() {
        // Negative epochs mean "unknown" and are reported as absent, same as null.
        return Optional.ofNullable(leaderEpoch).filter(epoch -> epoch >= 0);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        OffsetAndMetadata other = (OffsetAndMetadata) o;
        return this.offset == other.offset
                && Objects.equals(this.metadata, other.metadata)
                && Objects.equals(this.leaderEpoch, other.leaderEpoch);
    }

    @Override
    public int hashCode() {
        return Objects.hash(offset, metadata, leaderEpoch);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("OffsetAndMetadata{");
        sb.append("offset=").append(offset)
          .append(", leaderEpoch=").append(leaderEpoch)
          .append(", metadata='").append(metadata).append("'}");
        return sb.toString();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/OffsetAndTimestamp.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import java.util.Objects;
import java.util.Optional;

/**
 * A container class for offset and timestamp.
 */
public final class OffsetAndTimestamp {
    private final long timestamp;
    private final long offset;
    private final Optional<Integer> leaderEpoch;

    public OffsetAndTimestamp(long offset, long timestamp) {
        this(offset, timestamp, Optional.empty());
    }

    public OffsetAndTimestamp(long offset, long timestamp, Optional<Integer> leaderEpoch) {
        // Reject negative values up front; offset is validated before timestamp.
        if (offset < 0)
            throw new IllegalArgumentException("Invalid negative offset");
        if (timestamp < 0)
            throw new IllegalArgumentException("Invalid negative timestamp");

        this.offset = offset;
        this.timestamp = timestamp;
        this.leaderEpoch = leaderEpoch;
    }

    /** @return the timestamp associated with this offset */
    public long timestamp() {
        return timestamp;
    }

    /** @return the offset that was found */
    public long offset() {
        return offset;
    }

    /**
     * Get the leader epoch corresponding to the offset that was found (if one exists).
     * This can be provided to seek() to ensure that the log hasn't been truncated prior to fetching.
     *
     * @return The leader epoch or empty if it is not known
     */
    public Optional<Integer> leaderEpoch() {
        return leaderEpoch;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("(timestamp=");
        sb.append(timestamp)
          .append(", leaderEpoch=").append(leaderEpoch.orElse(null))
          .append(", offset=").append(offset)
          .append(")");
        return sb.toString();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        OffsetAndTimestamp other = (OffsetAndTimestamp) o;
        return this.timestamp == other.timestamp
                && this.offset == other.offset
                && Objects.equals(this.leaderEpoch, other.leaderEpoch);
    }

    @Override
    public int hashCode() {
        return Objects.hash(timestamp, offset, leaderEpoch);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/OffsetCommitCallback.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collection;
import java.util.Map;

/**
 * A callback interface that the user can implement to trigger custom actions when a commit request completes. The callback
 * may be executed in any thread calling {@link Consumer#poll(java.time.Duration) poll()}.
 */
public interface OffsetCommitCallback {

    /**
     * A callback method the user can implement to provide asynchronous handling of commit request completion.
     * This method will be called when the commit request sent to the server has been acknowledged.
     * Exactly one of {@code offsets} or {@code exception} describes the outcome: on success
     * {@code exception} is null; on failure it carries the cause.
     *
     * @param offsets A map of the offsets and associated metadata that this callback applies to
     * @param exception The exception thrown during processing of the request, or null if the commit completed successfully
     *
     * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried.
     *             This can only occur if you are using automatic group management with {@link KafkaConsumer#subscribe(Collection)},
     *             or if there is an active group with the same groupId which is using group management.
     * @throws org.apache.kafka.common.errors.RebalanceInProgressException if the commit failed because
     *             it is in the middle of a rebalance. In such cases
     *             commit could be retried after the rebalance is completed with the {@link KafkaConsumer#poll(Duration)} call.
     * @throws org.apache.kafka.common.errors.WakeupException if {@link KafkaConsumer#wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
     *             configured groupId. See the exception for more details
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata
     *             is too large or if the committed offset is invalid).
     */
    void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception);
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/OffsetOutOfRangeException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.Set;

/**
 * No reset policy has been defined, and the offsets for these partitions are either larger or smaller
 * than the range of offsets the server has for the given partition.
 */
public class OffsetOutOfRangeException extends InvalidOffsetException {

    private static final long serialVersionUID = 1L;

    // Maps each affected partition to the out-of-range offset that was requested.
    // NOTE(review): the map is stored as passed in, without a defensive copy —
    // callers should not mutate it after constructing the exception.
    private final Map<TopicPartition, Long> offsetOutOfRangePartitions;

    /**
     * @param offsetOutOfRangePartitions the partitions mapped to the out-of-range fetch offsets
     */
    public OffsetOutOfRangeException(Map<TopicPartition, Long> offsetOutOfRangePartitions) {
        this("Offsets out of range with no configured reset policy for partitions: " +
            offsetOutOfRangePartitions, offsetOutOfRangePartitions);
    }

    /**
     * @param message the exception message
     * @param offsetOutOfRangePartitions the partitions mapped to the out-of-range fetch offsets
     */
    public OffsetOutOfRangeException(String message, Map<TopicPartition, Long> offsetOutOfRangePartitions) {
        super(message);
        this.offsetOutOfRangePartitions = offsetOutOfRangePartitions;
    }

    /**
     * Get a map of the topic partitions and the respective out-of-range fetch offsets.
     *
     * @return the affected partitions mapped to the requested offsets
     */
    public Map<TopicPartition, Long> offsetOutOfRangePartitions() {
        return offsetOutOfRangePartitions;
    }

    @Override
    public Set<TopicPartition> partitions() {
        return offsetOutOfRangePartitions.keySet();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/OffsetResetStrategy.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import java.util.Locale;

/**
 * The strategy to apply when a consumer has no valid committed offset:
 * seek to the latest offset, the earliest offset, or fail (none).
 */
public enum OffsetResetStrategy {
    LATEST, EARLIEST, NONE;

    /**
     * Renders the strategy in lowercase, matching the string form used in
     * the {@code auto.offset.reset} configuration.
     */
    @Override
    public String toString() {
        // name() is locale-independent; lowercase with ROOT to keep the
        // config value stable regardless of the default locale.
        return name().toLowerCase(Locale.ROOT);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/RangeAssignor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor;
import org.apache.kafka.clients.consumer.internals.Utils.TopicPartitionComparator;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * <p>The range assignor works on a per-topic basis. For each topic, we lay out the available partitions in numeric order
 * and the consumers in lexicographic order. We then divide the number of partitions by the total number of
 * consumers to determine the number of partitions to assign to each consumer. If it does not evenly
 * divide, then the first few consumers will have one extra partition.
 *
 * <p>For example, suppose there are two consumers <code>C0</code> and <code>C1</code>, two topics <code>t0</code> and
 * <code>t1</code>, and each topic has 3 partitions, resulting in partitions <code>t0p0</code>, <code>t0p1</code>,
 * <code>t0p2</code>, <code>t1p0</code>, <code>t1p1</code>, and <code>t1p2</code>.
 *
 * <p>The assignment will be:
 * <ul>
 * <li><code>C0: [t0p0, t0p1, t1p0, t1p1]</code></li>
 * <li><code>C1: [t0p2, t1p2]</code></li>
 * </ul>
 *
 * Since the introduction of static membership, we could leverage <code>group.instance.id</code> to make the assignment behavior more sticky.
 * For the above example, after one rolling bounce, group coordinator will attempt to assign new <code>member.id</code> towards consumers,
 * for example <code>C0</code> -&gt; <code>C3</code> <code>C1</code> -&gt; <code>C2</code>.
 *
 * <p>The assignment could be completely shuffled to:
 * <ul>
 * <li><code>C3 (was C0): [t0p2, t1p2] (before was [t0p0, t0p1, t1p0, t1p1])</code>
 * <li><code>C2 (was C1): [t0p0, t0p1, t1p0, t1p1] (before was [t0p2, t1p2])</code>
 * </ul>
 *
 * The assignment change was caused by the change of <code>member.id</code> relative order, and
 * can be avoided by setting the group.instance.id.
 * Consumers will have individual instance ids <code>I1</code>, <code>I2</code>. As long as
 * 1. Number of members remain the same across generation
 * 2. Static members' identities persist across generation
 * 3. Subscription pattern doesn't change for any member
 *
 * <p>The assignment will always be:
 * <ul>
 * <li><code>I0: [t0p0, t0p1, t1p0, t1p1]</code>
 * <li><code>I1: [t0p2, t1p2]</code>
 * </ul>
 * <p>
 * Rack-aware assignment is used if both consumer and partition replica racks are available and
 * some partitions have replicas only on a subset of racks. We attempt to match consumer racks with
 * partition replica racks on a best-effort basis, prioritizing balanced assignment over rack alignment.
 * Topics with equal partition count and same set of subscribers guarantee co-partitioning by prioritizing
 * co-partitioning over rack alignment. In this case, aligning partition replicas of these topics on the
 * same racks will improve locality for consumers. For example, if partitions 0 of all topics have a replica
 * on rack 'a', partition 1 on rack 'b' etc., partition 0 of all topics can be assigned to a consumer
 * on rack 'a', partition 1 to a consumer on rack 'b' and so on.
 * <p>
 * Note that rack-aware assignment currently takes all replicas into account, including any offline replicas
 * and replicas that are not in the ISR. This is based on the assumption that these replicas are likely
 * to join the ISR relatively soon. Since consumers don't rebalance on ISR change, this avoids unnecessary
 * cross-rack traffic for long durations after replicas rejoin the ISR. In the future, we may consider
 * rebalancing when replicas are added or removed to improve consumer rack alignment.
 * </p>
 */
public class RangeAssignor extends AbstractPartitionAssignor {
    public static final String RANGE_ASSIGNOR_NAME = "range";

    private static final TopicPartitionComparator PARTITION_COMPARATOR = new TopicPartitionComparator();

    @Override
    public String name() {
        return RANGE_ASSIGNOR_NAME;
    }

    /**
     * Inverts the member-to-subscription map into topic-to-members, preserving
     * each member's group instance id and rack for later sorting/rack matching.
     */
    private Map<String, List<MemberInfo>> consumersPerTopic(Map<String, Subscription> consumerMetadata) {
        Map<String, List<MemberInfo>> topicToConsumers = new HashMap<>();
        consumerMetadata.forEach((consumerId, subscription) -> {
            MemberInfo memberInfo = new MemberInfo(consumerId, subscription.groupInstanceId(), subscription.rackId());
            subscription.topics().forEach(topic -> put(topicToConsumers, topic, memberInfo));
        });
        return topicToConsumers;
    }

    /**
     * Performs range assignment of the specified partitions for the consumers with the provided subscriptions.
     * If rack-awareness is enabled for one or more consumers, we perform rack-aware assignment first to assign
     * the subset of partitions that can be aligned on racks, while retaining the same co-partitioning and
     * per-topic balancing guarantees as non-rack-aware range assignment. The remaining partitions are assigned
     * using standard non-rack-aware range assignment logic, which may result in mis-aligned racks.
     */
    @Override
    public Map<String, List<TopicPartition>> assignPartitions(Map<String, List<PartitionInfo>> partitionsPerTopic,
                                                              Map<String, Subscription> subscriptions) {
        Map<String, List<MemberInfo>> consumersPerTopic = consumersPerTopic(subscriptions);
        Map<String, String> consumerRacks = consumerRacks(subscriptions);
        // One mutable per-topic assignment state per non-empty topic.
        List<TopicAssignmentState> topicAssignmentStates = partitionsPerTopic.entrySet().stream()
                .filter(e -> !e.getValue().isEmpty())
                .map(e -> new TopicAssignmentState(e.getKey(), e.getValue(), consumersPerTopic.get(e.getKey()), consumerRacks))
                .collect(Collectors.toList());

        Map<String, List<TopicPartition>> assignment = new HashMap<>();
        subscriptions.keySet().forEach(memberId -> assignment.put(memberId, new ArrayList<>()));

        boolean useRackAware = topicAssignmentStates.stream().anyMatch(t -> t.needsRackAwareAssignment);
        // Rack-aware pass first (only assigns partitions whose racks can be matched) ...
        if (useRackAware)
            assignWithRackMatching(topicAssignmentStates, assignment);

        // ... then a standard range pass over whatever is still unassigned.
        topicAssignmentStates.forEach(t -> assignRanges(t, (c, tp) -> true, assignment));

        // The two passes can interleave partition order; restore deterministic ordering.
        if (useRackAware)
            assignment.values().forEach(list -> list.sort(PARTITION_COMPARATOR));
        return assignment;
    }

    // This method is not used, but retained for compatibility with any custom assignors that extend this class.
    @Override
    public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                    Map<String, Subscription> subscriptions) {
        return assignPartitions(partitionInfosWithoutRacks(partitionsPerTopic), subscriptions);
    }

    /**
     * Assigns each consumer, in order, up to its quota of the topic's still-unassigned
     * partitions, limited to partitions accepted by {@code mayAssign}.
     */
    private void assignRanges(TopicAssignmentState assignmentState,
                              BiFunction<String, TopicPartition, Boolean> mayAssign,
                              Map<String, List<TopicPartition>> assignment) {
        for (String consumer : assignmentState.consumers.keySet()) {
            if (assignmentState.unassignedPartitions.isEmpty())
                break;
            List<TopicPartition> assignablePartitions = assignmentState.unassignedPartitions.stream()
                    .filter(tp -> mayAssign.apply(consumer, tp))
                    .limit(assignmentState.maxAssignable(consumer))
                    .collect(Collectors.toList());
            if (assignablePartitions.isEmpty())
                continue;
            assign(consumer, assignablePartitions, assignmentState, assignment);
        }
    }

    /**
     * Rack-aware pass: groups topics by identical consumer sets, and within a group
     * by partition count, so that co-partitioned topics are assigned together.
     */
    private void assignWithRackMatching(Collection<TopicAssignmentState> assignmentStates,
                                        Map<String, List<TopicPartition>> assignment) {
        assignmentStates.stream().collect(Collectors.groupingBy(t -> t.consumers)).forEach((consumers, states) -> {
            states.stream().collect(Collectors.groupingBy(t -> t.partitionRacks.size())).forEach((numPartitions, coPartitionedStates) -> {
                if (coPartitionedStates.size() > 1)
                    // Multiple topics with the same subscribers and partition count:
                    // keep them co-partitioned while matching racks.
                    assignCoPartitionedWithRackMatching(consumers, numPartitions, coPartitionedStates, assignment);
                else {
                    TopicAssignmentState state = coPartitionedStates.get(0);
                    if (state.needsRackAwareAssignment)
                        assignRanges(state, state::racksMatch, assignment);
                }
            });
        });
    }

    /**
     * Assigns partition {@code p} of every co-partitioned topic to the same consumer,
     * choosing the first remaining consumer whose rack matches in all topics and
     * that still has quota in all of them.
     */
    private void assignCoPartitionedWithRackMatching(LinkedHashMap<String, Optional<String>> consumers,
                                                     int numPartitions,
                                                     Collection<TopicAssignmentState> assignmentStates,
                                                     Map<String, List<TopicPartition>> assignment) {
        Set<String> remainingConsumers = new LinkedHashSet<>(consumers.keySet());
        for (int i = 0; i < numPartitions; i++) {
            int p = i; // effectively-final copy for use inside the lambdas below
            Optional<String> matchingConsumer = remainingConsumers.stream()
                    .filter(c -> assignmentStates.stream().allMatch(t -> t.racksMatch(c, new TopicPartition(t.topic, p)) && t.maxAssignable(c) > 0))
                    .findFirst();
            if (matchingConsumer.isPresent()) {
                String consumer = matchingConsumer.get();
                assignmentStates.forEach(t -> assign(consumer, Collections.singletonList(new TopicPartition(t.topic, p)), t, assignment));

                // Drop the consumer from contention once its quota is exhausted in every topic.
                if (assignmentStates.stream().noneMatch(t -> t.maxAssignable(consumer) > 0)) {
                    remainingConsumers.remove(consumer);
                    if (remainingConsumers.isEmpty())
                        break;
                }
            }
        }
    }

    /** Records the assignment in the result map and updates the per-topic bookkeeping. */
    private void assign(String consumer, List<TopicPartition> partitions, TopicAssignmentState assignmentState, Map<String, List<TopicPartition>> assignment) {
        assignment.get(consumer).addAll(partitions);
        assignmentState.onAssigned(consumer, partitions);
    }

    /** Collects each member's non-empty rack id, keyed by member id. */
    private Map<String, String> consumerRacks(Map<String, Subscription> subscriptions) {
        Map<String, String> consumerRacks = new HashMap<>(subscriptions.size());
        subscriptions.forEach((memberId, subscription) ->
            subscription.rackId().filter(r -> !r.isEmpty()).ifPresent(rackId -> consumerRacks.put(memberId, rackId)));
        return consumerRacks;
    }

    /**
     * Mutable per-topic assignment bookkeeping: the subscribed consumers (in sorted
     * order, with optional racks), the unassigned partitions, per-consumer quotas,
     * and the replica racks of each partition.
     */
    private class TopicAssignmentState {
        private final String topic;
        // Sorted member id -> optional consumer rack; iteration order defines assignment order.
        private final LinkedHashMap<String, Optional<String>> consumers;
        private final boolean needsRackAwareAssignment;
        // Partition -> racks of all its replicas (empty map when no consumer has a rack).
        private final Map<TopicPartition, Set<String>> partitionRacks;

        private final Set<TopicPartition> unassignedPartitions;
        private final Map<String, Integer> numAssignedByConsumer;
        // Base quota; consumers at the front of the sorted order absorb the remainder.
        private final int numPartitionsPerConsumer;
        private int remainingConsumersWithExtraPartition;

        public TopicAssignmentState(String topic, List<PartitionInfo> partitionInfos, List<MemberInfo> membersOrNull, Map<String, String> consumerRacks) {
            this.topic = topic;
            List<MemberInfo> members = membersOrNull == null ? Collections.emptyList() : membersOrNull;
            // Sort members so assignment order is deterministic (and sticky with static membership).
            Collections.sort(members);
            consumers = members.stream().map(c -> c.memberId)
                    .collect(Collectors.toMap(Function.identity(), c -> Optional.ofNullable(consumerRacks.get(c)), (a, b) -> a, LinkedHashMap::new));

            this.unassignedPartitions = partitionInfos.stream().map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toCollection(LinkedHashSet::new));
            this.numAssignedByConsumer = consumers.keySet().stream().collect(Collectors.toMap(Function.identity(), c -> 0));
            numPartitionsPerConsumer = consumers.isEmpty() ? 0 : partitionInfos.size() / consumers.size();
            remainingConsumersWithExtraPartition = consumers.isEmpty() ? 0 : partitionInfos.size() % consumers.size();

            Set<String> allConsumerRacks = new HashSet<>();
            Set<String> allPartitionRacks = new HashSet<>();
            members.stream().map(m -> m.memberId).filter(consumerRacks::containsKey)
                    .forEach(memberId -> allConsumerRacks.add(consumerRacks.get(memberId)));
            if (!allConsumerRacks.isEmpty()) {
                // Only compute partition racks when at least one consumer has a rack;
                // otherwise rack matching can never apply.
                partitionRacks = new HashMap<>(partitionInfos.size());
                partitionInfos.forEach(p -> {
                    TopicPartition tp = new TopicPartition(p.topic(), p.partition());
                    Set<String> racks = Arrays.stream(p.replicas()).map(Node::rack).filter(Objects::nonNull).collect(Collectors.toSet());
                    partitionRacks.put(tp, racks);
                    allPartitionRacks.addAll(racks);
                });
            } else {
                partitionRacks = Collections.emptyMap();
            }

            needsRackAwareAssignment = useRackAwareAssignment(allConsumerRacks, allPartitionRacks, partitionRacks);
        }

        /**
         * True when the consumer has no rack, or the partition has a replica on the
         * consumer's rack.
         */
        boolean racksMatch(String consumer, TopicPartition tp) {
            Optional<String> consumerRack = consumers.get(consumer);
            Set<String> replicaRacks = partitionRacks.get(tp);
            return !consumerRack.isPresent() || (replicaRacks != null && replicaRacks.contains(consumerRack.get()));
        }

        /**
         * Remaining quota for the consumer: base share, plus one while extra
         * partitions remain to distribute, minus what it already holds.
         */
        int maxAssignable(String consumer) {
            int maxForConsumer = numPartitionsPerConsumer + (remainingConsumersWithExtraPartition > 0 ? 1 : 0) - numAssignedByConsumer.get(consumer);
            return Math.max(0, maxForConsumer);
        }

        /** Updates quotas and removes the newly assigned partitions from the unassigned set. */
        void onAssigned(String consumer, List<TopicPartition> newlyAssignedPartitions) {
            int numAssigned = numAssignedByConsumer.compute(consumer, (c, n) -> n + newlyAssignedPartitions.size());
            // Crossing the base quota means this consumer consumed one of the "extra" slots.
            if (numAssigned > numPartitionsPerConsumer)
                remainingConsumersWithExtraPartition--;
            unassignedPartitions.removeAll(newlyAssignedPartitions);
        }

        @Override
        public String toString() {
            return "TopicAssignmentState(" +
                    "topic=" + topic +
                    ", consumers=" + consumers +
                    ", partitionRacks=" + partitionRacks +
                    ", unassignedPartitions=" + unassignedPartitions +
                    ")";
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/RetriableCommitFailedException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.common.errors.RetriableException;

/**
 * Indicates that an offset commit failed for a transient reason and may
 * succeed if the commit of the latest consumed offsets is retried.
 */
public class RetriableCommitFailedException extends RetriableException {

    private static final long serialVersionUID = 1L;

    /**
     * @param t the retriable cause of the commit failure
     */
    public RetriableCommitFailedException(Throwable t) {
        super("Offset commit failed with a retriable exception. You should retry committing " +
            "the latest consumed offsets.", t);
    }

    /**
     * @param message the detail message
     */
    public RetriableCommitFailedException(String message) {
        super(message);
    }

    /**
     * @param message the detail message
     * @param t the retriable cause of the commit failure
     */
    public RetriableCommitFailedException(String message, Throwable t) {
        super(message, t);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignor;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.CircularIterator;
import org.apache.kafka.common.utils.Utils;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

/**
 * <p>The round robin assignor lays out all the available partitions and all the available consumers. It
 * then proceeds to do a round robin assignment from partition to consumer. If the subscriptions of all consumer
 * instances are identical, then the partitions will be uniformly distributed. (i.e., the partition ownership counts
 * will be within a delta of exactly one across all consumers.)
 *
 * <p>For example, suppose there are two consumers <code>C0</code> and <code>C1</code>, two topics <code>t0</code> and
 * <code>t1</code>, and each topic has 3 partitions, resulting in partitions <code>t0p0</code>, <code>t0p1</code>,
 * <code>t0p2</code>, <code>t1p0</code>, <code>t1p1</code>, and <code>t1p2</code>.
 *
 * <p>The assignment will be:
 * <ul>
 * <li><code>C0: [t0p0, t0p2, t1p1]</code>
 * <li><code>C1: [t0p1, t1p0, t1p2]</code>
 * </ul>
 *
 * <p>When subscriptions differ across consumer instances, the assignment process still considers each
 * consumer instance in round robin fashion but skips over an instance if it is not subscribed to
 * the topic. Unlike the case when subscriptions are identical, this can result in imbalanced
 * assignments. For example, we have three consumers <code>C0</code>, <code>C1</code>, <code>C2</code>,
 * and three topics <code>t0</code>, <code>t1</code>, <code>t2</code>, with 1, 2, and 3 partitions, respectively.
 * Therefore, the partitions are <code>t0p0</code>, <code>t1p0</code>, <code>t1p1</code>, <code>t2p0</code>,
 * <code>t2p1</code>, <code>t2p2</code>.
 * <code>C0</code> is subscribed to <code>t0</code>;
 * <code>C1</code> is subscribed to <code>t0</code>, <code>t1</code>;
 * and <code>C2</code> is subscribed to <code>t0</code>, <code>t1</code>, <code>t2</code>.
 *
 * <p>That assignment will be:
 * <ul>
 * <li><code>C0: [t0p0]</code>
 * <li><code>C1: [t1p0]</code>
 * <li><code>C2: [t1p1, t2p0, t2p1, t2p2]</code>
 * </ul>
 *
 * Since the introduction of static membership, we could leverage <code>group.instance.id</code> to make the
 * assignment behavior more sticky.
 * For example, we have three consumers with assigned <code>member.id</code> <code>C0</code>, <code>C1</code>,
 * <code>C2</code>, two topics <code>t0</code> and <code>t1</code>, and each topic has 3 partitions, resulting in
 * partitions <code>t0p0</code>, <code>t0p1</code>, <code>t0p2</code>, <code>t1p0</code>, <code>t1p1</code>, and
 * <code>t1p2</code>. We choose to honor the sorted order based on ephemeral <code>member.id</code>.
 *
 * <p>The assignment will be:
 * <ul>
 * <li><code>C0: [t0p0, t1p0]</code>
 * <li><code>C1: [t0p1, t1p1]</code>
 * <li><code>C2: [t0p2, t1p2]</code>
 * </ul>
 *
 * After one rolling bounce, group coordinator will attempt to assign new <code>member.id</code> towards consumers,
 * for example <code>C0</code> -&gt; <code>C5</code> <code>C1</code> -&gt; <code>C3</code>, <code>C2</code> -&gt; <code>C4</code>.
 *
 * <p>The assignment could be completely shuffled to:
 * <ul>
 * <li><code>C3 (was C1): [t0p0, t1p0] (before was [t0p1, t1p1])</code>
 * <li><code>C4 (was C2): [t0p1, t1p1] (before was [t0p2, t1p2])</code>
 * <li><code>C5 (was C0): [t0p2, t1p2] (before was [t0p0, t1p0])</code>
 * </ul>
 *
 * This issue could be mitigated by the introduction of static membership. Consumers will have individual instance ids
 * <code>I1</code>, <code>I2</code>, <code>I3</code>. As long as
 * 1. Number of members remain the same across generation
 * 2. Static members' identities persist across generation
 * 3. Subscription pattern doesn't change for any member
 *
 * <p>The assignment will always be:
 * <ul>
 * <li><code>I1: [t0p0, t1p0]</code>
 * <li><code>I2: [t0p1, t1p1]</code>
 * <li><code>I3: [t0p2, t1p2]</code>
 * </ul>
 */
public class RoundRobinAssignor extends AbstractPartitionAssignor {

    public static final String ROUNDROBIN_ASSIGNOR_NAME = "roundrobin";

    /**
     * Distribute all subscribed partitions over the group members in round robin order.
     * Members not subscribed to a partition's topic are skipped for that partition.
     *
     * @param partitionsPerTopic number of partitions per topic (topics with no metadata are ignored)
     * @param subscriptions the topic subscription of each member, keyed by member id
     * @return the list of partitions assigned to each member, keyed by member id
     */
    @Override
    public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                    Map<String, Subscription> subscriptions) {
        Map<String, List<TopicPartition>> assignment = new HashMap<>();
        List<MemberInfo> members = new ArrayList<>();
        subscriptions.forEach((memberId, subscription) -> {
            assignment.put(memberId, new ArrayList<>());
            members.add(new MemberInfo(memberId, subscription.groupInstanceId()));
        });

        // Cycle through the members in sorted order so that the assignment is deterministic
        // and (with static membership) sticky across generations.
        CircularIterator<MemberInfo> memberIterator = new CircularIterator<>(Utils.sorted(members));
        for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) {
            // Advance past any member that is not subscribed to this partition's topic.
            while (!subscriptions.get(memberIterator.peek().memberId).topics().contains(partition.topic()))
                memberIterator.next();
            assignment.get(memberIterator.next().memberId).add(partition);
        }
        return assignment;
    }

    /**
     * List every partition of every subscribed topic, with topics taken in sorted order.
     * Topics missing from {@code partitionsPerTopic} contribute no partitions.
     */
    private List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic,
                                                     Map<String, Subscription> subscriptions) {
        SortedSet<String> subscribedTopics = new TreeSet<>();
        for (Subscription subscription : subscriptions.values())
            subscribedTopics.addAll(subscription.topics());

        List<TopicPartition> allPartitions = new ArrayList<>();
        for (String topic : subscribedTopics) {
            Integer partitionCount = partitionsPerTopic.get(topic);
            if (partitionCount != null)
                allPartitions.addAll(AbstractPartitionAssignor.partitions(topic, partitionCount));
        }
        return allPartitions;
    }

    @Override
    public String name() {
        return ROUNDROBIN_ASSIGNOR_NAME;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/StickyAssignor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.CollectionUtils;

/**
 * <p>The sticky assignor serves two purposes. First, it guarantees an assignment that is as balanced as possible, meaning either:
 * <ul>
 * <li>the numbers of topic partitions assigned to consumers differ by at most one; or</li>
 * <li>each consumer that has 2+ fewer topic partitions than some other consumer cannot get any of those topic partitions transferred to it.</li>
 * </ul>
 * Second, it preserves as many existing assignments as possible when a reassignment occurs. This helps in saving some of the
 * overhead processing when topic partitions move from one consumer to another.</p>
 *
 * <p>Starting fresh it would work by distributing the partitions over consumers as evenly as possible. Even though this may sound similar to
 * how round robin assignor works, the second example below shows that it is not.
 * During a reassignment it would perform the reassignment in such a way that in the new assignment
 * <ol>
 * <li>topic partitions are still distributed as evenly as possible, and</li>
 * <li>topic partitions stay with their previously assigned consumers as much as possible.</li>
 * </ol>
 * Of course, the first goal above takes precedence over the second one.</p>
 *
 * <p><b>Example 1.</b> Suppose there are three consumers <code>C0</code>, <code>C1</code>, <code>C2</code>,
 * four topics <code>t0,</code> <code>t1</code>, <code>t2</code>, <code>t3</code>, and each topic has 2 partitions,
 * resulting in partitions <code>t0p0</code>, <code>t0p1</code>, <code>t1p0</code>, <code>t1p1</code>, <code>t2p0</code>,
 * <code>t2p1</code>, <code>t3p0</code>, <code>t3p1</code>. Each consumer is subscribed to all three topics.
 *
 * The assignment with both sticky and round robin assignors will be:
 * <ul>
 * <li><code>C0: [t0p0, t1p1, t3p0]</code></li>
 * <li><code>C1: [t0p1, t2p0, t3p1]</code></li>
 * <li><code>C2: [t1p0, t2p1]</code></li>
 * </ul>
 *
 * Now, let's assume <code>C1</code> is removed and a reassignment is about to happen. The round robin assignor would produce:
 * <ul>
 * <li><code>C0: [t0p0, t1p0, t2p0, t3p0]</code></li>
 * <li><code>C2: [t0p1, t1p1, t2p1, t3p1]</code></li>
 * </ul>
 *
 * while the sticky assignor would result in:
 * <ul>
 * <li><code>C0 [t0p0, t1p1, t3p0, t2p0]</code></li>
 * <li><code>C2 [t1p0, t2p1, t0p1, t3p1]</code></li>
 * </ul>
 * preserving all the previous assignments (unlike the round robin assignor).
 *</p>
 * <p><b>Example 2.</b> There are three consumers <code>C0</code>, <code>C1</code>, <code>C2</code>,
 * and three topics <code>t0</code>, <code>t1</code>, <code>t2</code>, with 1, 2, and 3 partitions respectively.
 * Therefore, the partitions are <code>t0p0</code>, <code>t1p0</code>, <code>t1p1</code>, <code>t2p0</code>,
 * <code>t2p1</code>, <code>t2p2</code>. <code>C0</code> is subscribed to <code>t0</code>; <code>C1</code> is subscribed to
 * <code>t0</code>, <code>t1</code>; and <code>C2</code> is subscribed to <code>t0</code>, <code>t1</code>, <code>t2</code>.
 *
 * The round robin assignor would come up with the following assignment:
 * <ul>
 * <li><code>C0 [t0p0]</code></li>
 * <li><code>C1 [t1p0]</code></li>
 * <li><code>C2 [t1p1, t2p0, t2p1, t2p2]</code></li>
 * </ul>
 *
 * which is not as balanced as the assignment suggested by sticky assignor:
 * <ul>
 * <li><code>C0 [t0p0]</code></li>
 * <li><code>C1 [t1p0, t1p1]</code></li>
 * <li><code>C2 [t2p0, t2p1, t2p2]</code></li>
 * </ul>
 *
 * Now, if consumer <code>C0</code> is removed, these two assignors would produce the following assignments.
 * Round Robin (preserves 3 partition assignments):
 * <ul>
 * <li><code>C1 [t0p0, t1p1]</code></li>
 * <li><code>C2 [t1p0, t2p0, t2p1, t2p2]</code></li>
 * </ul>
 *
 * Sticky (preserves 5 partition assignments):
 * <ul>
 * <li><code>C1 [t1p0, t1p1, t0p0]</code></li>
 * <li><code>C2 [t2p0, t2p1, t2p2]</code></li>
 * </ul>
 *</p>
 * <h3>Impact on <code>ConsumerRebalanceListener</code></h3>
 * The sticky assignment strategy can provide some optimization to those consumers that have some partition cleanup code
 * in their <code>onPartitionsRevoked()</code> callback listeners. The cleanup code is placed in that callback listener
 * because the consumer has no assumption or hope of preserving any of its assigned partitions after a rebalance when it
 * is using range or round robin assignor. The listener code would look like this:
 * <pre>
 * {@code
 * class TheOldRebalanceListener implements ConsumerRebalanceListener {
 *
 *   void onPartitionsRevoked(Collection<TopicPartition> partitions) {
 *     for (TopicPartition partition: partitions) {
 *       commitOffsets(partition);
 *       cleanupState(partition);
 *     }
 *   }
 *
 *   void onPartitionsAssigned(Collection<TopicPartition> partitions) {
 *     for (TopicPartition partition: partitions) {
 *       initializeState(partition);
 *       initializeOffset(partition);
 *     }
 *   }
 * }
 * }
 * </pre>
 *
 * As mentioned above, one advantage of the sticky assignor is that, in general, it reduces the number of partitions that
 * actually move from one consumer to another during a reassignment. Therefore, it allows consumers to do their cleanup
 * more efficiently. Of course, they still can perform the partition cleanup in the <code>onPartitionsRevoked()</code>
 * listener, but they can be more efficient and make a note of their partitions before and after the rebalance, and do the
 * cleanup after the rebalance only on the partitions they have lost (which is normally not a lot). The code snippet below
 * clarifies this point:
 * <pre>
 * {@code
 * class TheNewRebalanceListener implements ConsumerRebalanceListener {
 *   Collection<TopicPartition> lastAssignment = Collections.emptyList();
 *
 *   void onPartitionsRevoked(Collection<TopicPartition> partitions) {
 *     for (TopicPartition partition: partitions)
 *       commitOffsets(partition);
 *   }
 *
 *   void onPartitionsAssigned(Collection<TopicPartition> assignment) {
 *     for (TopicPartition partition: difference(lastAssignment, assignment))
 *       cleanupState(partition);
 *
 *     for (TopicPartition partition: difference(assignment, lastAssignment))
 *       initializeState(partition);
 *
 *     for (TopicPartition partition: assignment)
 *       initializeOffset(partition);
 *
 *     this.lastAssignment = assignment;
 *   }
 * }
 * }
 * </pre>
 *
 * Any consumer that uses sticky assignment can leverage this listener like this:
 * <code>consumer.subscribe(topics, new TheNewRebalanceListener());</code>
 *
 * Note that you can leverage the {@link CooperativeStickyAssignor} so that only partitions which are being
 * reassigned to another consumer will be revoked. That is the preferred assignor for newer clusters. See
 * {@link ConsumerPartitionAssignor.RebalanceProtocol} for a detailed explanation of cooperative rebalancing.
 */
public class StickyAssignor extends AbstractStickyAssignor {

    public static final String STICKY_ASSIGNOR_NAME = "sticky";

    // these schemas are used for preserving consumer's previously assigned partitions
    // list and sending it as user data to the leader during a rebalance
    static final String TOPIC_PARTITIONS_KEY_NAME = "previous_assignment";
    static final String TOPIC_KEY_NAME = "topic";
    static final String PARTITIONS_KEY_NAME = "partitions";
    private static final String GENERATION_KEY_NAME = "generation";
    // Schema for one topic's assigned partition numbers within the user data.
    static final Schema TOPIC_ASSIGNMENT = new Schema(
        new Field(TOPIC_KEY_NAME, Type.STRING),
        new Field(PARTITIONS_KEY_NAME, new ArrayOf(Type.INT32)));
    // V0 user data: only the previous assignment, no generation field.
    static final Schema STICKY_ASSIGNOR_USER_DATA_V0 = new Schema(
        new Field(TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(TOPIC_ASSIGNMENT)));
    // V1 user data: previous assignment plus the consumer group generation id.
    private static final Schema STICKY_ASSIGNOR_USER_DATA_V1 = new Schema(
        new Field(TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(TOPIC_ASSIGNMENT)),
        new Field(GENERATION_KEY_NAME, Type.INT32));

    // Partitions assigned to this member in the last completed rebalance; null until the first onAssignment.
    private List<TopicPartition> memberAssignment = null;
    private int generation = DEFAULT_GENERATION; // consumer group generation

    @Override
    public String name() {
        return STICKY_ASSIGNOR_NAME;
    }

    /**
     * Records the assignment and generation from the latest completed rebalance so they can be
     * sent as user data in the next JoinGroup via {@link #subscriptionUserData(Set)}.
     */
    @Override
    public void onAssignment(Assignment assignment, ConsumerGroupMetadata metadata) {
        memberAssignment = assignment.partitions();
        this.generation = metadata.generationId();
    }

    /**
     * Serializes this member's previous assignment and generation as subscription user data,
     * or returns null if no rebalance has completed yet.
     */
    @Override
    public ByteBuffer subscriptionUserData(Set<String> topics) {
        if (memberAssignment == null)
            return null;

        return serializeTopicPartitionAssignment(new MemberData(memberAssignment, Optional.of(generation)));
    }

    /**
     * Extracts a member's previous assignment and generation from its subscription user data.
     * Empty or absent user data yields an empty member data carrying the subscription's rack id.
     */
    @Override
    protected MemberData memberData(Subscription subscription) {
        // Always deserialize ownedPartitions and generation id from user data
        // since StickyAssignor is an eager rebalance protocol that will revoke all existing partitions before joining group
        ByteBuffer userData = subscription.userData();
        if (userData == null || !userData.hasRemaining()) {
            return new MemberData(Collections.emptyList(), Optional.empty(), subscription.rackId());
        }
        return deserializeTopicPartitionAssignment(userData);
    }

    /**
     * Serializes member data (partitions grouped by topic, plus the generation if present)
     * into a V1-schema ByteBuffer, flipped and ready for reading.
     */
    // visible for testing
    static ByteBuffer serializeTopicPartitionAssignment(MemberData memberData) {
        Struct struct = new Struct(STICKY_ASSIGNOR_USER_DATA_V1);
        List<Struct> topicAssignments = new ArrayList<>();
        for (Map.Entry<String, List<Integer>> topicEntry : CollectionUtils.groupPartitionsByTopic(memberData.partitions).entrySet()) {
            Struct topicAssignment = new Struct(TOPIC_ASSIGNMENT);
            topicAssignment.set(TOPIC_KEY_NAME, topicEntry.getKey());
            topicAssignment.set(PARTITIONS_KEY_NAME, topicEntry.getValue().toArray());
            topicAssignments.add(topicAssignment);
        }
        struct.set(TOPIC_PARTITIONS_KEY_NAME, topicAssignments.toArray());
        if (memberData.generation.isPresent())
            struct.set(GENERATION_KEY_NAME, memberData.generation.get());
        ByteBuffer buffer = ByteBuffer.allocate(STICKY_ASSIGNOR_USER_DATA_V1.sizeOf(struct));
        STICKY_ASSIGNOR_USER_DATA_V1.write(buffer, struct);
        buffer.flip();
        return buffer;
    }

    /**
     * Deserializes subscription user data, trying the V1 schema first and falling back to V0.
     * Unparseable data is treated as no previous assignment with the default generation.
     */
    private static MemberData deserializeTopicPartitionAssignment(ByteBuffer buffer) {
        Struct struct;
        // duplicate() keeps an independent position so the V0 fallback can re-read from the start
        // after a failed V1 read has advanced the original buffer
        ByteBuffer copy = buffer.duplicate();
        try {
            struct = STICKY_ASSIGNOR_USER_DATA_V1.read(buffer);
        } catch (Exception e1) {
            try {
                // fall back to older schema
                struct = STICKY_ASSIGNOR_USER_DATA_V0.read(copy);
            } catch (Exception e2) {
                // ignore the consumer's previous assignment if it cannot be parsed
                return new MemberData(Collections.emptyList(), Optional.of(DEFAULT_GENERATION));
            }
        }
        List<TopicPartition> partitions = new ArrayList<>();
        for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) {
            Struct assignment = (Struct) structObj;
            String topic = assignment.getString(TOPIC_KEY_NAME);
            for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) {
                Integer partition = (Integer) partitionObj;
                partitions.add(new TopicPartition(topic, partition));
            }
        }
        // make sure this is backward compatible: V0 data has no generation field
        Optional<Integer> generation = struct.hasField(GENERATION_KEY_NAME) ? Optional.of(struct.getInt(GENERATION_KEY_NAME)) : Optional.empty();
        return new MemberData(partitions, generation);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster. */ package org.apache.kafka.clients.consumer;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.GroupMaxSizeReachedException; import org.apache.kafka.common.errors.IllegalGenerationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.MemberIdRequiredException; import org.apache.kafka.common.errors.RebalanceInProgressException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator; import org.apache.kafka.common.message.HeartbeatRequestData; import org.apache.kafka.common.message.JoinGroupRequestData; import 
org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse; import org.apache.kafka.common.message.SyncGroupRequestData; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.CumulativeCount; import org.apache.kafka.common.metrics.stats.CumulativeSum; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Meter; import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.metrics.stats.WindowedCount; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.HeartbeatRequest; import org.apache.kafka.common.requests.HeartbeatResponse; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.JoinGroupResponse; import org.apache.kafka.common.requests.LeaveGroupRequest; import org.apache.kafka.common.requests.LeaveGroupResponse; import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.SyncGroupRequest; import org.apache.kafka.common.requests.SyncGroupResponse; import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.io.Closeable; import java.nio.ByteBuffer; import java.util.ArrayList; import 
java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; /** * AbstractCoordinator implements group management for a single group member by interacting with * a designated Kafka broker (the coordinator). Group semantics are provided by extending this class. * See {@link ConsumerCoordinator} for example usage. * * From a high level, Kafka's group management protocol consists of the following sequence of actions: * * <ol> * <li>Group Registration: Group members register with the coordinator providing their own metadata * (such as the set of topics they are interested in).</li> * <li>Group/Leader Selection: The coordinator select the members of the group and chooses one member * as the leader.</li> * <li>State Assignment: The leader collects the metadata from all the members of the group and * assigns state.</li> * <li>Group Stabilization: Each member receives the state assigned by the leader and begins * processing.</li> * </ol> * * To leverage this protocol, an implementation must define the format of metadata provided by each * member for group registration in {@link #metadata()} and the format of the state assignment provided * by the leader in {@link #onLeaderElected(String, String, List, boolean)} and becomes available to members in * {@link #onJoinComplete(int, String, String, ByteBuffer)}. * * Note on locking: this class shares state between the caller and a background thread which is * used for sending heartbeats after the client has joined the group. All mutable state as well as * state transitions are protected with the class's monitor. Generally this means acquiring the lock * before reading or writing the state of the group (e.g. generation, memberId) and holding the lock * when sending a request that affects the state of the group (e.g. JoinGroup, LeaveGroup). 
*/ public abstract class AbstractCoordinator implements Closeable { public static final String HEARTBEAT_THREAD_PREFIX = "kafka-coordinator-heartbeat-thread"; public static final int JOIN_GROUP_TIMEOUT_LAPSE = 5000; protected enum MemberState { UNJOINED, // the client is not part of a group PREPARING_REBALANCE, // the client has sent the join group request, but have not received response COMPLETING_REBALANCE, // the client has received join group response, but have not received assignment STABLE; // the client has joined and is sending heartbeats public boolean hasNotJoinedGroup() { return equals(UNJOINED) || equals(PREPARING_REBALANCE); } } private final Logger log; private final Heartbeat heartbeat; private final GroupCoordinatorMetrics sensors; private final GroupRebalanceConfig rebalanceConfig; protected final Time time; protected final ConsumerNetworkClient client; private Node coordinator = null; private String rejoinReason = ""; private boolean rejoinNeeded = true; private boolean needsJoinPrepare = true; private HeartbeatThread heartbeatThread = null; private RequestFuture<ByteBuffer> joinFuture = null; private RequestFuture<Void> findCoordinatorFuture = null; private volatile RuntimeException fatalFindCoordinatorException = null; private Generation generation = Generation.NO_GENERATION; private long lastRebalanceStartMs = -1L; private long lastRebalanceEndMs = -1L; private long lastTimeOfConnectionMs = -1L; // starting logging a warning only after unable to connect for a while protected MemberState state = MemberState.UNJOINED; /** * Initialize the coordination manager. 
*/ public AbstractCoordinator(GroupRebalanceConfig rebalanceConfig, LogContext logContext, ConsumerNetworkClient client, Metrics metrics, String metricGrpPrefix, Time time) { Objects.requireNonNull(rebalanceConfig.groupId, "Expected a non-null group id for coordinator construction"); this.rebalanceConfig = rebalanceConfig; this.log = logContext.logger(this.getClass()); this.client = client; this.time = time; this.heartbeat = new Heartbeat(rebalanceConfig, time); this.sensors = new GroupCoordinatorMetrics(metrics, metricGrpPrefix); } /** * Unique identifier for the class of supported protocols (e.g. "consumer" or "connect"). * @return Non-null protocol type name */ protected abstract String protocolType(); /** * Get the current list of protocols and their associated metadata supported * by the local member. The order of the protocols in the list indicates the preference * of the protocol (the first entry is the most preferred). The coordinator takes this * preference into account when selecting the generation protocol (generally more preferred * protocols will be selected as long as all members support them and there is no disagreement * on the preference). * @return Non-empty map of supported protocols and metadata */ protected abstract JoinGroupRequestData.JoinGroupRequestProtocolCollection metadata(); /** * Invoked prior to each group join or rejoin. This is typically used to perform any * cleanup from the previous generation (such as committing offsets for the consumer) * @param timer Timer bounding how long this method can block * @param generation The previous generation or -1 if there was none * @param memberId The identifier of this member in the previous group or "" if there was none * @return true If onJoinPrepare async commit succeeded, false otherwise */ protected abstract boolean onJoinPrepare(Timer timer, int generation, String memberId); /** * Invoked when the leader is elected. 
This is used by the leader to perform the assignment * if necessary and to push state to all the members of the group (e.g. to push partition * assignments in the case of the new consumer) * @param leaderId The id of the leader (which is this member) * @param protocol The protocol selected by the coordinator * @param allMemberMetadata Metadata from all members of the group * @param skipAssignment True if leader must skip running the assignor * @return A map from each member to their state assignment */ protected abstract Map<String, ByteBuffer> onLeaderElected(String leaderId, String protocol, List<JoinGroupResponseData.JoinGroupResponseMember> allMemberMetadata, boolean skipAssignment); /** * Invoked when a group member has successfully joined a group. If this call fails with an exception, * then it will be retried using the same assignment state on the next call to {@link #ensureActiveGroup()}. * * @param generation The generation that was joined * @param memberId The identifier for the local member in the group * @param protocol The protocol selected by the coordinator * @param memberAssignment The assignment propagated from the group leader */ protected abstract void onJoinComplete(int generation, String memberId, String protocol, ByteBuffer memberAssignment); /** * Invoked prior to each leave group event. This is typically used to cleanup assigned partitions; * note it is triggered by the consumer's API caller thread (i.e. background heartbeat thread would * not trigger it even if it tries to force leaving group upon heartbeat session expiration) */ protected void onLeavePrepare() {} /** * Ensure that the coordinator is ready to receive requests. 
* * @param timer Timer bounding how long this method can block * @return true If coordinator discovery and initial connection succeeded, false otherwise */ protected synchronized boolean ensureCoordinatorReady(final Timer timer) { return ensureCoordinatorReady(timer, false); } /** * Ensure that the coordinator is ready to receive requests. This will return * immediately without blocking. It is intended to be called in an asynchronous * context when wakeups are not expected. * * @return true If coordinator discovery and initial connection succeeded, false otherwise */ protected synchronized boolean ensureCoordinatorReadyAsync() { return ensureCoordinatorReady(time.timer(0), true); } private synchronized boolean ensureCoordinatorReady(final Timer timer, boolean disableWakeup) { if (!coordinatorUnknown()) return true; do { if (fatalFindCoordinatorException != null) { final RuntimeException fatalException = fatalFindCoordinatorException; fatalFindCoordinatorException = null; throw fatalException; } final RequestFuture<Void> future = lookupCoordinator(); client.poll(future, timer, disableWakeup); if (!future.isDone()) { // ran out of time break; } RuntimeException fatalException = null; if (future.failed()) { if (future.isRetriable()) { log.debug("Coordinator discovery failed, refreshing metadata", future.exception()); client.awaitMetadataUpdate(timer); } else { fatalException = future.exception(); log.info("FindCoordinator request hit fatal exception", fatalException); } } else if (coordinator != null && client.isUnavailable(coordinator)) { // we found the coordinator, but the connection has failed, so mark // it dead and backoff before retrying discovery markCoordinatorUnknown("coordinator unavailable"); timer.sleep(rebalanceConfig.retryBackoffMs); } clearFindCoordinatorFuture(); if (fatalException != null) throw fatalException; } while (coordinatorUnknown() && timer.notExpired()); return !coordinatorUnknown(); } protected synchronized RequestFuture<Void> 
lookupCoordinator() { if (findCoordinatorFuture == null) { // find a node to ask about the coordinator Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send FindCoordinator request"); return RequestFuture.noBrokersAvailable(); } else { findCoordinatorFuture = sendFindCoordinatorRequest(node); } } return findCoordinatorFuture; } private synchronized void clearFindCoordinatorFuture() { findCoordinatorFuture = null; } /** * Check whether the group should be rejoined (e.g. if metadata changes) or whether a * rejoin request is already in flight and needs to be completed. * * @return true if it should, false otherwise */ protected synchronized boolean rejoinNeededOrPending() { // if there's a pending joinFuture, we should try to complete handling it. return rejoinNeeded || joinFuture != null; } /** * Check the status of the heartbeat thread (if it is active) and indicate the liveness * of the client. This must be called periodically after joining with {@link #ensureActiveGroup()} * to ensure that the member stays in the group. If an interval of time longer than the * provided rebalance timeout expires without calling this method, then the client will proactively * leave the group. * * @param now current time in milliseconds * @throws RuntimeException for unexpected errors raised from the heartbeat thread */ protected synchronized void pollHeartbeat(long now) { if (heartbeatThread != null) { if (heartbeatThread.hasFailed()) { // set the heartbeat thread to null and raise an exception. If the user catches it, // the next call to ensureActiveGroup() will spawn a new heartbeat thread. 
                RuntimeException cause = heartbeatThread.failureCause();
                heartbeatThread = null;
                throw cause;
            }
            // Awake the heartbeat thread if needed
            if (heartbeat.shouldHeartbeat(now)) {
                notify();
            }
            heartbeat.poll(now);
        }
    }

    /**
     * Time in milliseconds until the next heartbeat is due, or Long.MAX_VALUE if
     * no heartbeats are needed in the current member state.
     *
     * @throws RuntimeException if the heartbeat thread has failed
     */
    protected synchronized long timeToNextHeartbeat(long now) {
        // if we have not joined the group or we are preparing rebalance,
        // we don't need to send heartbeats
        if (state.hasNotJoinedGroup())
            return Long.MAX_VALUE;
        if (heartbeatThread != null && heartbeatThread.hasFailed()) {
            // if an exception occurs in the heartbeat thread, raise it.
            throw heartbeatThread.failureCause();
        }
        return heartbeat.timeToNextHeartbeat(now);
    }

    /**
     * Ensure that the group is active (i.e. joined and synced)
     */
    public void ensureActiveGroup() {
        // blocks indefinitely, retrying until the group becomes active
        while (!ensureActiveGroup(time.timer(Long.MAX_VALUE))) {
            log.warn("still waiting to ensure active group");
        }
    }

    /**
     * Ensure the group is active (i.e., joined and synced)
     *
     * @param timer Timer bounding how long this method can block
     * @throws KafkaException if the callback throws exception
     * @return true iff the group is active
     */
    boolean ensureActiveGroup(final Timer timer) {
        // always ensure that the coordinator is ready because we may have been disconnected
        // when sending heartbeats and does not necessarily require us to rejoin the group.
        if (!ensureCoordinatorReady(timer)) {
            return false;
        }
        startHeartbeatThreadIfNeeded();
        return joinGroupIfNeeded(timer);
    }

    // Lazily create and start the background heartbeat thread (at most one).
    private synchronized void startHeartbeatThreadIfNeeded() {
        if (heartbeatThread == null) {
            heartbeatThread = new HeartbeatThread();
            heartbeatThread.start();
        }
    }

    // Signal the heartbeat thread to close and wait for it to terminate.
    // The join happens outside the lock since the thread needs this lock to finish.
    private void closeHeartbeatThread() {
        HeartbeatThread thread;
        synchronized (this) {
            if (heartbeatThread == null)
                return;
            heartbeatThread.close();
            thread = heartbeatThread;
            heartbeatThread = null;
        }
        try {
            thread.join();
        } catch (InterruptedException e) {
            log.warn("Interrupted while waiting for consumer heartbeat thread to close");
            throw new InterruptException(e);
        }
    }

    /**
     * Joins the group without starting the heartbeat thread.
     *
     * If this function returns true, the state must always be in STABLE and heartbeat enabled.
     * If this function returns false, the state can be in one of the following:
     *  * UNJOINED: got error response but times out before being able to re-join, heartbeat disabled
     *  * PREPARING_REBALANCE: not yet received join-group response before timeout, heartbeat disabled
     *  * COMPLETING_REBALANCE: not yet received sync-group response before timeout, heartbeat enabled
     *
     * Visible for testing.
     *
     * @param timer Timer bounding how long this method can block
     * @throws KafkaException if the callback throws exception
     * @return true iff the operation succeeded
     */
    boolean joinGroupIfNeeded(final Timer timer) {
        while (rejoinNeededOrPending()) {
            if (!ensureCoordinatorReady(timer)) {
                return false;
            }

            // call onJoinPrepare if needed. We set a flag to make sure that we do not call it a second
            // time if the client is woken up before a pending rebalance completes. This must be called
            // on each iteration of the loop because an event requiring a rebalance (such as a metadata
            // refresh which changes the matched subscription set) can occur while another rebalance is
            // still in progress.
            if (needsJoinPrepare) {
                // need to set the flag before calling onJoinPrepare since the user callback may throw
                // exception, in which case upon retry we should not retry onJoinPrepare either.
                needsJoinPrepare = false;
                // return false when onJoinPrepare is waiting for committing offset
                if (!onJoinPrepare(timer, generation.generationId, generation.memberId)) {
                    needsJoinPrepare = true;
                    //should not initiateJoinGroup if needsJoinPrepare still is true
                    return false;
                }
            }

            final RequestFuture<ByteBuffer> future = initiateJoinGroup();
            client.poll(future, timer);
            if (!future.isDone()) {
                // we ran out of time
                return false;
            }

            if (future.succeeded()) {
                Generation generationSnapshot;
                MemberState stateSnapshot;

                // Generation data maybe concurrently cleared by Heartbeat thread.
                // Can't use synchronized for {@code onJoinComplete}, because it can be long enough
                // and shouldn't block heartbeat thread.
                // See {@link PlaintextConsumerTest#testMaxPollIntervalMsDelayInAssignment}
                synchronized (AbstractCoordinator.this) {
                    generationSnapshot = this.generation;
                    stateSnapshot = this.state;
                }

                if (!hasGenerationReset(generationSnapshot) && stateSnapshot == MemberState.STABLE) {
                    // Duplicate the buffer in case `onJoinComplete` does not complete and needs to be retried.
                    ByteBuffer memberAssignment = future.value().duplicate();

                    onJoinComplete(generationSnapshot.generationId, generationSnapshot.memberId, generationSnapshot.protocolName, memberAssignment);

                    // Generally speaking we should always resetJoinGroupFuture once the future is done, but here
                    // we can only reset the join group future after the completion callback returns. This ensures
                    // that if the callback is woken up, we will retry it on the next joinGroupIfNeeded.
                    // And because of that we should explicitly trigger resetJoinGroupFuture in other conditions below.
                    resetJoinGroupFuture();
                    needsJoinPrepare = true;
                } else {
                    final String reason = String.format("rebalance failed since the generation/state was " +
                            "modified by heartbeat thread to %s/%s before the rebalance callback triggered",
                            generationSnapshot, stateSnapshot);

                    resetStateAndRejoin(reason, true);
                    resetJoinGroupFuture();
                }
            } else {
                final RuntimeException exception = future.exception();

                resetJoinGroupFuture();
                synchronized (AbstractCoordinator.this) {
                    final String simpleName = exception.getClass().getSimpleName();
                    final String shortReason = String.format("rebalance failed due to %s", simpleName);
                    final String fullReason = String.format("rebalance failed due to '%s' (%s)",
                        exception.getMessage(),
                        simpleName);
                    requestRejoin(shortReason, fullReason);
                }

                // these errors are retried by rejoining; anything else non-retriable is fatal
                if (exception instanceof UnknownMemberIdException ||
                    exception instanceof IllegalGenerationException ||
                    exception instanceof RebalanceInProgressException ||
                    exception instanceof MemberIdRequiredException)
                    continue;
                else if (!future.isRetriable())
                    throw exception;

                // We need to return upon expired timer, in case if the client.poll returns immediately and the time
                // has elapsed.
                if (timer.isExpired()) {
                    return false;
                }

                timer.sleep(rebalanceConfig.retryBackoffMs);
            }
        }
        return true;
    }

    // Clear the cached join future so a new join round can be initiated.
    private synchronized void resetJoinGroupFuture() {
        this.joinFuture = null;
    }

    private synchronized RequestFuture<ByteBuffer> initiateJoinGroup() {
        // we store the join future in case we are woken up by the user after beginning the
        // rebalance in the call to poll below. This ensures that we do not mistakenly attempt
        // to rejoin before the pending rebalance has completed.
        if (joinFuture == null) {
            state = MemberState.PREPARING_REBALANCE;
            // a rebalance can be triggered consecutively if the previous one failed,
            // in this case we would not update the start time.
            if (lastRebalanceStartMs == -1L)
                lastRebalanceStartMs = time.milliseconds();
            joinFuture = sendJoinGroupRequest();
            joinFuture.addListener(new RequestFutureListener<ByteBuffer>() {
                @Override
                public void onSuccess(ByteBuffer value) {
                    // do nothing since all the handler logic are in SyncGroupResponseHandler already
                }

                @Override
                public void onFailure(RuntimeException e) {
                    // we handle failures below after the request finishes. if the join completes
                    // after having been woken up, the exception is ignored and we will rejoin;
                    // this can be triggered when either join or sync request failed
                    synchronized (AbstractCoordinator.this) {
                        sensors.failedRebalanceSensor.record();
                    }
                }
            });
        }
        return joinFuture;
    }

    /**
     * Join the group and return the assignment for the next generation. This function handles both
     * JoinGroup and SyncGroup, delegating to {@link #onLeaderElected(String, String, List, boolean)} if
     * elected leader by the coordinator.
     *
     * NOTE: This is visible only for testing
     *
     * @return A request future which wraps the assignment returned from the group leader
     */
    RequestFuture<ByteBuffer> sendJoinGroupRequest() {
        if (coordinatorUnknown())
            return RequestFuture.coordinatorNotAvailable();

        // send a join group request to the coordinator
        log.info("(Re-)joining group");
        JoinGroupRequest.Builder requestBuilder = new JoinGroupRequest.Builder(
                new JoinGroupRequestData()
                        .setGroupId(rebalanceConfig.groupId)
                        .setSessionTimeoutMs(this.rebalanceConfig.sessionTimeoutMs)
                        .setMemberId(this.generation.memberId)
                        .setGroupInstanceId(this.rebalanceConfig.groupInstanceId.orElse(null))
                        .setProtocolType(protocolType())
                        .setProtocols(metadata())
                        .setRebalanceTimeoutMs(this.rebalanceConfig.rebalanceTimeoutMs)
                        .setReason(JoinGroupRequest.maybeTruncateReason(this.rejoinReason))
        );

        log.debug("Sending JoinGroup ({}) to coordinator {}", requestBuilder, this.coordinator);

        // Note that we override the request timeout using the rebalance timeout since that is the
        // maximum time that it may block on the coordinator. We add an extra 5 seconds for small delays.
        int joinGroupTimeoutMs = Math.max(
            client.defaultRequestTimeoutMs(),
            Math.max(
                rebalanceConfig.rebalanceTimeoutMs + JOIN_GROUP_TIMEOUT_LAPSE,
                rebalanceConfig.rebalanceTimeoutMs) // guard against overflow since rebalance timeout can be MAX_VALUE
        );
        return client.send(coordinator, requestBuilder, joinGroupTimeoutMs)
                .compose(new JoinGroupResponseHandler(generation));
    }

    // Handles the JoinGroup response: on success transitions to COMPLETING_REBALANCE
    // and chains into the SyncGroup round; otherwise maps each error to retry/fatal handling.
    private class JoinGroupResponseHandler extends CoordinatorResponseHandler<JoinGroupResponse, ByteBuffer> {
        private JoinGroupResponseHandler(final Generation generation) {
            super(generation);
        }

        @Override
        public void handle(JoinGroupResponse joinResponse, RequestFuture<ByteBuffer> future) {
            Errors error = joinResponse.error();
            if (error == Errors.NONE) {
                if (isProtocolTypeInconsistent(joinResponse.data().protocolType())) {
                    log.error("JoinGroup failed: Inconsistent Protocol Type, received {} but expected {}",
                        joinResponse.data().protocolType(), protocolType());
                    future.raise(Errors.INCONSISTENT_GROUP_PROTOCOL);
                } else {
                    log.debug("Received successful JoinGroup response: {}", joinResponse);
                    sensors.joinSensor.record(response.requestLatencyMs());

                    synchronized (AbstractCoordinator.this) {
                        if (state != MemberState.PREPARING_REBALANCE) {
                            // if the consumer was woken up before a rebalance completes, we may have already left
                            // the group. In this case, we do not want to continue with the sync group.
                            future.raise(new UnjoinedGroupException());
                        } else {
                            state = MemberState.COMPLETING_REBALANCE;

                            // we only need to enable heartbeat thread whenever we transit to
                            // COMPLETING_REBALANCE state since we always transit from this state to STABLE
                            if (heartbeatThread != null)
                                heartbeatThread.enable();

                            AbstractCoordinator.this.generation = new Generation(
                                joinResponse.data().generationId(),
                                joinResponse.data().memberId(), joinResponse.data().protocolName());

                            log.info("Successfully joined group with generation {}", AbstractCoordinator.this.generation);

                            if (joinResponse.isLeader()) {
                                onLeaderElected(joinResponse).chain(future);
                            } else {
                                onJoinFollower().chain(future);
                            }
                        }
                    }
                }
            } else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
                log.info("JoinGroup failed: Coordinator {} is loading the group.", coordinator());
                // backoff and retry
                future.raise(error);
            } else if (error == Errors.UNKNOWN_MEMBER_ID) {
                log.info("JoinGroup failed: {} Need to re-join the group. Sent generation was {}",
                         error.message(), sentGeneration);
                // only need to reset the member id if generation has not been changed,
                // then retry immediately
                if (generationUnchanged())
                    resetStateOnResponseError(ApiKeys.JOIN_GROUP, error, true);

                future.raise(error);
            } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) {
                // re-discover the coordinator and retry with backoff
                markCoordinatorUnknown(error);
                log.info("JoinGroup failed: {} Marking coordinator unknown. Sent generation was {}",
                          error.message(), sentGeneration);
                future.raise(error);
            } else if (error == Errors.FENCED_INSTANCE_ID) {
                // for join-group request, even if the generation has changed we would not expect the instance id
                // gets fenced, and hence we always treat this as a fatal error
                log.error("JoinGroup failed: The group instance id {} has been fenced by another instance. " +
                          "Sent generation was {}", rebalanceConfig.groupInstanceId, sentGeneration);
                future.raise(error);
            } else if (error == Errors.INCONSISTENT_GROUP_PROTOCOL
                    || error == Errors.INVALID_SESSION_TIMEOUT
                    || error == Errors.INVALID_GROUP_ID
                    || error == Errors.GROUP_AUTHORIZATION_FAILED
                    || error == Errors.GROUP_MAX_SIZE_REACHED) {
                // log the error and re-throw the exception
                log.error("JoinGroup failed due to fatal error: {}", error.message());
                if (error == Errors.GROUP_MAX_SIZE_REACHED) {
                    future.raise(new GroupMaxSizeReachedException("Consumer group " + rebalanceConfig.groupId +
                            " already has the configured maximum number of members."));
                } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
                    future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId));
                } else {
                    future.raise(error);
                }
            } else if (error == Errors.UNSUPPORTED_VERSION) {
                log.error("JoinGroup failed due to unsupported version error. Please unset field group.instance.id " +
                          "and retry to see if the problem resolves");
                future.raise(error);
            } else if (error == Errors.MEMBER_ID_REQUIRED) {
                // Broker requires a concrete member id to be allowed to join the group. Update member id
                // and send another join group request in next cycle.
                String memberId = joinResponse.data().memberId();
                log.debug("JoinGroup failed due to non-fatal error: {}. Will set the member id as {} and then rejoin. " +
                              "Sent generation was {}", error, memberId, sentGeneration);
                synchronized (AbstractCoordinator.this) {
                    AbstractCoordinator.this.generation = new Generation(OffsetCommitRequest.DEFAULT_GENERATION_ID, memberId, null);
                }
                requestRejoin("need to re-join with the given member-id: " + memberId);

                future.raise(error);
            } else if (error == Errors.REBALANCE_IN_PROGRESS) {
                log.info("JoinGroup failed due to non-fatal error: REBALANCE_IN_PROGRESS, " +
                    "which could indicate a replication timeout on the broker. Will retry.");
                future.raise(error);
            } else {
                // unexpected error, throw the exception
                log.error("JoinGroup failed due to unexpected error: {}", error.message());
                future.raise(new KafkaException("Unexpected error in join group response: " + error.message()));
            }
        }
    }

    private RequestFuture<ByteBuffer> onJoinFollower() {
        // send follower's sync group with an empty assignment
        SyncGroupRequest.Builder requestBuilder =
                new SyncGroupRequest.Builder(
                        new SyncGroupRequestData()
                                .setGroupId(rebalanceConfig.groupId)
                                .setMemberId(generation.memberId)
                                .setProtocolType(protocolType())
                                .setProtocolName(generation.protocolName)
                                .setGroupInstanceId(this.rebalanceConfig.groupInstanceId.orElse(null))
                                .setGenerationId(generation.generationId)
                                .setAssignments(Collections.emptyList())
                );
        log.debug("Sending follower SyncGroup to coordinator {}: {}", this.coordinator, requestBuilder);
        return sendSyncGroupRequest(requestBuilder);
    }

    private RequestFuture<ByteBuffer> onLeaderElected(JoinGroupResponse joinResponse) {
        try {
            // perform the leader synchronization and send back the assignment for the group
            Map<String, ByteBuffer> groupAssignment = onLeaderElected(
                joinResponse.data().leader(),
                joinResponse.data().protocolName(),
                joinResponse.data().members(),
                joinResponse.data().skipAssignment()
            );

            List<SyncGroupRequestData.SyncGroupRequestAssignment> groupAssignmentList = new ArrayList<>();
            for (Map.Entry<String, ByteBuffer> assignment : groupAssignment.entrySet()) {
                groupAssignmentList.add(new SyncGroupRequestData.SyncGroupRequestAssignment()
                        .setMemberId(assignment.getKey())
                        .setAssignment(Utils.toArray(assignment.getValue()))
                );
            }

            SyncGroupRequest.Builder requestBuilder =
                    new SyncGroupRequest.Builder(
                            new SyncGroupRequestData()
                                    .setGroupId(rebalanceConfig.groupId)
                                    .setMemberId(generation.memberId)
                                    .setProtocolType(protocolType())
                                    .setProtocolName(generation.protocolName)
                                    .setGroupInstanceId(this.rebalanceConfig.groupInstanceId.orElse(null))
                                    .setGenerationId(generation.generationId)
.setAssignments(groupAssignmentList) ); log.debug("Sending leader SyncGroup to coordinator {}: {}", this.coordinator, requestBuilder); return sendSyncGroupRequest(requestBuilder); } catch (RuntimeException e) { return RequestFuture.failure(e); } } private RequestFuture<ByteBuffer> sendSyncGroupRequest(SyncGroupRequest.Builder requestBuilder) { if (coordinatorUnknown()) return RequestFuture.coordinatorNotAvailable(); return client.send(coordinator, requestBuilder) .compose(new SyncGroupResponseHandler(generation)); } private boolean hasGenerationReset(Generation gen) { // the member ID might not be reset for ILLEGAL_GENERATION error, so only check generationID and protocol name here return gen.generationId == Generation.NO_GENERATION.generationId && gen.protocolName == null; } private class SyncGroupResponseHandler extends CoordinatorResponseHandler<SyncGroupResponse, ByteBuffer> { private SyncGroupResponseHandler(final Generation generation) { super(generation); } @Override public void handle(SyncGroupResponse syncResponse, RequestFuture<ByteBuffer> future) { Errors error = syncResponse.error(); if (error == Errors.NONE) { if (isProtocolTypeInconsistent(syncResponse.data().protocolType())) { log.error("SyncGroup failed due to inconsistent Protocol Type, received {} but expected {}", syncResponse.data().protocolType(), protocolType()); future.raise(Errors.INCONSISTENT_GROUP_PROTOCOL); } else { log.debug("Received successful SyncGroup response: {}", syncResponse); sensors.syncSensor.record(response.requestLatencyMs()); synchronized (AbstractCoordinator.this) { if (!hasGenerationReset(generation) && state == MemberState.COMPLETING_REBALANCE) { // check protocol name only if the generation is not reset final String protocolName = syncResponse.data().protocolName(); final boolean protocolNameInconsistent = protocolName != null && !protocolName.equals(generation.protocolName); if (protocolNameInconsistent) { log.error("SyncGroup failed due to inconsistent Protocol Name, 
received {} but expected {}", protocolName, generation.protocolName); future.raise(Errors.INCONSISTENT_GROUP_PROTOCOL); } else { log.info("Successfully synced group in generation {}", generation); state = MemberState.STABLE; rejoinReason = ""; rejoinNeeded = false; // record rebalance latency lastRebalanceEndMs = time.milliseconds(); sensors.successfulRebalanceSensor.record(lastRebalanceEndMs - lastRebalanceStartMs); lastRebalanceStartMs = -1L; future.complete(ByteBuffer.wrap(syncResponse.data().assignment())); } } else { log.info("Generation data was cleared by heartbeat thread to {} and state is now {} before " + "receiving SyncGroup response, marking this rebalance as failed and retry", generation, state); // use ILLEGAL_GENERATION error code to let it retry immediately future.raise(Errors.ILLEGAL_GENERATION); } } } } else { if (error == Errors.GROUP_AUTHORIZATION_FAILED) { future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId)); } else if (error == Errors.REBALANCE_IN_PROGRESS) { log.info("SyncGroup failed: The group began another rebalance. Need to re-join the group. " + "Sent generation was {}", sentGeneration); future.raise(error); } else if (error == Errors.FENCED_INSTANCE_ID) { // for sync-group request, even if the generation has changed we would not expect the instance id // gets fenced, and hence we always treat this as a fatal error log.error("SyncGroup failed: The group instance id {} has been fenced by another instance. " + "Sent generation was {}", rebalanceConfig.groupInstanceId, sentGeneration); future.raise(error); } else if (error == Errors.UNKNOWN_MEMBER_ID || error == Errors.ILLEGAL_GENERATION) { log.info("SyncGroup failed: {} Need to re-join the group. 
Sent generation was {}", error.message(), sentGeneration); if (generationUnchanged()) { // don't reset generation member ID when ILLEGAL_GENERATION, since the member ID might still be valid resetStateOnResponseError(ApiKeys.SYNC_GROUP, error, error != Errors.ILLEGAL_GENERATION); } future.raise(error); } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) { log.info("SyncGroup failed: {} Marking coordinator unknown. Sent generation was {}", error.message(), sentGeneration); markCoordinatorUnknown(error); future.raise(error); } else { future.raise(new KafkaException("Unexpected error from SyncGroup: " + error.message())); } } } } /** * Discover the current coordinator for the group. Sends a FindCoordinator request to * the given broker node. The returned future should be polled to get the result of the request. * @return A request future which indicates the completion of the metadata request */ private RequestFuture<Void> sendFindCoordinatorRequest(Node node) { log.debug("Sending FindCoordinator request to broker {}", node); FindCoordinatorRequestData data = new FindCoordinatorRequestData() .setKeyType(CoordinatorType.GROUP.id()) .setKey(this.rebalanceConfig.groupId); FindCoordinatorRequest.Builder requestBuilder = new FindCoordinatorRequest.Builder(data); return client.send(node, requestBuilder) .compose(new FindCoordinatorResponseHandler()); } private class FindCoordinatorResponseHandler extends RequestFutureAdapter<ClientResponse, Void> { @Override public void onSuccess(ClientResponse resp, RequestFuture<Void> future) { log.debug("Received FindCoordinator response {}", resp); List<Coordinator> coordinators = ((FindCoordinatorResponse) resp.responseBody()).coordinators(); if (coordinators.size() != 1) { log.error("Group coordinator lookup failed: Invalid response containing more than a single coordinator"); future.raise(new IllegalStateException("Group coordinator lookup failed: Invalid response containing more than a single 
coordinator")); } Coordinator coordinatorData = coordinators.get(0); Errors error = Errors.forCode(coordinatorData.errorCode()); if (error == Errors.NONE) { synchronized (AbstractCoordinator.this) { // use MAX_VALUE - node.id as the coordinator id to allow separate connections // for the coordinator in the underlying network client layer int coordinatorConnectionId = Integer.MAX_VALUE - coordinatorData.nodeId(); AbstractCoordinator.this.coordinator = new Node( coordinatorConnectionId, coordinatorData.host(), coordinatorData.port()); log.info("Discovered group coordinator {}", coordinator); client.tryConnect(coordinator); heartbeat.resetSessionTimeout(); } future.complete(null); } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) { future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId)); } else { log.debug("Group coordinator lookup failed: {}", coordinatorData.errorMessage()); future.raise(error); } } @Override public void onFailure(RuntimeException e, RequestFuture<Void> future) { log.debug("FindCoordinator request failed due to {}", e.toString()); if (!(e instanceof RetriableException)) { // Remember the exception if fatal so we can ensure it gets thrown by the main thread fatalFindCoordinatorException = e; } super.onFailure(e, future); } } /** * Check if we know who the coordinator is and we have an active connection * @return true if the coordinator is unknown */ public boolean coordinatorUnknown() { return checkAndGetCoordinator() == null; } /** * Get the coordinator if its connection is still active. Otherwise mark it unknown and * return null. 
     *
     * @return the current coordinator or null if it is unknown
     */
    protected synchronized Node checkAndGetCoordinator() {
        if (coordinator != null && client.isUnavailable(coordinator)) {
            markCoordinatorUnknown(true, "coordinator unavailable");
            return null;
        }
        return this.coordinator;
    }

    // Raw accessor: returns the cached coordinator node without any liveness check.
    private synchronized Node coordinator() {
        return this.coordinator;
    }

    // Mark the coordinator unknown due to an error response from it.
    protected synchronized void markCoordinatorUnknown(Errors error) {
        markCoordinatorUnknown(false, "error response " + error.name());
    }

    // Mark the coordinator unknown with a free-form cause; assumes the connection may still be open.
    protected synchronized void markCoordinatorUnknown(String cause) {
        markCoordinatorUnknown(false, cause);
    }

    /**
     * Forget the current coordinator so that it is re-discovered, disconnecting from it
     * first unless the connection is already known to be down.
     *
     * @param isDisconnected true if the connection to the coordinator has already failed
     * @param cause          human-readable reason, used for logging
     */
    protected synchronized void markCoordinatorUnknown(boolean isDisconnected, String cause) {
        if (this.coordinator != null) {
            log.info("Group coordinator {} is unavailable or invalid due to cause: {}. "
                    + "isDisconnected: {}. Rediscovery will be attempted.", this.coordinator,
                    cause, isDisconnected);
            Node oldCoordinator = this.coordinator;

            // Mark the coordinator dead before disconnecting requests since the callbacks for any pending
            // requests may attempt to do likewise. This also prevents new requests from being sent to the
            // coordinator while the disconnect is in progress.
            this.coordinator = null;

            // Disconnect from the coordinator to ensure that there are no in-flight requests remaining.
            // Pending callbacks will be invoked with a DisconnectException on the next call to poll.
            if (!isDisconnected) {
                log.info("Requesting disconnect from last known coordinator {}", oldCoordinator);
                client.disconnectAsync(oldCoordinator);
            }

            lastTimeOfConnectionMs = time.milliseconds();
        } else {
            long durationOfOngoingDisconnect = time.milliseconds() - lastTimeOfConnectionMs;
            if (durationOfOngoingDisconnect > rebalanceConfig.rebalanceTimeoutMs)
                log.warn("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnect);
        }
    }

    /**
     * Get the current generation state, regardless of whether it is currently stable.
     * Note that the generation information can be updated while we are still in the middle
     * of a rebalance, after the join-group response is received.
     *
     * @return the current generation
     */
    protected synchronized Generation generation() {
        return generation;
    }

    /**
     * Get the current generation state if the group is stable, otherwise return null
     *
     * @return the current generation or null
     */
    protected synchronized Generation generationIfStable() {
        if (this.state != MemberState.STABLE)
            return null;
        return generation;
    }

    // True while a join or sync round is in flight.
    protected synchronized boolean rebalanceInProgress() {
        return this.state == MemberState.PREPARING_REBALANCE || this.state == MemberState.COMPLETING_REBALANCE;
    }

    protected synchronized String memberId() {
        return generation.memberId;
    }

    /**
     * Move back to UNJOINED and clear the generation, optionally keeping the member id.
     *
     * @param reason              logged explanation for the reset
     * @param shouldResetMemberId true to also forget the member id
     */
    private synchronized void resetStateAndGeneration(final String reason, final boolean shouldResetMemberId) {
        log.info("Resetting generation {}due to: {}", shouldResetMemberId ? "and member id " : "", reason);

        state = MemberState.UNJOINED;
        if (shouldResetMemberId) {
            generation = Generation.NO_GENERATION;
        } else {
            // keep member id since it might be still valid, to avoid to wait for the old member id leaving group
            // until rebalance timeout in next rebalance
            generation = new Generation(Generation.NO_GENERATION.generationId, generation.memberId, null);
        }
    }

    // Reset state/generation and flag that a rejoin (with a fresh onJoinPrepare) is needed.
    private synchronized void resetStateAndRejoin(final String reason, final boolean shouldResetMemberId) {
        resetStateAndGeneration(reason, shouldResetMemberId);
        requestRejoin(reason);
        needsJoinPrepare = true;
    }

    // Reset state after an error response from the given API, recording the error in the reason.
    synchronized void resetStateOnResponseError(ApiKeys api, Errors error, boolean shouldResetMemberId) {
        final String reason = String.format("encountered %s from %s response", error, api);
        resetStateAndRejoin(reason, shouldResetMemberId);
    }

    synchronized void resetGenerationOnLeaveGroup() {
        resetStateAndRejoin("consumer pro-actively leaving the group", true);
    }

    public synchronized void requestRejoinIfNecessary(final String shortReason,
                                                      final String fullReason) {
        if
 (!this.rejoinNeeded) {
            requestRejoin(shortReason, fullReason);
        }
    }

    // Convenience overload: the short reason doubles as the full (logged) reason.
    public synchronized void requestRejoin(final String shortReason) {
        requestRejoin(shortReason, shortReason);
    }

    /**
     * Request to rejoin the group.
     *
     * @param shortReason This is the reason passed up to the group coordinator. It must be
     *                    reasonably small.
     * @param fullReason  This is the reason logged locally.
     */
    public synchronized void requestRejoin(final String shortReason,
                                           final String fullReason) {
        log.info("Request joining group due to: {}", fullReason);
        this.rejoinReason = shortReason;
        this.rejoinNeeded = true;
    }

    // A null protocol type from the broker is treated as consistent.
    private boolean isProtocolTypeInconsistent(String protocolType) {
        return protocolType != null && !protocolType.equals(protocolType());
    }

    /**
     * Close the coordinator, waiting if needed to send LeaveGroup.
     */
    @Override
    public final void close() {
        close(time.timer(0));
    }

    /**
     * @throws KafkaException if the rebalance callback throws exception
     */
    protected void close(Timer timer) {
        try {
            closeHeartbeatThread();
        } finally {
            // Synchronize after closing the heartbeat thread since heartbeat thread
            // needs this lock to complete and terminate after close flag is set.
            synchronized (this) {
                if (rebalanceConfig.leaveGroupOnClose) {
                    onLeavePrepare();
                    maybeLeaveGroup("the consumer is being closed");
                }

                // At this point, there may be pending commits (async commits or sync commits that were
                // interrupted using wakeup) and the leave group request which have been queued, but not
                // yet sent to the broker. Wait up to close timeout for these pending requests to be processed.
                // If coordinator is not known, requests are aborted.
Node coordinator = checkAndGetCoordinator(); if (coordinator != null && !client.awaitPendingRequests(coordinator, timer)) log.warn("Close timed out with {} pending requests to coordinator, terminating client connections", client.pendingRequestCount(coordinator)); } } } /** * Sends LeaveGroupRequest and logs the {@code leaveReason}, unless this member is using static membership or is already * not part of the group (ie does not have a valid member id, is in the UNJOINED state, or the coordinator is unknown). * * @param leaveReason the reason to leave the group for logging * @throws KafkaException if the rebalance callback throws exception */ public synchronized RequestFuture<Void> maybeLeaveGroup(String leaveReason) { RequestFuture<Void> future = null; // Starting from 2.3, only dynamic members will send LeaveGroupRequest to the broker, // consumer with valid group.instance.id is viewed as static member that never sends LeaveGroup, // and the membership expiration is only controlled by session timeout. if (isDynamicMember() && !coordinatorUnknown() && state != MemberState.UNJOINED && generation.hasMemberId()) { // this is a minimal effort attempt to leave the group. we do not // attempt any resending if the request fails or times out. 
log.info("Member {} sending LeaveGroup request to coordinator {} due to {}", generation.memberId, coordinator, leaveReason); LeaveGroupRequest.Builder request = new LeaveGroupRequest.Builder( rebalanceConfig.groupId, Collections.singletonList(new MemberIdentity().setMemberId(generation.memberId).setReason(JoinGroupRequest.maybeTruncateReason(leaveReason))) ); future = client.send(coordinator, request).compose(new LeaveGroupResponseHandler(generation)); client.pollNoWakeup(); } resetGenerationOnLeaveGroup(); return future; } protected boolean isDynamicMember() { return !rebalanceConfig.groupInstanceId.isPresent(); } private class LeaveGroupResponseHandler extends CoordinatorResponseHandler<LeaveGroupResponse, Void> { private LeaveGroupResponseHandler(final Generation generation) { super(generation); } @Override public void handle(LeaveGroupResponse leaveResponse, RequestFuture<Void> future) { final List<MemberResponse> members = leaveResponse.memberResponses(); if (members.size() > 1) { future.raise(new IllegalStateException("The expected leave group response " + "should only contain no more than one member info, however get " + members)); } final Errors error = leaveResponse.error(); if (error == Errors.NONE) { log.debug("LeaveGroup response with {} returned successfully: {}", sentGeneration, response); future.complete(null); } else { log.error("LeaveGroup request with {} failed with error: {}", sentGeneration, error.message()); future.raise(error); } } } // visible for testing synchronized RequestFuture<Void> sendHeartbeatRequest() { log.debug("Sending Heartbeat request with generation {} and member id {} to coordinator {}", generation.generationId, generation.memberId, coordinator); HeartbeatRequest.Builder requestBuilder = new HeartbeatRequest.Builder(new HeartbeatRequestData() .setGroupId(rebalanceConfig.groupId) .setMemberId(this.generation.memberId) .setGroupInstanceId(this.rebalanceConfig.groupInstanceId.orElse(null)) 
.setGenerationId(this.generation.generationId));
        return client.send(coordinator, requestBuilder)
                .compose(new HeartbeatResponseHandler(generation));
    }

    /** Interprets Heartbeat responses and maps error codes onto coordinator/generation state changes. */
    private class HeartbeatResponseHandler extends CoordinatorResponseHandler<HeartbeatResponse, Void> {
        private HeartbeatResponseHandler(final Generation generation) {
            super(generation);
        }

        @Override
        public void handle(HeartbeatResponse heartbeatResponse, RequestFuture<Void> future) {
            sensors.heartbeatSensor.record(response.requestLatencyMs());
            Errors error = heartbeatResponse.error();

            if (error == Errors.NONE) {
                log.debug("Received successful Heartbeat response");
                future.complete(null);
            } else if (error == Errors.COORDINATOR_NOT_AVAILABLE
                    || error == Errors.NOT_COORDINATOR) {
                log.info("Attempt to heartbeat failed since coordinator {} is either not started or not valid",
                    coordinator());
                markCoordinatorUnknown(error);
                future.raise(error);
            } else if (error == Errors.REBALANCE_IN_PROGRESS) {
                // since we may be sending the request during rebalance, we should check
                // this case and ignore the REBALANCE_IN_PROGRESS error
                synchronized (AbstractCoordinator.this) {
                    if (state == MemberState.STABLE) {
                        requestRejoin("group is already rebalancing");
                        future.raise(error);
                    } else {
                        log.debug("Ignoring heartbeat response with error {} during {} state", error, state);
                        future.complete(null);
                    }
                }
            } else if (error == Errors.ILLEGAL_GENERATION ||
                       error == Errors.UNKNOWN_MEMBER_ID ||
                       error == Errors.FENCED_INSTANCE_ID) {
                if (generationUnchanged()) {
                    log.info("Attempt to heartbeat with {} and group instance id {} failed due to {}, resetting generation",
                        sentGeneration, rebalanceConfig.groupInstanceId, error);
                    // don't reset generation member ID when ILLEGAL_GENERATION, since the member ID is still valid
                    resetStateOnResponseError(ApiKeys.HEARTBEAT, error, error != Errors.ILLEGAL_GENERATION);
                    future.raise(error);
                } else {
                    // if the generation has changed, then ignore this error
                    log.info("Attempt to heartbeat with stale {} and group instance id {} failed due to {}, ignoring the error",
                        sentGeneration, rebalanceConfig.groupInstanceId, error);
                    future.complete(null);
                }
            } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
                future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId));
            } else {
                future.raise(new KafkaException("Unexpected error in heartbeat response: " + error.message()));
            }
        }
    }

    /**
     * Base handler for coordinator responses: remembers the generation the request was sent with,
     * marks the coordinator unknown on disconnects, and funnels the typed response into {@link #handle}.
     */
    protected abstract class CoordinatorResponseHandler<R, T> extends RequestFutureAdapter<ClientResponse, T> {
        CoordinatorResponseHandler(final Generation generation) {
            this.sentGeneration = generation;
        }

        // Generation at the time the request was sent; compared against the current one on errors.
        final Generation sentGeneration;
        ClientResponse response;

        public abstract void handle(R response, RequestFuture<T> future);

        @Override
        public void onFailure(RuntimeException e, RequestFuture<T> future) {
            // mark the coordinator as dead
            if (e instanceof DisconnectException) {
                markCoordinatorUnknown(true, e.getMessage());
            }
            future.raise(e);
        }

        @Override
        @SuppressWarnings("unchecked")
        public void onSuccess(ClientResponse clientResponse, RequestFuture<T> future) {
            try {
                this.response = clientResponse;
                R responseObj = (R) clientResponse.responseBody();
                handle(responseObj, future);
            } catch (RuntimeException e) {
                if (!future.isDone())
                    future.raise(e);
            }
        }

        // true when the member's current generation still matches the one this request was sent with
        boolean generationUnchanged() {
            synchronized (AbstractCoordinator.this) {
                return generation.equals(sentGeneration);
            }
        }
    }

    /** Builds a rate+total {@link Meter} pair under the given metric group. */
    protected Meter createMeter(Metrics metrics, String groupName, String baseName, String descriptiveName) {
        return new Meter(new WindowedCount(),
                metrics.metricName(baseName + "-rate", groupName,
                        String.format("The number of %s per second", descriptiveName)),
                metrics.metricName(baseName + "-total", groupName,
                        String.format("The total number of %s", descriptiveName)));
    }

    /** Registers and owns the coordinator-level sensors (heartbeat, join, sync, rebalance). */
    private class GroupCoordinatorMetrics {
        public final String metricGrpName;

        public final Sensor heartbeatSensor;
        public final Sensor joinSensor;
        public final Sensor syncSensor;
        public final Sensor successfulRebalanceSensor;
        public final Sensor failedRebalanceSensor;

        public
GroupCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
            this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";

            // Heartbeat latency: max response time plus a rate/total meter.
            this.heartbeatSensor = metrics.sensor("heartbeat-latency");
            this.heartbeatSensor.add(metrics.metricName("heartbeat-response-time-max",
                this.metricGrpName,
                "The max time taken to receive a response to a heartbeat request"), new Max());
            this.heartbeatSensor.add(createMeter(metrics, metricGrpName, "heartbeat", "heartbeats"));

            // JoinGroup latency: avg/max plus a rate/total meter.
            this.joinSensor = metrics.sensor("join-latency");
            this.joinSensor.add(metrics.metricName("join-time-avg",
                this.metricGrpName,
                "The average time taken for a group rejoin"), new Avg());
            this.joinSensor.add(metrics.metricName("join-time-max",
                this.metricGrpName,
                "The max time taken for a group rejoin"), new Max());
            this.joinSensor.add(createMeter(metrics, metricGrpName, "join", "group joins"));

            // SyncGroup latency: avg/max plus a rate/total meter.
            this.syncSensor = metrics.sensor("sync-latency");
            this.syncSensor.add(metrics.metricName("sync-time-avg",
                this.metricGrpName,
                "The average time taken for a group sync"), new Avg());
            this.syncSensor.add(metrics.metricName("sync-time-max",
                this.metricGrpName,
                "The max time taken for a group sync"), new Max());
            this.syncSensor.add(createMeter(metrics, metricGrpName, "sync", "group syncs"));

            // Successful rebalances: latency avg/max/total plus event count and hourly rate.
            this.successfulRebalanceSensor = metrics.sensor("rebalance-latency");
            this.successfulRebalanceSensor.add(metrics.metricName("rebalance-latency-avg",
                this.metricGrpName,
                "The average time taken for a group to complete a successful rebalance, which may be composed of " +
                    "several failed re-trials until it succeeded"), new Avg());
            this.successfulRebalanceSensor.add(metrics.metricName("rebalance-latency-max",
                this.metricGrpName,
                "The max time taken for a group to complete a successful rebalance, which may be composed of " +
                    "several failed re-trials until it succeeded"), new Max());
            this.successfulRebalanceSensor.add(metrics.metricName("rebalance-latency-total",
                this.metricGrpName,
                "The total number of milliseconds this consumer has spent in successful rebalances since creation"),
                new CumulativeSum());
            this.successfulRebalanceSensor.add(
                metrics.metricName("rebalance-total",
                    this.metricGrpName,
                    "The total number of successful rebalance events, each event is composed of " +
                        "several failed re-trials until it succeeded"),
                new CumulativeCount()
            );
            this.successfulRebalanceSensor.add(
                metrics.metricName(
                    "rebalance-rate-per-hour",
                    this.metricGrpName,
                    "The number of successful rebalance events per hour, each event is composed of " +
                        "several failed re-trials until it succeeded"),
                new Rate(TimeUnit.HOURS, new WindowedCount())
            );

            // Failed rebalances: total count and hourly rate.
            this.failedRebalanceSensor = metrics.sensor("failed-rebalance");
            this.failedRebalanceSensor.add(
                metrics.metricName("failed-rebalance-total",
                    this.metricGrpName,
                    "The total number of failed rebalance events"),
                new CumulativeCount()
            );
            this.failedRebalanceSensor.add(
                metrics.metricName(
                    "failed-rebalance-rate-per-hour",
                    this.metricGrpName,
                    "The number of failed rebalance events per hour"),
                new Rate(TimeUnit.HOURS, new WindowedCount())
            );

            // Gauge: seconds since the last successful rebalance, or -1 if none happened yet.
            Measurable lastRebalance = (config, now) -> {
                if (lastRebalanceEndMs == -1L)
                    // if no rebalance is ever triggered, we just return -1.
                    return -1d;
                else
                    return TimeUnit.SECONDS.convert(now - lastRebalanceEndMs, TimeUnit.MILLISECONDS);
            };
            metrics.addMetric(metrics.metricName("last-rebalance-seconds-ago",
                this.metricGrpName,
                "The number of seconds since the last successful rebalance event"),
                lastRebalance);

            // Gauge: seconds since the last heartbeat send, or -1 if none was sent yet.
            Measurable lastHeartbeat = (config, now) -> {
                if (heartbeat.lastHeartbeatSend() == 0L)
                    // if no heartbeat is ever triggered, just return -1.
return -1d;
                else
                    return TimeUnit.SECONDS.convert(now - heartbeat.lastHeartbeatSend(), TimeUnit.MILLISECONDS);
            };
            metrics.addMetric(metrics.metricName("last-heartbeat-seconds-ago",
                this.metricGrpName,
                "The number of seconds since the last coordinator heartbeat was sent"),
                lastHeartbeat);
        }
    }

    /**
     * Background thread that keeps the member alive in the group by sending periodic Heartbeat
     * requests while enabled. All state transitions are guarded by the AbstractCoordinator lock.
     */
    private class HeartbeatThread extends KafkaThread implements AutoCloseable {
        private boolean enabled = false;
        private boolean closed = false;
        // First fatal error seen by this thread; surfaced to the foreground thread.
        private final AtomicReference<RuntimeException> failed = new AtomicReference<>(null);

        private HeartbeatThread() {
            super(HEARTBEAT_THREAD_PREFIX + (rebalanceConfig.groupId.isEmpty() ? "" : " | " + rebalanceConfig.groupId), true);
        }

        public void enable() {
            synchronized (AbstractCoordinator.this) {
                log.debug("Enabling heartbeat thread");
                this.enabled = true;
                heartbeat.resetTimeouts();
                AbstractCoordinator.this.notify();
            }
        }

        public void disable() {
            synchronized (AbstractCoordinator.this) {
                log.debug("Disabling heartbeat thread");
                this.enabled = false;
            }
        }

        public void close() {
            synchronized (AbstractCoordinator.this) {
                this.closed = true;
                AbstractCoordinator.this.notify();
            }
        }

        private boolean hasFailed() {
            return failed.get() != null;
        }

        private RuntimeException failureCause() {
            return failed.get();
        }

        @Override
        public void run() {
            try {
                log.debug("Heartbeat thread started");
                while (true) {
                    synchronized (AbstractCoordinator.this) {
                        if (closed)
                            return;

                        if (!enabled) {
                            AbstractCoordinator.this.wait();
                            continue;
                        }

                        // we do not need to heartbeat if we are not part of a group yet;
                        // also if we already have fatal error, the client will be
                        // crashed soon, hence we do not need to continue heartbeating either
                        if (state.hasNotJoinedGroup() || hasFailed()) {
                            disable();
                            continue;
                        }

                        client.pollNoWakeup();
                        long now = time.milliseconds();

                        if (coordinatorUnknown()) {
                            if (findCoordinatorFuture != null) {
                                // clear the future so that after the backoff, if the hb still sees coordinator unknown in
                                // the next iteration it will try to re-discover the coordinator in case the main thread cannot
                                clearFindCoordinatorFuture();
                            } else {
                                lookupCoordinator();
                            }
                            // backoff properly
                            AbstractCoordinator.this.wait(rebalanceConfig.retryBackoffMs);
                        } else if (heartbeat.sessionTimeoutExpired(now)) {
                            // the session timeout has expired without seeing a successful heartbeat, so we should
                            // probably make sure the coordinator is still healthy.
                            markCoordinatorUnknown("session timed out without receiving a " +
                                "heartbeat response");
                        } else if (heartbeat.pollTimeoutExpired(now)) {
                            // the poll timeout has expired, which means that the foreground thread has stalled
                            // in between calls to poll().
                            log.warn("consumer poll timeout has expired. This means the time between subsequent calls to poll() " +
                                "was longer than the configured max.poll.interval.ms, which typically implies that " +
                                "the poll loop is spending too much time processing messages. You can address this " +
                                "either by increasing max.poll.interval.ms or by reducing the maximum size of batches " +
                                "returned in poll() with max.poll.records.");

                            maybeLeaveGroup("consumer poll timeout has expired.");
                        } else if (!heartbeat.shouldHeartbeat(now)) {
                            // poll again after waiting for the retry backoff in case the heartbeat failed or the
                            // coordinator disconnected
                            AbstractCoordinator.this.wait(rebalanceConfig.retryBackoffMs);
                        } else {
                            heartbeat.sentHeartbeat(now);
                            final RequestFuture<Void> heartbeatFuture = sendHeartbeatRequest();

                            heartbeatFuture.addListener(new RequestFutureListener<Void>() {
                                @Override
                                public void onSuccess(Void value) {
                                    synchronized (AbstractCoordinator.this) {
                                        heartbeat.receiveHeartbeat();
                                    }
                                }

                                @Override
                                public void onFailure(RuntimeException e) {
                                    synchronized (AbstractCoordinator.this) {
                                        if (e instanceof RebalanceInProgressException) {
                                            // it is valid to continue heartbeating while the group is rebalancing. This
                                            // ensures that the coordinator keeps the member in the group for as long
                                            // as the duration of the rebalance timeout.
// If we stop sending heartbeats, however, then the session timeout may expire before we can rejoin.
                                            heartbeat.receiveHeartbeat();
                                        } else if (e instanceof FencedInstanceIdException) {
                                            log.error("Caught fenced group.instance.id {} error in heartbeat thread", rebalanceConfig.groupInstanceId);
                                            heartbeatThread.failed.set(e);
                                        } else {
                                            heartbeat.failHeartbeat();

                                            // wake up the thread if it's sleeping to reschedule the heartbeat
                                            AbstractCoordinator.this.notify();
                                        }
                                    }
                                }
                            });
                        }
                    }
                }
            } catch (AuthenticationException e) {
                log.error("An authentication error occurred in the heartbeat thread", e);
                this.failed.set(e);
            } catch (GroupAuthorizationException e) {
                log.error("A group authorization error occurred in the heartbeat thread", e);
                this.failed.set(e);
            } catch (InterruptedException | InterruptException e) {
                // clear the interrupt flag before recording the failure
                Thread.interrupted();
                log.error("Unexpected interrupt received in heartbeat thread", e);
                this.failed.set(new RuntimeException(e));
            } catch (Throwable e) {
                log.error("Heartbeat thread failed due to unexpected error", e);
                if (e instanceof RuntimeException)
                    this.failed.set((RuntimeException) e);
                else
                    this.failed.set(new RuntimeException(e));
            } finally {
                log.debug("Heartbeat thread has closed");
                this.closed = true;
            }
        }
    }

    /**
     * Immutable (generationId, memberId, protocolName) triple describing the member's view of
     * the group generation it belongs to.
     */
    protected static class Generation {
        public static final Generation NO_GENERATION = new Generation(
            OffsetCommitRequest.DEFAULT_GENERATION_ID,
            JoinGroupRequest.UNKNOWN_MEMBER_ID,
            null);

        public final int generationId;
        public final String memberId;
        public final String protocolName;

        public Generation(int generationId, String memberId, String protocolName) {
            this.generationId = generationId;
            this.memberId = memberId;
            this.protocolName = protocolName;
        }

        /**
         * @return true if this generation has a valid member id, false otherwise. A member might have an id before
         * it becomes part of a group generation.
         */
        public boolean hasMemberId() {
            return !memberId.isEmpty();
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            final Generation that = (Generation) o;
            return generationId == that.generationId &&
                Objects.equals(memberId, that.memberId) &&
                Objects.equals(protocolName, that.protocolName);
        }

        @Override
        public int hashCode() {
            return Objects.hash(generationId, memberId, protocolName);
        }

        @Override
        public String toString() {
            return "Generation{" +
                "generationId=" + generationId +
                ", memberId='" + memberId + '\'' +
                ", protocol='" + protocolName + '\'' +
                '}';
        }
    }

    // Retriable marker raised when an operation requires a group membership not yet established.
    @SuppressWarnings("serial")
    private static class UnjoinedGroupException extends RetriableException {

    }

    // For testing only below
    final Heartbeat heartbeat() {
        return heartbeat;
    }

    final String rejoinReason() {
        return rejoinReason;
    }

    final synchronized void setLastRebalanceTime(final long timestamp) {
        lastRebalanceEndMs = timestamp;
    }

    /**
     * Check whether given generation id is matching the record within current generation.
     *
     * @param generationId generation id
     * @return true if the two ids are matching.
     */
    final boolean hasMatchingGenerationId(int generationId) {
        return !generation.equals(Generation.NO_GENERATION) &&
            generation.generationId == generationId;
    }

    final boolean hasUnknownGeneration() {
        return generation.equals(Generation.NO_GENERATION);
    }

    /**
     * @return true if the current generation's member ID is valid, false otherwise
     */
    final boolean hasValidMemberId() {
        return !hasUnknownGeneration() && generation.hasMemberId();
    }

    final synchronized void setNewGeneration(final Generation generation) {
        this.generation = generation;
    }

    final synchronized void setNewState(final MemberState state) {
        this.state = state;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.FetchSessionHandler;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.helpers.MessageFormatter;

import java.io.Closeable;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * {@code AbstractFetch} represents the basic state and logic for record fetching processing.
 * @param <K> Type for the message key
 * @param <V> Type for the message value
 */
public abstract class AbstractFetch<K, V> implements Closeable {

    private final Logger log;
    protected final LogContext logContext;
    protected final ConsumerNetworkClient client;
    protected final ConsumerMetadata metadata;
    protected final SubscriptionState subscriptions;
    protected final FetchConfig<K, V> fetchConfig;
    protected final Time time;
    protected final FetchMetricsManager metricsManager;

    // Reused buffers for decompressing fetched record batches.
    private final BufferSupplier decompressionBufferSupplier;
    // Fetch responses parsed off the network thread, pending drain by the application thread.
    private final ConcurrentLinkedQueue<CompletedFetch<K, V>> completedFetches;
    // One incremental fetch session handler per broker node id.
    private final Map<Integer, FetchSessionHandler> sessionHandlers;
    // Node ids with an in-flight fetch; at most one fetch per node at a time.
    private final Set<Integer> nodesWithPendingFetchRequests;
    // The fetch currently being drained for records, if any.
    private CompletedFetch<K, V> nextInLineFetch;

    public AbstractFetch(final LogContext logContext,
                         final ConsumerNetworkClient client,
                         final ConsumerMetadata metadata,
                         final SubscriptionState subscriptions,
                         final FetchConfig<K, V> fetchConfig,
                         final FetchMetricsManager metricsManager,
                         final Time time) {
        this.log = logContext.logger(AbstractFetch.class);
        this.logContext = logContext;
        this.client = client;
        this.metadata = metadata;
        this.subscriptions = subscriptions;
        this.fetchConfig = fetchConfig;
        this.decompressionBufferSupplier = BufferSupplier.create();
        this.completedFetches = new ConcurrentLinkedQueue<>();
        this.sessionHandlers = new HashMap<>();
        this.nodesWithPendingFetchRequests = new HashSet<>();
        this.metricsManager = metricsManager;
        this.time = time;
    }

    /**
     * Return whether we have any completed fetches pending return to the user. This method is thread-safe. Has
     * visibility for testing.
     *
     * @return true if there are completed fetches, false otherwise
     */
    boolean hasCompletedFetches() {
        return !completedFetches.isEmpty();
    }

    /**
     * Return whether we have any completed fetches that are fetchable. This method is thread-safe.
     * @return true if there are completed fetches that can be returned, false otherwise
     */
    public boolean hasAvailableFetches() {
        return completedFetches.stream().anyMatch(fetch -> subscriptions.isFetchable(fetch.partition));
    }

    /**
     * Implements the core logic for a successful fetch request/response.
     *
     * @param fetchTarget {@link Node} from which the fetch data was requested
     * @param data {@link FetchSessionHandler.FetchRequestData} that represents the session data
     * @param resp {@link ClientResponse} from which the {@link FetchResponse} will be retrieved
     */
    protected void handleFetchResponse(final Node fetchTarget,
                                       final FetchSessionHandler.FetchRequestData data,
                                       final ClientResponse resp) {
        try {
            final FetchResponse response = (FetchResponse) resp.responseBody();
            final FetchSessionHandler handler = sessionHandler(fetchTarget.id());

            if (handler == null) {
                log.error("Unable to find FetchSessionHandler for node {}. Ignoring fetch response.",
                    fetchTarget.id());
                return;
            }

            final short requestVersion = resp.requestHeader().apiVersion();

            if (!handler.handleResponse(response, requestVersion)) {
                if (response.error() == Errors.FETCH_SESSION_TOPIC_ID_ERROR) {
                    // A topic id changed under the session; refresh metadata before retrying.
                    metadata.requestUpdate();
                }

                return;
            }

            final Map<TopicPartition, FetchResponseData.PartitionData> responseData = response.responseData(handler.sessionTopicNames(), requestVersion);
            final Set<TopicPartition> partitions = new HashSet<>(responseData.keySet());
            final FetchMetricsAggregator metricAggregator = new FetchMetricsAggregator(metricsManager, partitions);

            for (Map.Entry<TopicPartition, FetchResponseData.PartitionData> entry : responseData.entrySet()) {
                TopicPartition partition = entry.getKey();
                FetchRequest.PartitionData requestData = data.sessionPartitions().get(partition);

                if (requestData == null) {
                    String message;

                    if (data.metadata().isFull()) {
                        message = MessageFormatter.arrayFormat(
                            "Response for missing full request partition: partition={}; metadata={}",
                            new Object[]{partition, data.metadata()}).getMessage();
                    } else {
                        message = MessageFormatter.arrayFormat(
                            "Response for missing session request partition: partition={}; metadata={}; toSend={}; toForget={}; toReplace={}",
                            new Object[]{partition, data.metadata(), data.toSend(), data.toForget(), data.toReplace()}).getMessage();
                    }

                    // Received fetch response for missing session partition
                    throw new IllegalStateException(message);
                }

                long fetchOffset = requestData.fetchOffset;
                FetchResponseData.PartitionData partitionData = entry.getValue();

                log.debug("Fetch {} at offset {} for partition {} returned fetch data {}",
                    fetchConfig.isolationLevel, fetchOffset, partition, partitionData);

                CompletedFetch<K, V> completedFetch = new CompletedFetch<>(
                    logContext,
                    subscriptions,
                    fetchConfig,
                    decompressionBufferSupplier,
                    partition,
                    partitionData,
                    metricAggregator,
                    fetchOffset,
                    requestVersion);
                completedFetches.add(completedFetch);
            }

            metricsManager.recordLatency(resp.requestLatencyMs());
        }
finally {
            log.debug("Removing pending request for node {}", fetchTarget);
            nodesWithPendingFetchRequests.remove(fetchTarget.id());
        }
    }

    /**
     * Implements the core logic for a failed fetch request/response.
     *
     * @param fetchTarget {@link Node} from which the fetch data was requested
     * @param e {@link RuntimeException} representing the error that resulted in the failure
     */
    protected void handleFetchResponse(final Node fetchTarget, final RuntimeException e) {
        try {
            final FetchSessionHandler handler = sessionHandler(fetchTarget.id());

            if (handler != null) {
                handler.handleError(e);
                // Drop preferred read replicas for this session's partitions so subsequent
                // fetches go back to the partition leaders.
                handler.sessionTopicPartitions().forEach(subscriptions::clearPreferredReadReplica);
            }
        } finally {
            log.debug("Removing pending request for node {}", fetchTarget);
            nodesWithPendingFetchRequests.remove(fetchTarget.id());
        }
    }

    /**
     * Creates a new {@link FetchRequest fetch request} in preparation for sending to the Kafka cluster.
     *
     * @param fetchTarget {@link Node} from which the fetch data will be requested
     * @param requestData {@link FetchSessionHandler.FetchRequestData} that represents the session data
     * @return {@link FetchRequest.Builder} that can be submitted to the broker
     */
    protected FetchRequest.Builder createFetchRequest(final Node fetchTarget,
                                                      final FetchSessionHandler.FetchRequestData requestData) {
        // Version 12 is the maximum version that could be used without topic IDs. See FetchRequest.json for schema
        // changelog.
        final short maxVersion = requestData.canUseTopicIds() ?
            ApiKeys.FETCH.latestVersion() : (short) 12;

        final FetchRequest.Builder request = FetchRequest.Builder
            .forConsumer(maxVersion, fetchConfig.maxWaitMs, fetchConfig.minBytes, requestData.toSend())
            .isolationLevel(fetchConfig.isolationLevel)
            .setMaxBytes(fetchConfig.maxBytes)
            .metadata(requestData.metadata())
            .removed(requestData.toForget())
            .replaced(requestData.toReplace())
            .rackId(fetchConfig.clientRackId);

        log.debug("Sending {} {} to broker {}", fetchConfig.isolationLevel, requestData, fetchTarget);

        // We add the node to the set of nodes with pending fetch requests before adding the
        // listener because the future may have been fulfilled on another thread (e.g. during a
        // disconnection being handled by the heartbeat thread) which will mean the listener
        // will be invoked synchronously.
        log.debug("Adding pending request for node {}", fetchTarget);
        nodesWithPendingFetchRequests.add(fetchTarget.id());

        return request;
    }

    /**
     * Return the fetched records, empty the record buffer and update the consumed position.
     *
     * <p>
     *
     * NOTE: returning an {@link Fetch#isEmpty empty} fetch guarantees the consumed position is not updated.
     *
     * @return A {@link Fetch} for the requested partitions
     * @throws OffsetOutOfRangeException If there is OffsetOutOfRange error in fetchResponse and
     *         the defaultResetPolicy is NONE
     * @throws TopicAuthorizationException If there is TopicAuthorization error in fetchResponse.
*/
    public Fetch<K, V> collectFetch() {
        Fetch<K, V> fetch = Fetch.empty();
        Queue<CompletedFetch<K, V>> pausedCompletedFetches = new ArrayDeque<>();
        int recordsRemaining = fetchConfig.maxPollRecords;

        try {
            while (recordsRemaining > 0) {
                if (nextInLineFetch == null || nextInLineFetch.isConsumed) {
                    CompletedFetch<K, V> records = completedFetches.peek();
                    if (records == null)
                        break;

                    if (!records.initialized) {
                        try {
                            nextInLineFetch = initializeCompletedFetch(records);
                        } catch (Exception e) {
                            // Remove a completedFetch upon a parse with exception if (1) it contains no records, and
                            // (2) there are no fetched records with actual content preceding this exception.
                            // The first condition ensures that the completedFetches is not stuck with the same completedFetch
                            // in cases such as the TopicAuthorizationException, and the second condition ensures that no
                            // potential data loss due to an exception in a following record.
                            if (fetch.isEmpty() && FetchResponse.recordsOrFail(records.partitionData).sizeInBytes() == 0) {
                                completedFetches.poll();
                            }

                            throw e;
                        }
                    } else {
                        nextInLineFetch = records;
                    }

                    completedFetches.poll();
                } else if (subscriptions.isPaused(nextInLineFetch.partition)) {
                    // when the partition is paused we add the records back to the completedFetches queue instead of draining
                    // them so that they can be returned on a subsequent poll if the partition is resumed at that time
                    log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
                    pausedCompletedFetches.add(nextInLineFetch);
                    nextInLineFetch = null;
                } else {
                    Fetch<K, V> nextFetch = fetchRecords(recordsRemaining);
                    recordsRemaining -= nextFetch.numRecords();
                    fetch.add(nextFetch);
                }
            }
        } catch (KafkaException e) {
            // Only surface the error when nothing was collected; otherwise return the collected
            // records and let the error resurface on a later poll.
            if (fetch.isEmpty())
                throw e;
        } finally {
            // add any polled completed fetches for paused partitions back to the completed fetches queue to be
            // re-evaluated in the next poll
            completedFetches.addAll(pausedCompletedFetches);
        }

        return fetch;
    }

    private Fetch<K, V>
fetchRecords(final int maxRecords) {
        // Drains up to maxRecords records from nextInLineFetch and advances the consumed position.
        if (!subscriptions.isAssigned(nextInLineFetch.partition)) {
            // this can happen when a rebalance happened before fetched records are returned to the consumer's poll call
            log.debug("Not returning fetched records for partition {} since it is no longer assigned",
                nextInLineFetch.partition);
        } else if (!subscriptions.isFetchable(nextInLineFetch.partition)) {
            // this can happen when a partition is paused before fetched records are returned to the consumer's
            // poll call or if the offset is being reset
            log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable",
                nextInLineFetch.partition);
        } else {
            SubscriptionState.FetchPosition position = subscriptions.position(nextInLineFetch.partition);

            if (position == null) {
                throw new IllegalStateException("Missing position for fetchable partition " + nextInLineFetch.partition);
            }

            if (nextInLineFetch.nextFetchOffset == position.offset) {
                List<ConsumerRecord<K, V>> partRecords = nextInLineFetch.fetchRecords(maxRecords);

                log.trace("Returning {} fetched records at offset {} for assigned partition {}",
                    partRecords.size(), position, nextInLineFetch.partition);

                boolean positionAdvanced = false;

                if (nextInLineFetch.nextFetchOffset > position.offset) {
                    SubscriptionState.FetchPosition nextPosition = new SubscriptionState.FetchPosition(
                        nextInLineFetch.nextFetchOffset,
                        nextInLineFetch.lastEpoch,
                        position.currentLeader);
                    log.trace("Updating fetch position from {} to {} for partition {} and returning {} records from `poll()`",
                        position, nextPosition, nextInLineFetch.partition, partRecords.size());
                    subscriptions.position(nextInLineFetch.partition, nextPosition);
                    positionAdvanced = true;
                }

                Long partitionLag = subscriptions.partitionLag(nextInLineFetch.partition, fetchConfig.isolationLevel);
                if (partitionLag != null)
                    metricsManager.recordPartitionLag(nextInLineFetch.partition, partitionLag);

                Long lead = subscriptions.partitionLead(nextInLineFetch.partition);
                if (lead != null) {
                    metricsManager.recordPartitionLead(nextInLineFetch.partition, lead);
                }

                return Fetch.forPartition(nextInLineFetch.partition, partRecords, positionAdvanced);
            } else {
                // these records aren't next in line based on the last consumed position, ignore them
                // they must be from an obsolete request
                log.debug("Ignoring fetched records for {} at offset {} since the current position is {}",
                    nextInLineFetch.partition, nextInLineFetch.nextFetchOffset, position);
            }
        }

        log.trace("Draining fetched records for partition {}", nextInLineFetch.partition);
        nextInLineFetch.drain();

        return Fetch.empty();
    }

    // Fetchable partitions excluding any with buffered or currently-draining data.
    private List<TopicPartition> fetchablePartitions() {
        Set<TopicPartition> exclude = new HashSet<>();

        if (nextInLineFetch != null && !nextInLineFetch.isConsumed) {
            exclude.add(nextInLineFetch.partition);
        }

        for (CompletedFetch<K, V> completedFetch : completedFetches) {
            exclude.add(completedFetch.partition);
        }

        return subscriptions.fetchablePartitions(tp -> !exclude.contains(tp));
    }

    /**
     * Determine from which replica to read: the <i>preferred</i> or the <i>leader</i>. The preferred replica is used
     * iff:
     *
     * <ul>
     *     <li>A preferred replica was previously set</li>
     *     <li>We're still within the lease time for the preferred replica</li>
     *     <li>The replica is still online/available</li>
     * </ul>
     *
     * If any of the above are not met, the leader node is returned.
*
     * @param partition {@link TopicPartition} for which we want to fetch data
     * @param leaderReplica {@link Node} for the leader of the given partition
     * @param currentTimeMs Current time in milliseconds; used to determine if we're within the optional lease window
     * @return Replica {@link Node node} from which to request the data
     * @see SubscriptionState#preferredReadReplica
     * @see SubscriptionState#updatePreferredReadReplica
     */
    Node selectReadReplica(final TopicPartition partition, final Node leaderReplica, final long currentTimeMs) {
        Optional<Integer> nodeId = subscriptions.preferredReadReplica(partition, currentTimeMs);

        if (nodeId.isPresent()) {
            Optional<Node> node = nodeId.flatMap(id -> metadata.fetch().nodeIfOnline(partition, id));
            if (node.isPresent()) {
                return node.get();
            } else {
                log.trace("Not fetching from {} for partition {} since it is marked offline or is missing from our metadata," +
                    " using the leader instead.", nodeId, partition);

                // Note that this condition may happen due to stale metadata, so we clear preferred replica and
                // refresh metadata.
                requestMetadataUpdate(partition);
                return leaderReplica;
            }
        } else {
            return leaderReplica;
        }
    }

    /**
     * Create fetch requests for all nodes for which we have assigned partitions
     * that have no existing requests in flight.
     */
    protected Map<Node, FetchSessionHandler.FetchRequestData> prepareFetchRequests() {
        // Update metrics in case there was an assignment change
        metricsManager.maybeUpdateAssignment(subscriptions);

        Map<Node, FetchSessionHandler.Builder> fetchable = new LinkedHashMap<>();
        long currentTimeMs = time.milliseconds();
        Map<String, Uuid> topicIds = metadata.topicIds();

        for (TopicPartition partition : fetchablePartitions()) {
            SubscriptionState.FetchPosition position = subscriptions.position(partition);

            if (position == null)
                throw new IllegalStateException("Missing position for fetchable partition " + partition);

            Optional<Node> leaderOpt = position.currentLeader.leader;

            if (!leaderOpt.isPresent()) {
                log.debug("Requesting metadata update for partition {} since the position {} is missing the current leader node", partition, position);
                metadata.requestUpdate();
                continue;
            }

            // Use the preferred read replica if set, otherwise the partition's leader
            Node node = selectReadReplica(partition, leaderOpt.get(), currentTimeMs);

            if (client.isUnavailable(node)) {
                client.maybeThrowAuthFailure(node);

                // If we try to send during the reconnect backoff window, then the request is just
                // going to be failed anyway before being sent, so skip sending the request for now
                log.trace("Skipping fetch for partition {} because node {} is awaiting reconnect backoff", partition, node);
            } else if (nodesWithPendingFetchRequests.contains(node.id())) {
                log.trace("Skipping fetch for partition {} because previous request to {} has not been processed", partition, node);
            } else {
                // if there is a leader and no in-flight requests, issue a new fetch
                FetchSessionHandler.Builder builder = fetchable.computeIfAbsent(node, k -> {
                    FetchSessionHandler fetchSessionHandler = sessionHandlers.computeIfAbsent(node.id(), n -> new FetchSessionHandler(logContext, n));
                    return fetchSessionHandler.newBuilder();
                });
                Uuid topicId = topicIds.getOrDefault(partition.topic(), Uuid.ZERO_UUID);
                FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(topicId,
                    position.offset,
                    FetchRequest.INVALID_LOG_START_OFFSET,
                    fetchConfig.fetchSize,
                    position.currentLeader.epoch,
                    Optional.empty());
                builder.add(partition, partitionData);

                log.debug("Added {} fetch request for partition {} at position {} to node {}", fetchConfig.isolationLevel,
                    partition, position, node);
            }
        }

        Map<Node, FetchSessionHandler.FetchRequestData> reqs = new LinkedHashMap<>();
        for (Map.Entry<Node, FetchSessionHandler.Builder> entry : fetchable.entrySet()) {
            reqs.put(entry.getKey(), entry.getValue().build());
        }
        return reqs;
    }

    /**
     * Initialize a CompletedFetch object.
     */
    private CompletedFetch<K, V> initializeCompletedFetch(final CompletedFetch<K, V> completedFetch) {
        final TopicPartition tp = completedFetch.partition;
        final Errors error = Errors.forCode(completedFetch.partitionData.errorCode());
        boolean recordMetrics = true;

        try {
            if (!subscriptions.hasValidPosition(tp)) {
                // this can happen when a rebalance happened while fetch is still in-flight
                log.debug("Ignoring fetched records for partition {} since it no longer has valid position", tp);
                return null;
            } else if (error == Errors.NONE) {
                final CompletedFetch<K, V> ret = handleInitializeCompletedFetchSuccess(completedFetch);
                recordMetrics = ret == null;
                return ret;
            } else {
                handleInitializeCompletedFetchErrors(completedFetch, error);
                return null;
            }
        } finally {
            if (recordMetrics) {
                completedFetch.recordAggregatedMetrics(0, 0);
            }

            if (error != Errors.NONE)
                // we move the partition to the end if there was an error. This way, it's more likely that partitions for
                // the same topic can remain together (allowing for more efficient serialization).
subscriptions.movePartitionToEnd(tp); } } private CompletedFetch<K, V> handleInitializeCompletedFetchSuccess(final CompletedFetch<K, V> completedFetch) { final TopicPartition tp = completedFetch.partition; final long fetchOffset = completedFetch.nextFetchOffset; // we are interested in this fetch only if the beginning offset matches the // current consumed position SubscriptionState.FetchPosition position = subscriptions.position(tp); if (position == null || position.offset != fetchOffset) { log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " + "the expected offset {}", tp, fetchOffset, position); return null; } final FetchResponseData.PartitionData partition = completedFetch.partitionData; log.trace("Preparing to read {} bytes of data for partition {} with offset {}", FetchResponse.recordsSize(partition), tp, position); Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partition).batches().iterator(); if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) { if (completedFetch.requestVersion < 3) { // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException. Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset); throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions + " whose size is larger than the fetch size " + fetchConfig.fetchSize + " and hence cannot be returned. Please considering upgrading your broker to 0.10.1.0 or " + "newer to avoid this issue. Alternately, increase the fetch size on the client (using " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions); } else { // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74) throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset + ". 
Received a non-empty fetch response from the server, but no " + "complete records were found."); } } if (partition.highWatermark() >= 0) { log.trace("Updating high watermark for partition {} to {}", tp, partition.highWatermark()); subscriptions.updateHighWatermark(tp, partition.highWatermark()); } if (partition.logStartOffset() >= 0) { log.trace("Updating log start offset for partition {} to {}", tp, partition.logStartOffset()); subscriptions.updateLogStartOffset(tp, partition.logStartOffset()); } if (partition.lastStableOffset() >= 0) { log.trace("Updating last stable offset for partition {} to {}", tp, partition.lastStableOffset()); subscriptions.updateLastStableOffset(tp, partition.lastStableOffset()); } if (FetchResponse.isPreferredReplica(partition)) { subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica(), () -> { long expireTimeMs = time.milliseconds() + metadata.metadataExpireMs(); log.debug("Updating preferred read replica for partition {} to {}, set to expire at {}", tp, partition.preferredReadReplica(), expireTimeMs); return expireTimeMs; }); } completedFetch.initialized = true; return completedFetch; } private void handleInitializeCompletedFetchErrors(final CompletedFetch<K, V> completedFetch, final Errors error) { final TopicPartition tp = completedFetch.partition; final long fetchOffset = completedFetch.nextFetchOffset; if (error == Errors.NOT_LEADER_OR_FOLLOWER || error == Errors.REPLICA_NOT_AVAILABLE || error == Errors.KAFKA_STORAGE_ERROR || error == Errors.FENCED_LEADER_EPOCH || error == Errors.OFFSET_NOT_AVAILABLE) { log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName()); requestMetadataUpdate(tp); } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { log.warn("Received unknown topic or partition error in fetch for partition {}", tp); requestMetadataUpdate(tp); } else if (error == Errors.UNKNOWN_TOPIC_ID) { log.warn("Received unknown topic ID error in fetch for partition {}", 
tp); requestMetadataUpdate(tp); } else if (error == Errors.INCONSISTENT_TOPIC_ID) { log.warn("Received inconsistent topic ID error in fetch for partition {}", tp); requestMetadataUpdate(tp); } else if (error == Errors.OFFSET_OUT_OF_RANGE) { Optional<Integer> clearedReplicaId = subscriptions.clearPreferredReadReplica(tp); if (!clearedReplicaId.isPresent()) { // If there's no preferred replica to clear, we're fetching from the leader so handle this error normally SubscriptionState.FetchPosition position = subscriptions.position(tp); if (position == null || fetchOffset != position.offset) { log.debug("Discarding stale fetch response for partition {} since the fetched offset {} " + "does not match the current offset {}", tp, fetchOffset, position); } else { handleOffsetOutOfRange(position, tp); } } else { log.debug("Unset the preferred read replica {} for partition {} since we got {} when fetching {}", clearedReplicaId.get(), tp, error, fetchOffset); } } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { //we log the actual partition and not just the topic to help with ACL propagation issues in large clusters log.warn("Not authorized to read from partition {}.", tp); throw new TopicAuthorizationException(Collections.singleton(tp.topic())); } else if (error == Errors.UNKNOWN_LEADER_EPOCH) { log.debug("Received unknown leader epoch error in fetch for partition {}", tp); } else if (error == Errors.UNKNOWN_SERVER_ERROR) { log.warn("Unknown server error while fetching offset {} for topic-partition {}", fetchOffset, tp); } else if (error == Errors.CORRUPT_MESSAGE) { throw new KafkaException("Encountered corrupt message when fetching offset " + fetchOffset + " for topic-partition " + tp); } else { throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching at offset " + fetchOffset + " from topic-partition " + tp); } } private void handleOffsetOutOfRange(final SubscriptionState.FetchPosition fetchPosition, final TopicPartition 
topicPartition) { String errorMessage = "Fetch position " + fetchPosition + " is out of range for partition " + topicPartition; if (subscriptions.hasDefaultOffsetResetPolicy()) { log.info("{}, resetting offset", errorMessage); subscriptions.requestOffsetReset(topicPartition); } else { log.info("{}, raising error to the application since no reset policy is configured", errorMessage); throw new OffsetOutOfRangeException(errorMessage, Collections.singletonMap(topicPartition, fetchPosition.offset)); } } /** * Clear the buffered data which are not a part of newly assigned partitions. Any previously * {@link CompletedFetch fetched data} is dropped if it is for a partition that is no longer in * {@code assignedPartitions}. * * @param assignedPartitions Newly-assigned {@link TopicPartition} */ public void clearBufferedDataForUnassignedPartitions(final Collection<TopicPartition> assignedPartitions) { final Iterator<CompletedFetch<K, V>> completedFetchesItr = completedFetches.iterator(); while (completedFetchesItr.hasNext()) { final CompletedFetch<K, V> completedFetch = completedFetchesItr.next(); final TopicPartition tp = completedFetch.partition; if (!assignedPartitions.contains(tp)) { log.debug("Removing {} from buffered data as it is no longer an assigned partition", tp); completedFetch.drain(); completedFetchesItr.remove(); } } if (nextInLineFetch != null && !assignedPartitions.contains(nextInLineFetch.partition)) { nextInLineFetch.drain(); nextInLineFetch = null; } } /** * Clear the buffered data which are not a part of newly assigned topics * * @param assignedTopics newly assigned topics */ public void clearBufferedDataForUnassignedTopics(Collection<String> assignedTopics) { final Set<TopicPartition> currentTopicPartitions = new HashSet<>(); for (TopicPartition tp : subscriptions.assignedPartitions()) { if (assignedTopics.contains(tp.topic())) { currentTopicPartitions.add(tp); } } clearBufferedDataForUnassignedPartitions(currentTopicPartitions); } protected 
FetchSessionHandler sessionHandler(int node) { return sessionHandlers.get(node); } // Visible for testing void maybeCloseFetchSessions(final Timer timer) { final Cluster cluster = metadata.fetch(); final List<RequestFuture<ClientResponse>> requestFutures = new ArrayList<>(); sessionHandlers.forEach((fetchTargetNodeId, sessionHandler) -> { // set the session handler to notify close. This will set the next metadata request to send close message. sessionHandler.notifyClose(); final int sessionId = sessionHandler.sessionId(); // FetchTargetNode may not be available as it may have disconnected the connection. In such cases, we will // skip sending the close request. final Node fetchTarget = cluster.nodeById(fetchTargetNodeId); if (fetchTarget == null || client.isUnavailable(fetchTarget)) { log.debug("Skip sending close session request to broker {} since it is not reachable", fetchTarget); return; } final FetchRequest.Builder request = createFetchRequest(fetchTarget, sessionHandler.newBuilder().build()); final RequestFuture<ClientResponse> responseFuture = client.send(fetchTarget, request); responseFuture.addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse value) { log.debug("Successfully sent a close message for fetch session: {} to node: {}", sessionId, fetchTarget); } @Override public void onFailure(RuntimeException e) { log.debug("Unable to a close message for fetch session: {} to node: {}. " + "This may result in unnecessary fetch sessions at the broker.", sessionId, fetchTarget, e); } }); requestFutures.add(responseFuture); }); // Poll to ensure that request has been written to the socket. Wait until either the timer has expired or until // all requests have received a response. 
while (timer.notExpired() && !requestFutures.stream().allMatch(RequestFuture::isDone)) { client.poll(timer, null, true); } if (!requestFutures.stream().allMatch(RequestFuture::isDone)) { // we ran out of time before completing all futures. It is ok since we don't want to block the shutdown // here. log.debug("All requests couldn't be sent in the specific timeout period {}ms. " + "This may result in unnecessary fetch sessions at the broker. Consider increasing the timeout passed for " + "KafkaConsumer.close(Duration timeout)", timer.timeoutMs()); } } public void close(final Timer timer) { // we do not need to re-enable wakeups since we are closing already client.disableWakeups(); if (nextInLineFetch != null) { nextInLineFetch.drain(); nextInLineFetch = null; } maybeCloseFetchSessions(timer); Utils.closeQuietly(decompressionBufferSupplier, "decompressionBufferSupplier"); sessionHandlers.clear(); } @Override public void close() { close(time.timer(0)); } private void requestMetadataUpdate(final TopicPartition topicPartition) { metadata.requestUpdate(); subscriptions.clearPreferredReadReplica(topicPartition); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/AbstractPartitionAssignor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Abstract assignor implementation which does some common grunt work (in particular collecting
 * partition counts which are always needed in assignors).
 */
public abstract class AbstractPartitionAssignor implements ConsumerPartitionAssignor {
    private static final Logger log = LoggerFactory.getLogger(AbstractPartitionAssignor.class);
    // Sentinel replica array used when synthesizing PartitionInfo without rack information.
    private static final Node[] NO_NODES = new Node[] {Node.noNode()};

    // Used only in unit tests to verify rack-aware assignment when all racks have all partitions.
    boolean preferRackAwareLogic;

    /**
     * Perform the group assignment given the partition counts and member subscriptions
     * @param partitionsPerTopic The number of partitions for each subscribed topic. Topics not in metadata will be excluded
     *                           from this map.
     * @param subscriptions Map from the member id to their respective topic subscription
     * @return Map from each member to the list of partitions assigned to them.
     */
    public abstract Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                             Map<String, Subscription> subscriptions);

    /**
     * Default implementation of assignPartitions() that does not include racks. This is only
     * included to avoid breaking any custom implementation that extends AbstractPartitionAssignor.
     * Note that this class is internal, but to be safe, we are maintaining compatibility.
     */
    public Map<String, List<TopicPartition>> assignPartitions(Map<String, List<PartitionInfo>> partitionsPerTopic,
                                                              Map<String, Subscription> subscriptions) {
        // Collapse the rack-aware partition lists down to plain per-topic counts for the legacy overload.
        Map<String, Integer> partitionCountPerTopic = partitionsPerTopic.entrySet().stream()
            .collect(Collectors.toMap(Entry::getKey, e -> e.getValue().size()));
        return assign(partitionCountPerTopic, subscriptions);
    }

    @Override
    public GroupAssignment assign(Cluster metadata, GroupSubscription groupSubscription) {
        Map<String, Subscription> subscriptions = groupSubscription.groupSubscription();
        // Union of every member's subscribed topics; only these need partition metadata.
        Set<String> allSubscribedTopics = new HashSet<>();
        for (Map.Entry<String, Subscription> subscriptionEntry : subscriptions.entrySet())
            allSubscribedTopics.addAll(subscriptionEntry.getValue().topics());

        Map<String, List<PartitionInfo>> partitionsPerTopic = new HashMap<>();
        for (String topic : allSubscribedTopics) {
            List<PartitionInfo> partitions = metadata.partitionsForTopic(topic);
            if (partitions != null && !partitions.isEmpty()) {
                // Sort by partition index so downstream assignors see a deterministic order.
                partitions = new ArrayList<>(partitions);
                partitions.sort(Comparator.comparingInt(PartitionInfo::partition));
                partitionsPerTopic.put(topic, partitions);
            } else {
                log.debug("Skipping assignment for topic {} since no metadata is available", topic);
            }
        }

        Map<String, List<TopicPartition>> rawAssignments = assignPartitions(partitionsPerTopic, subscriptions);

        // this class maintains no user data, so just wrap the results
        Map<String, Assignment> assignments = new HashMap<>();
        for (Map.Entry<String, List<TopicPartition>> assignmentEntry : rawAssignments.entrySet())
            assignments.put(assignmentEntry.getKey(), new Assignment(assignmentEntry.getValue()));
        return new GroupAssignment(assignments);
    }

    // Multimap-style helper: appends value to the list stored under key, creating the list on first use.
    protected static <K, V> void put(Map<K, List<V>> map, K key, V value) {
        List<V> list = map.computeIfAbsent(key, k -> new ArrayList<>());
        list.add(value);
    }

    // Builds the list [topic-0, topic-1, ..., topic-(numPartitions-1)].
    protected static List<TopicPartition> partitions(String topic, int numPartitions) {
        List<TopicPartition> partitions = new ArrayList<>(numPartitions);
        for (int i = 0; i < numPartitions; i++)
            partitions.add(new TopicPartition(topic, i));
        return partitions;
    }

    // Expands per-topic partition counts into PartitionInfo objects with placeholder (rack-less) nodes,
    // for assignors that only know counts.
    protected static Map<String, List<PartitionInfo>> partitionInfosWithoutRacks(Map<String, Integer> partitionsPerTopic) {
        return partitionsPerTopic.entrySet().stream().collect(Collectors.toMap(Entry::getKey, e -> {
            String topic = e.getKey();
            int numPartitions = e.getValue();
            List<PartitionInfo> partitionInfos = new ArrayList<>(numPartitions);
            for (int i = 0; i < numPartitions; i++)
                partitionInfos.add(new PartitionInfo(topic, i, Node.noNode(), NO_NODES, NO_NODES));
            return partitionInfos;
        }));
    }

    // Rack-aware assignment is only worthwhile when consumer racks overlap partition racks AND at
    // least one partition is confined to a subset of racks (otherwise rack placement cannot matter).
    // preferRackAwareLogic forces it on for tests.
    protected boolean useRackAwareAssignment(Set<String> consumerRacks, Set<String> partitionRacks, Map<TopicPartition, Set<String>> racksPerPartition) {
        if (consumerRacks.isEmpty() || Collections.disjoint(consumerRacks, partitionRacks))
            return false;
        else if (preferRackAwareLogic)
            return true;
        else {
            return !racksPerPartition.values().stream().allMatch(partitionRacks::equals);
        }
    }

    public static class MemberInfo implements Comparable<MemberInfo> {
        public final String memberId;
        public final Optional<String> groupInstanceId;
        public final Optional<String> rackId;

        public MemberInfo(String memberId, Optional<String> groupInstanceId, Optional<String> rackId) {
            this.memberId = memberId;
            this.groupInstanceId = groupInstanceId;
            this.rackId = rackId;
        }

        public MemberInfo(String memberId, Optional<String> groupInstanceId) {
            this(memberId, groupInstanceId, Optional.empty());
        }

        // Orders static members (those with a group.instance.id) before dynamic ones; within each
        // category the comparison is lexicographic on the stable identifier.
        @Override
        public int compareTo(MemberInfo otherMemberInfo) {
            if (this.groupInstanceId.isPresent() &&
                    otherMemberInfo.groupInstanceId.isPresent()) {
                return this.groupInstanceId.get()
                        .compareTo(otherMemberInfo.groupInstanceId.get());
            } else if (this.groupInstanceId.isPresent()) {
                return -1;
            } else if (otherMemberInfo.groupInstanceId.isPresent()) {
                return 1;
            } else {
                return this.memberId.compareTo(otherMemberInfo.memberId);
            }
        }

        @Override
        public boolean equals(Object o) {
            return o instanceof MemberInfo && this.memberId.equals(((MemberInfo) o).memberId);
        }

        /**
         * We could just use member.id to be the hashcode, since it's unique
         * across the group.
         */
        @Override
        public int hashCode() {
            return memberId.hashCode();
        }

        @Override
        public String toString() {
            return "MemberInfo [member.id: " + memberId
                    + ", group.instance.id: " + groupInstanceId.orElse("{}")
                    + "]";
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.internals.Utils.PartitionComparator;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Sticky assignment implementation used by {@link org.apache.kafka.clients.consumer.StickyAssignor} and
 * {@link org.apache.kafka.clients.consumer.CooperativeStickyAssignor}. Sticky assignors are rack-aware.
 * If racks are specified for consumers, we attempt to match consumer racks with partition replica
 * racks on a best-effort basis, prioritizing balanced assignment over rack alignment. Previously
 * owned partitions may be reassigned to improve rack locality. We use rack-aware assignment if both
 * consumer and partition racks are available and some partitions have replicas only on a subset of racks.
 */
public abstract class AbstractStickyAssignor extends AbstractPartitionAssignor {
    private static final Logger log = LoggerFactory.getLogger(AbstractStickyAssignor.class);

    public static final int DEFAULT_GENERATION = -1;
    // Highest member generation observed while parsing subscriptions; used to arbitrate stale ownership claims.
    public int maxGeneration = DEFAULT_GENERATION;

    private PartitionMovements partitionMovements;

    // Keep track of the partitions being migrated from one consumer to another during assignment
    // so the cooperative assignor can adjust the assignment
    protected Map<TopicPartition, String> partitionsTransferringOwnership = new HashMap<>();

    // Immutable (consumer, generation) tuple used while resolving conflicting ownership claims.
    static final class ConsumerGenerationPair {
        final String consumer;
        final int generation;
        ConsumerGenerationPair(String consumer, int generation) {
            this.consumer = consumer;
            this.generation = generation;
        }
    }

    // Per-member view extracted from a Subscription: previously owned partitions plus optional
    // generation and rack metadata.
    public static final class MemberData {
        public final List<TopicPartition> partitions;
        public final Optional<Integer> generation;
        public final Optional<String> rackId;
        public MemberData(List<TopicPartition> partitions, Optional<Integer> generation, Optional<String> rackId) {
            this.partitions = partitions;
            this.generation = generation;
            this.rackId = rackId;
        }

        public MemberData(List<TopicPartition> partitions, Optional<Integer> generation) {
            this(partitions, generation, Optional.empty());
        }
    }

    abstract protected MemberData memberData(Subscription subscription);

    @Override
    public Map<String, List<TopicPartition>> assignPartitions(Map<String, List<PartitionInfo>> partitionsPerTopic,
                                                              Map<String, Subscription> subscriptions) {
        Map<String, List<TopicPartition>> consumerToOwnedPartitions = new HashMap<>();
        Set<TopicPartition> partitionsWithMultiplePreviousOwners = new HashSet<>();

        List<PartitionInfo> allPartitions = new ArrayList<>();
        partitionsPerTopic.values().forEach(allPartitions::addAll);
        RackInfo rackInfo = new RackInfo(allPartitions, subscriptions);

        // Choose the fast constrained algorithm when every member subscribes to the same topics,
        // otherwise fall back to the general algorithm.
        AbstractAssignmentBuilder assignmentBuilder;
        if (allSubscriptionsEqual(partitionsPerTopic.keySet(), subscriptions, consumerToOwnedPartitions, partitionsWithMultiplePreviousOwners)) {
            log.debug("Detected that all consumers were subscribed to same set of topics, invoking the "
                    + "optimized assignment algorithm");
            partitionsTransferringOwnership = new HashMap<>();
            assignmentBuilder = new ConstrainedAssignmentBuilder(partitionsPerTopic, rackInfo, consumerToOwnedPartitions, partitionsWithMultiplePreviousOwners);
        } else {
            log.debug("Detected that not all consumers were subscribed to same set of topics, falling back to the "
                    + "general case assignment algorithm");
            // we must set this to null for the general case so the cooperative assignor knows to compute it from scratch
            partitionsTransferringOwnership = null;
            assignmentBuilder = new GeneralAssignmentBuilder(partitionsPerTopic, rackInfo, consumerToOwnedPartitions, subscriptions);
        }
        return assignmentBuilder.build();
    }

    public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                    Map<String, Subscription> subscriptions) {
        // Legacy count-based entry point: synthesize rack-less PartitionInfo and delegate.
        return assignPartitions(partitionInfosWithoutRacks(partitionsPerTopic), subscriptions);
    }

    /**
     * Returns true iff all consumers have an identical subscription. Also fills out the passed in
     * {@code consumerToOwnedPartitions} with each consumer's previously owned and still-subscribed partitions,
     * and the {@code partitionsWithMultiplePreviousOwners} with any partitions claimed by multiple previous owners
     */
    private boolean allSubscriptionsEqual(Set<String> allTopics,
                                          Map<String, Subscription> subscriptions,
                                          Map<String, List<TopicPartition>> consumerToOwnedPartitions,
                                          Set<TopicPartition> partitionsWithMultiplePreviousOwners) {
        boolean isAllSubscriptionsEqual = true;

        Set<String> subscribedTopics = new HashSet<>();

        // keep track of all previously owned partitions so we can invalidate them if invalid input is
        // detected, eg two consumers somehow claiming the same partition in the same/current generation
        Map<TopicPartition, String> allPreviousPartitionsToOwner = new HashMap<>();

        for (Map.Entry<String, Subscription> subscriptionEntry : subscriptions.entrySet()) {
            final String consumer = subscriptionEntry.getKey();
            final Subscription subscription = subscriptionEntry.getValue();

            // initialize the subscribed topics set if this is the first subscription
            if (subscribedTopics.isEmpty()) {
                subscribedTopics.addAll(subscription.topics());
            } else if (isAllSubscriptionsEqual && !(subscription.topics().size() == subscribedTopics.size() &&
                    subscribedTopics.containsAll(subscription.topics()))) {
                isAllSubscriptionsEqual = false;
            }

            MemberData memberData = memberData(subscription);

            final int memberGeneration = memberData.generation.orElse(DEFAULT_GENERATION);
            maxGeneration = Math.max(maxGeneration, memberGeneration);

            List<TopicPartition> ownedPartitions = new ArrayList<>();
            consumerToOwnedPartitions.put(consumer, ownedPartitions);

            // the member has a valid generation, so we can consider its owned partitions if it has the highest
            // generation amongst
            for (final TopicPartition tp : memberData.partitions) {
                if (allTopics.contains(tp.topic())) {
                    String otherConsumer = allPreviousPartitionsToOwner.put(tp, consumer);
                    if (otherConsumer == null) {
                        // this partition is not owned by other consumer in the same generation
                        ownedPartitions.add(tp);
                    } else {
                        final int otherMemberGeneration = subscriptions.get(otherConsumer).generationId().orElse(DEFAULT_GENERATION);

                        if (memberGeneration == otherMemberGeneration) {
                            // if two members of the same generation own the same partition, revoke the partition
                            log.error("Found multiple consumers {} and {} claiming the same TopicPartition {} in the "
                                    + "same generation {}, this will be invalidated and removed from their previous assignment.",
                                    consumer, otherConsumer, tp, memberGeneration);
                            partitionsWithMultiplePreviousOwners.add(tp);
                            consumerToOwnedPartitions.get(otherConsumer).remove(tp);
                            allPreviousPartitionsToOwner.put(tp, consumer);
                        } else if (memberGeneration > otherMemberGeneration) {
                            // move partition from the member with an older generation to the member with the newer generation
                            ownedPartitions.add(tp);
                            consumerToOwnedPartitions.get(otherConsumer).remove(tp);
                            allPreviousPartitionsToOwner.put(tp, consumer);
                            // NOTE(review): "wil" typo is in the runtime log string; left as-is since log text is behavior.
                            log.warn("Consumer {} in generation {} and consumer {} in generation {} claiming the same " +
                                    "TopicPartition {} in different generations. The topic partition wil be " +
                                    "assigned to the member with the higher generation {}.",
                                    consumer, memberGeneration,
                                    otherConsumer, otherMemberGeneration,
                                    tp, memberGeneration);
                        } else {
                            // let the other member continue to own the topic partition
                            log.warn("Consumer {} in generation {} and consumer {} in generation {} claiming the same " +
                                    "TopicPartition {} in different generations. The topic partition wil be " +
                                    "assigned to the member with the higher generation {}.",
                                    consumer, memberGeneration,
                                    otherConsumer, otherMemberGeneration,
                                    tp, otherMemberGeneration);
                        }
                    }
                }
            }
        }

        return isAllSubscriptionsEqual;
    }

    public boolean isSticky() {
        return partitionMovements.isSticky();
    }

    // Orders topics by how many consumers are interested in them (ties broken lexicographically).
    private static class TopicComparator implements Comparator<String>, Serializable {
        private static final long serialVersionUID = 1L;
        private final Map<String, List<String>> map;

        TopicComparator(Map<String, List<String>> map) {
            this.map = map;
        }

        @Override
        public int compare(String o1, String o2) {
            int ret = map.get(o1).size() - map.get(o2).size();
            if (ret == 0) {
                ret = o1.compareTo(o2);
            }
            return ret;
        }
    }

    // Orders consumers by the size of their current assignment (ties broken lexicographically).
    private static class SubscriptionComparator implements Comparator<String>, Serializable {
        private static final long serialVersionUID = 1L;
        private final Map<String, List<TopicPartition>> map;

        SubscriptionComparator(Map<String, List<TopicPartition>> map) {
            this.map = map;
        }

        @Override
        public int compare(String o1, String o2) {
            int ret = map.get(o1).size() - map.get(o2).size();
            if (ret == 0)
                ret = o1.compareTo(o2);
            return ret;
        }
    }

    /**
     * This class maintains some data structures to simplify lookup of partition movements among consumers. At each point of
     * time during a partition rebalance it keeps track of partition movements corresponding to each topic, and also possible
     * movement (in form a <code>ConsumerPair</code> object) for each partition.
 */
private static class PartitionMovements {
    // topic -> (consumer pair -> partitions of that topic that moved between that pair)
    private final Map<String, Map<ConsumerPair, Set<TopicPartition>>> partitionMovementsByTopic = new HashMap<>();
    // partition -> the (source, destination) consumer pair of its recorded movement
    private final Map<TopicPartition, ConsumerPair> partitionMovements = new HashMap<>();

    // Removes and returns the movement record of the given partition, pruning the per-pair set
    // and the per-topic map when they become empty.
    private ConsumerPair removeMovementRecordOfPartition(TopicPartition partition) {
        ConsumerPair pair = partitionMovements.remove(partition);
        String topic = partition.topic();
        Map<ConsumerPair, Set<TopicPartition>> partitionMovementsForThisTopic = partitionMovementsByTopic.get(topic);
        partitionMovementsForThisTopic.get(pair).remove(partition);
        if (partitionMovementsForThisTopic.get(pair).isEmpty())
            partitionMovementsForThisTopic.remove(pair);
        if (partitionMovementsByTopic.get(topic).isEmpty())
            partitionMovementsByTopic.remove(topic);
        return pair;
    }

    // Records that the given partition moved between the given consumer pair, indexing the record
    // both by partition and by topic.
    private void addPartitionMovementRecord(TopicPartition partition, ConsumerPair pair) {
        partitionMovements.put(partition, pair);
        String topic = partition.topic();
        if (!partitionMovementsByTopic.containsKey(topic))
            partitionMovementsByTopic.put(topic, new HashMap<>());
        Map<ConsumerPair, Set<TopicPartition>> partitionMovementsForThisTopic = partitionMovementsByTopic.get(topic);
        if (!partitionMovementsForThisTopic.containsKey(pair))
            partitionMovementsForThisTopic.put(pair, new HashSet<>());
        partitionMovementsForThisTopic.get(pair).add(partition);
    }

    // Registers the movement of a partition from oldConsumer to newConsumer. If the partition had
    // already moved during this rebalance, the two movements are collapsed into a single record
    // from the original source consumer — unless the partition is simply moving back to it, in
    // which case the record is dropped entirely.
    private void movePartition(TopicPartition partition, String oldConsumer, String newConsumer) {
        ConsumerPair pair = new ConsumerPair(oldConsumer, newConsumer);
        if (partitionMovements.containsKey(partition)) {
            // this partition has previously moved
            ConsumerPair existingPair = removeMovementRecordOfPartition(partition);
            assert existingPair.dstMemberId.equals(oldConsumer);
            if (!existingPair.srcMemberId.equals(newConsumer)) {
                // the partition is not moving back to its previous consumer
                // return new ConsumerPair2(existingPair.src, newConsumer);
                addPartitionMovementRecord(partition, new ConsumerPair(existingPair.srcMemberId, newConsumer));
            }
        } else
            addPartitionMovementRecord(partition, pair);
    }

    // Given a proposed move of `partition` from oldConsumer to newConsumer, returns a partition of
    // the same topic (if any) that is recorded as having moved the opposite way between the same
    // two consumers — moving that one instead avoids creating a movement cycle. Otherwise returns
    // the partition unchanged. If the partition itself has already moved, the chain is followed
    // back to its original source consumer first.
    private TopicPartition getTheActualPartitionToBeMoved(TopicPartition partition, String oldConsumer, String newConsumer) {
        String topic = partition.topic();
        if (!partitionMovementsByTopic.containsKey(topic))
            return partition;
        if (partitionMovements.containsKey(partition)) {
            // this partition has previously moved
            assert oldConsumer.equals(partitionMovements.get(partition).dstMemberId);
            oldConsumer = partitionMovements.get(partition).srcMemberId;
        }
        Map<ConsumerPair, Set<TopicPartition>> partitionMovementsForThisTopic = partitionMovementsByTopic.get(topic);
        ConsumerPair reversePair = new ConsumerPair(newConsumer, oldConsumer);
        if (!partitionMovementsForThisTopic.containsKey(reversePair))
            return partition;
        return partitionMovementsForThisTopic.get(reversePair).iterator().next();
    }

    // Depth-first search over the movement pairs: returns true if a path of movements leads from
    // src to dst, appending the consumers visited along the way to currentPath.
    // NOTE(review): only the first pair whose source equals `src` is followed (the loop returns on
    // the first match rather than backtracking) — presumably sufficient for the cycle lengths this
    // is used to detect.
    private boolean isLinked(String src, String dst, Set<ConsumerPair> pairs, List<String> currentPath) {
        if (src.equals(dst))
            return false;
        if (pairs.isEmpty())
            return false;
        if (new ConsumerPair(src, dst).in(pairs)) {
            currentPath.add(src);
            currentPath.add(dst);
            return true;
        }
        for (ConsumerPair pair: pairs)
            if (pair.srcMemberId.equals(src)) {
                Set<ConsumerPair> reducedSet = new HashSet<>(pairs);
                reducedSet.remove(pair);
                currentPath.add(pair.srcMemberId);
                return isLinked(pair.dstMemberId, dst, reducedSet, currentPath);
            }
        return false;
    }

    // Returns true if `cycle` is a rotation of any already-found cycle: the candidate cycle with
    // its duplicated endpoint dropped is doubled (superCycle), so every rotation of it appears as
    // a contiguous sublist.
    private boolean in(List<String> cycle, Set<List<String>> cycles) {
        List<String> superCycle = new ArrayList<>(cycle);
        superCycle.remove(superCycle.size() - 1);
        superCycle.addAll(cycle);
        for (List<String> foundCycle: cycles) {
            if (foundCycle.size() == cycle.size() && Collections.indexOfSubList(superCycle, foundCycle) != -1)
                return true;
        }
        return false;
    }

    // Returns true if the given movement pairs contain a cycle of length 2, i.e. partitions of the
    // same topic moved in both directions between one pair of consumers. Longer cycles are logged
    // but do not make this method return true (see comment below).
    private boolean hasCycles(Set<ConsumerPair> pairs) {
        Set<List<String>> cycles = new HashSet<>();
        for (ConsumerPair pair: pairs) {
            Set<ConsumerPair> reducedPairs = new HashSet<>(pairs);
            reducedPairs.remove(pair);
            List<String> path = new ArrayList<>(Collections.singleton(pair.srcMemberId));
            if (isLinked(pair.dstMemberId, pair.srcMemberId, reducedPairs, path) && !in(path, cycles)) {
                cycles.add(new ArrayList<>(path));
                log.error("A cycle of length {} was found: {}", path.size() - 1, path);
            }
        }
        // for now we want to make sure there is no partition movements of the same topic between a pair of consumers.
        // the odds of finding a cycle among more than two consumers seem to be very low (according to various randomized
        // tests with the given sticky algorithm) that it should not worth the added complexity of handling those cases.
        for (List<String> cycle: cycles)
            if (cycle.size() == 3) // indicates a cycle of length 2
                return true;
        return false;
    }

    // Returns true if no topic has movement cycles between any pair of consumers; logs the
    // offending topic and its movement pairs otherwise.
    private boolean isSticky() {
        for (Map.Entry<String, Map<ConsumerPair, Set<TopicPartition>>> topicMovements: this.partitionMovementsByTopic.entrySet()) {
            Set<ConsumerPair> topicMovementPairs = topicMovements.getValue().keySet();
            if (hasCycles(topicMovementPairs)) {
                log.error("Stickiness is violated for topic {}"
                    + "\nPartition movements for this topic occurred among the following consumer pairs:"
                    + "\n{}", topicMovements.getKey(), topicMovements.getValue().toString());
                return false;
            }
        }
        return true;
    }
}

/**
 * <code>ConsumerPair</code> represents a pair of Kafka consumer ids involved in a partition reassignment. Each
 * <code>ConsumerPair</code> object, which contains a source (<code>src</code>) and a destination (<code>dst</code>)
 * element, normally corresponds to a particular partition or topic, and indicates that the particular partition or some
 * partition of the particular topic was moved from the source consumer to the destination consumer during the rebalance.
 * This class is used, through the <code>PartitionMovements</code> class, by the sticky assignor and helps in determining
 * whether a partition reassignment results in cycles among the generated graph of consumer pairs.
 */
private static class ConsumerPair {
    // the consumer the partition moved away from
    private final String srcMemberId;
    // the consumer the partition moved to
    private final String dstMemberId;

    ConsumerPair(String srcMemberId, String dstMemberId) {
        this.srcMemberId = srcMemberId;
        this.dstMemberId = dstMemberId;
    }

    public String toString() {
        return this.srcMemberId + "->" + this.dstMemberId;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((this.srcMemberId == null) ? 0 : this.srcMemberId.hashCode());
        result = prime * result + ((this.dstMemberId == null) ? 0 : this.dstMemberId.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null)
            return false;
        if (!getClass().isInstance(obj))
            return false;
        ConsumerPair otherPair = (ConsumerPair) obj;
        return this.srcMemberId.equals(otherPair.srcMemberId) && this.dstMemberId.equals(otherPair.dstMemberId);
    }

    // Returns true if this pair equals any element of the given set.
    private boolean in(Set<ConsumerPair> pairs) {
        for (ConsumerPair pair: pairs)
            if (this.equals(pair))
                return true;
        return false;
    }
}

// Rack information for consumers and partitions, used to keep partitions assigned to consumers
// whose rack hosts a replica of the partition. When rack-aware assignment is not in effect the
// maps below are left empty and every rack check degrades to a no-op.
private class RackInfo {
    // consumer member id -> rack id (empty when rack-aware assignment is not used)
    private final Map<String, String> consumerRacks;
    // partition -> racks of its replicas (empty when rack-aware assignment is not used)
    private final Map<TopicPartition, Set<String>> partitionRacks;
    // partition -> number of consumers located in racks that host a replica of the partition
    private final Map<TopicPartition, Integer> numConsumersByPartition;

    public RackInfo(List<PartitionInfo> partitionInfos, Map<String, Subscription> subscriptions) {
        List<Subscription> consumers = new ArrayList<>(subscriptions.values());
        // group consumers by rack, ignoring consumers with no (or an empty) rack id
        Map<String, List<String>> consumersByRack = new HashMap<>();
        subscriptions.forEach((memberId, subscription) ->
            subscription.rackId().filter(r -> !r.isEmpty()).ifPresent(rackId -> put(consumersByRack, rackId, memberId)));
        Map<String, List<TopicPartition>> partitionsByRack;
        Map<TopicPartition, Set<String>> partitionRacks;
        if (consumersByRack.isEmpty()) {
            // no consumer supplied rack information, so partition racks are irrelevant
            partitionsByRack = Collections.emptyMap();
            partitionRacks = Collections.emptyMap();
        } else {
            partitionRacks = new HashMap<>(partitionInfos.size());
            partitionsByRack = new HashMap<>();
            partitionInfos.forEach(p -> {
                TopicPartition tp = new TopicPartition(p.topic(), p.partition());
Set<String> racks = new HashSet<>(p.replicas().length);
                partitionRacks.put(tp, racks);
                // collect the distinct, non-null racks of this partition's replicas
                Arrays.stream(p.replicas())
                    .map(Node::rack)
                    .filter(Objects::nonNull)
                    .distinct()
                    .forEach(rackId -> {
                        put(partitionsByRack, rackId, tp);
                        racks.add(rackId);
                    });
            });
        }
        if (useRackAwareAssignment(consumersByRack.keySet(), partitionsByRack.keySet(), partitionRacks)) {
            this.consumerRacks = new HashMap<>(consumers.size());
            consumersByRack.forEach((rack, rackConsumers) -> rackConsumers.forEach(c -> consumerRacks.put(c, rack)));
            this.partitionRacks = partitionRacks;
        } else {
            // rack-aware assignment is not applicable: leave both maps empty so all rack checks pass
            this.consumerRacks = Collections.emptyMap();
            this.partitionRacks = Collections.emptyMap();
        }
        // partition -> total number of consumers located in racks hosting a replica of it
        numConsumersByPartition = partitionRacks.entrySet().stream()
            .collect(Collectors.toMap(Entry::getKey, e -> e.getValue().stream()
                .map(r -> consumersByRack.getOrDefault(r, Collections.emptyList()).size())
                .reduce(0, Integer::sum)));
    }

    // Returns true if the consumer has a known rack and the partition has no replica on that rack.
    // Consumers without rack information never mismatch.
    private boolean racksMismatch(String consumer, TopicPartition tp) {
        String consumerRack = consumerRacks.get(consumer);
        Set<String> replicaRacks = partitionRacks.get(tp);
        return consumerRack != null && (replicaRacks == null || !replicaRacks.contains(consumerRack));
    }

    // Sorts partitions ascending by the number of rack-local consumers, so the hardest-to-place
    // partitions (fewest candidate consumers) are assigned first. Returns the input unchanged
    // when no rack information is available.
    private List<TopicPartition> sortPartitionsByRackConsumers(List<TopicPartition> partitions) {
        if (numConsumersByPartition.isEmpty())
            return partitions;
        // Return a sorted linked list of partitions to enable fast updates during rack-aware assignment
        List<TopicPartition> sortedPartitions = new LinkedList<>(partitions);
        sortedPartitions.sort(Comparator.comparing(tp -> numConsumersByPartition.getOrDefault(tp, 0)));
        return sortedPartitions;
    }

    // Returns the index into consumerList (searching circularly starting from firstIndex) of the
    // first consumer whose rack hosts a replica of tp, or -1 if there is no such consumer or no
    // rack information for tp.
    private int nextRackConsumer(TopicPartition tp, List<String> consumerList, int firstIndex) {
        Set<String> racks = partitionRacks.get(tp);
        if (racks == null || racks.isEmpty())
            return -1;
        for (int i = 0; i < consumerList.size(); i++) {
            int index = (firstIndex + i) % consumerList.size();
            String consumer = consumerList.get(index);
            String consumerRack = consumerRacks.get(consumer);
            if (consumerRack != null && racks.contains(consumerRack))
                return index;
        }
        return -1;
    }

    @Override
    public String toString() {
        return "RackInfo(" + "consumerRacks=" + consumerRacks + ", partitionRacks=" + partitionRacks + ")";
    }
}

// Common state and helpers shared by the constrained and general assignment builders.
private abstract class AbstractAssignmentBuilder {
    // subscribed topic -> its partitions
    final Map<String, List<PartitionInfo>> partitionsPerTopic;
    // rack information for consumers and partitions
    final RackInfo rackInfo;
    // each consumer's previously owned and still-subscribed partitions
    final Map<String, List<TopicPartition>> currentAssignment;
    // total number of partitions across all subscribed topics
    final int totalPartitionsCount;

    AbstractAssignmentBuilder(Map<String, List<PartitionInfo>> partitionsPerTopic,
                              RackInfo rackInfo,
                              Map<String, List<TopicPartition>> currentAssignment) {
        this.partitionsPerTopic = partitionsPerTopic;
        this.currentAssignment = currentAssignment;
        this.rackInfo = rackInfo;
        this.totalPartitionsCount = partitionsPerTopic.values().stream().map(List::size).reduce(0, Integer::sum);
    }

    /**
     * Builds the assignment.
     *
     * @return Map from each member to the list of partitions assigned to them.
     */
    abstract Map<String, List<TopicPartition>> build();

    // Expands the given (pre-sorted) topics into the full list of their topic partitions, in the
    // order provided by partitionsPerTopic.
    protected List<TopicPartition> getAllTopicPartitions(List<String> sortedAllTopics) {
        List<TopicPartition> allPartitions = new ArrayList<>(totalPartitionsCount);
        for (String topic : sortedAllTopics) {
            partitionsPerTopic.get(topic).forEach(p -> allPartitions.add(new TopicPartition(p.topic(), p.partition())));
        }
        return allPartitions;
    }
}

/**
 * This constrained assignment optimizes the assignment algorithm when all consumers were subscribed to same set of topics.
 * The method includes the following steps:
 *
 * 1. Reassign previously owned partitions:
 *    a. if owned less than minQuota partitions, just assign all owned partitions, and put the member into unfilled member list.
 *    b. if owned maxQuota or more, and we're still under the number of expected max capacity members, assign maxQuota partitions
 *    c. 
if owned at least "minQuota" of partitions, assign minQuota partitions, and put the member into unfilled member list if * we're still under the number of expected max capacity members * If using rack-aware algorithm, only owned partitions with matching racks are allocated in this step. * 2. Fill remaining members with rack matching up to the expected numbers of maxQuota partitions, otherwise, to minQuota partitions. * Partitions that cannot be aligned on racks within the quota are not assigned in this step. This step is only used if rack-aware. * 3. Fill remaining members up to the expected numbers of maxQuota partitions, otherwise, to minQuota partitions. * For rack-aware algorithm, these are partitions that could not be aligned on racks within the balancing constraints. * */ private class ConstrainedAssignmentBuilder extends AbstractAssignmentBuilder { private final Set<TopicPartition> partitionsWithMultiplePreviousOwners; private final Set<TopicPartition> allRevokedPartitions; // the consumers which may still be assigned one or more partitions to reach expected capacity private final List<String> unfilledMembersWithUnderMinQuotaPartitions; private final LinkedList<String> unfilledMembersWithExactlyMinQuotaPartitions; private final int minQuota; private final int maxQuota; // the expected number of members receiving more than minQuota partitions (zero when minQuota == maxQuota) private final int expectedNumMembersWithOverMinQuotaPartitions; // the current number of members receiving more than minQuota partitions (zero when minQuota == maxQuota) private int currentNumMembersWithOverMinQuotaPartitions; private final Map<String, List<TopicPartition>> assignment; private final List<TopicPartition> assignedPartitions; /** * Constructs a constrained assignment builder. 
* * @param partitionsPerTopic The partitions for each subscribed topic * @param rackInfo Rack information for consumers and racks * @param consumerToOwnedPartitions Each consumer's previously owned and still-subscribed partitions * @param partitionsWithMultiplePreviousOwners The partitions being claimed in the previous assignment of multiple consumers */ ConstrainedAssignmentBuilder(Map<String, List<PartitionInfo>> partitionsPerTopic, RackInfo rackInfo, Map<String, List<TopicPartition>> consumerToOwnedPartitions, Set<TopicPartition> partitionsWithMultiplePreviousOwners) { super(partitionsPerTopic, rackInfo, consumerToOwnedPartitions); this.partitionsWithMultiplePreviousOwners = partitionsWithMultiplePreviousOwners; allRevokedPartitions = new HashSet<>(); unfilledMembersWithUnderMinQuotaPartitions = new LinkedList<>(); unfilledMembersWithExactlyMinQuotaPartitions = new LinkedList<>(); int numberOfConsumers = consumerToOwnedPartitions.size(); minQuota = (int) Math.floor(((double) totalPartitionsCount) / numberOfConsumers); maxQuota = (int) Math.ceil(((double) totalPartitionsCount) / numberOfConsumers); expectedNumMembersWithOverMinQuotaPartitions = totalPartitionsCount % numberOfConsumers; currentNumMembersWithOverMinQuotaPartitions = 0; // initialize the assignment map with an empty array of size maxQuota for all members assignment = new HashMap<>(consumerToOwnedPartitions.keySet().stream() .collect(Collectors.toMap(c -> c, c -> new ArrayList<>(maxQuota)))); assignedPartitions = new ArrayList<>(); } @Override Map<String, List<TopicPartition>> build() { if (log.isDebugEnabled()) { log.debug("Performing constrained assign with partitionsPerTopic: {}, currentAssignment: {}, rackInfo {}.", partitionsPerTopic, currentAssignment, rackInfo); } assignOwnedPartitions(); List<TopicPartition> unassignedPartitions = getUnassignedPartitions(assignedPartitions); if (log.isDebugEnabled()) { log.debug("After reassigning previously owned partitions, unfilled members: {}, unassigned 
partitions: {}, " + "current assignment: {}", unfilledMembersWithUnderMinQuotaPartitions, unassignedPartitions, assignment); } Collections.sort(unfilledMembersWithUnderMinQuotaPartitions); Collections.sort(unfilledMembersWithExactlyMinQuotaPartitions); unassignedPartitions = rackInfo.sortPartitionsByRackConsumers(unassignedPartitions); assignRackAwareRoundRobin(unassignedPartitions); assignRoundRobin(unassignedPartitions); verifyUnfilledMembers(); log.info("Final assignment of partitions to consumers: \n{}", assignment); return assignment; } // Reassign previously owned partitions, up to the expected number of partitions per consumer private void assignOwnedPartitions() { for (Map.Entry<String, List<TopicPartition>> consumerEntry : currentAssignment.entrySet()) { String consumer = consumerEntry.getKey(); List<TopicPartition> ownedPartitions = consumerEntry.getValue().stream() .filter(tp -> !rackInfo.racksMismatch(consumer, tp)) .collect(Collectors.toList()); List<TopicPartition> consumerAssignment = assignment.get(consumer); for (TopicPartition doublyClaimedPartition : partitionsWithMultiplePreviousOwners) { if (ownedPartitions.contains(doublyClaimedPartition)) { log.error("Found partition {} still claimed as owned by consumer {}, despite being claimed by multiple " + "consumers already in the same generation. 
Removing it from the ownedPartitions", doublyClaimedPartition, consumer); ownedPartitions.remove(doublyClaimedPartition); } } if (ownedPartitions.size() < minQuota) { // the expected assignment size is more than this consumer has now, so keep all the owned partitions // and put this member into the unfilled member list if (ownedPartitions.size() > 0) { consumerAssignment.addAll(ownedPartitions); assignedPartitions.addAll(ownedPartitions); } unfilledMembersWithUnderMinQuotaPartitions.add(consumer); } else if (ownedPartitions.size() >= maxQuota && currentNumMembersWithOverMinQuotaPartitions < expectedNumMembersWithOverMinQuotaPartitions) { // consumer owned the "maxQuota" of partitions or more, and we're still under the number of expected members // with more than the minQuota partitions, so keep "maxQuota" of the owned partitions, and revoke the rest of the partitions currentNumMembersWithOverMinQuotaPartitions++; if (currentNumMembersWithOverMinQuotaPartitions == expectedNumMembersWithOverMinQuotaPartitions) { unfilledMembersWithExactlyMinQuotaPartitions.clear(); } List<TopicPartition> maxQuotaPartitions = ownedPartitions.subList(0, maxQuota); consumerAssignment.addAll(maxQuotaPartitions); assignedPartitions.addAll(maxQuotaPartitions); allRevokedPartitions.addAll(ownedPartitions.subList(maxQuota, ownedPartitions.size())); } else { // consumer owned at least "minQuota" of partitions // so keep "minQuota" of the owned partitions, and revoke the rest of the partitions List<TopicPartition> minQuotaPartitions = ownedPartitions.subList(0, minQuota); consumerAssignment.addAll(minQuotaPartitions); assignedPartitions.addAll(minQuotaPartitions); allRevokedPartitions.addAll(ownedPartitions.subList(minQuota, ownedPartitions.size())); // this consumer is potential maxQuota candidate since we're still under the number of expected members // with more than the minQuota partitions. 
Note, if the number of expected members with more than // the minQuota partitions is 0, it means minQuota == maxQuota, and there are no potentially unfilled if (currentNumMembersWithOverMinQuotaPartitions < expectedNumMembersWithOverMinQuotaPartitions) { unfilledMembersWithExactlyMinQuotaPartitions.add(consumer); } } } } // Round-Robin filling within racks for remaining members up to the expected numbers of maxQuota, // otherwise, to minQuota private void assignRackAwareRoundRobin(List<TopicPartition> unassignedPartitions) { if (rackInfo.consumerRacks.isEmpty()) return; int nextUnfilledConsumerIndex = 0; Iterator<TopicPartition> unassignedIter = unassignedPartitions.iterator(); while (unassignedIter.hasNext()) { TopicPartition unassignedPartition = unassignedIter.next(); String consumer = null; int nextIndex = rackInfo.nextRackConsumer(unassignedPartition, unfilledMembersWithUnderMinQuotaPartitions, nextUnfilledConsumerIndex); if (nextIndex >= 0) { consumer = unfilledMembersWithUnderMinQuotaPartitions.get(nextIndex); int assignmentCount = assignment.get(consumer).size() + 1; if (assignmentCount >= minQuota) { unfilledMembersWithUnderMinQuotaPartitions.remove(consumer); if (assignmentCount < maxQuota) unfilledMembersWithExactlyMinQuotaPartitions.add(consumer); } else { nextIndex++; } nextUnfilledConsumerIndex = unfilledMembersWithUnderMinQuotaPartitions.isEmpty() ? 
0 : nextIndex % unfilledMembersWithUnderMinQuotaPartitions.size(); } else if (!unfilledMembersWithExactlyMinQuotaPartitions.isEmpty()) { int firstIndex = rackInfo.nextRackConsumer(unassignedPartition, unfilledMembersWithExactlyMinQuotaPartitions, 0); if (firstIndex >= 0) { consumer = unfilledMembersWithExactlyMinQuotaPartitions.get(firstIndex); if (assignment.get(consumer).size() + 1 == maxQuota) unfilledMembersWithExactlyMinQuotaPartitions.remove(firstIndex); } } if (consumer != null) { assignNewPartition(unassignedPartition, consumer); unassignedIter.remove(); } } } private void assignRoundRobin(List<TopicPartition> unassignedPartitions) { Iterator<String> unfilledConsumerIter = unfilledMembersWithUnderMinQuotaPartitions.iterator(); // Round-Robin filling remaining members up to the expected numbers of maxQuota, otherwise, to minQuota for (TopicPartition unassignedPartition : unassignedPartitions) { String consumer; if (unfilledConsumerIter.hasNext()) { consumer = unfilledConsumerIter.next(); } else { if (unfilledMembersWithUnderMinQuotaPartitions.isEmpty() && unfilledMembersWithExactlyMinQuotaPartitions.isEmpty()) { // Should not enter here since we have calculated the exact number to assign to each consumer. // This indicates issues in the assignment algorithm int currentPartitionIndex = unassignedPartitions.indexOf(unassignedPartition); log.error("No more unfilled consumers to be assigned. 
The remaining unassigned partitions are: {}", unassignedPartitions.subList(currentPartitionIndex, unassignedPartitions.size())); throw new IllegalStateException("No more unfilled consumers to be assigned."); } else if (unfilledMembersWithUnderMinQuotaPartitions.isEmpty()) { consumer = unfilledMembersWithExactlyMinQuotaPartitions.poll(); } else { unfilledConsumerIter = unfilledMembersWithUnderMinQuotaPartitions.iterator(); consumer = unfilledConsumerIter.next(); } } int currentAssignedCount = assignNewPartition(unassignedPartition, consumer); if (currentAssignedCount == minQuota) { unfilledConsumerIter.remove(); unfilledMembersWithExactlyMinQuotaPartitions.add(consumer); } else if (currentAssignedCount == maxQuota) { currentNumMembersWithOverMinQuotaPartitions++; if (currentNumMembersWithOverMinQuotaPartitions == expectedNumMembersWithOverMinQuotaPartitions) { // We only start to iterate over the "potentially unfilled" members at minQuota after we've filled // all members up to at least minQuota, so once the last minQuota member reaches maxQuota, we // should be done. 
But in case of some algorithmic error, just log a warning and continue to // assign any remaining partitions within the assignment constraints if (unassignedPartitions.indexOf(unassignedPartition) != unassignedPartitions.size() - 1) { log.error("Filled the last member up to maxQuota but still had partitions remaining to assign, " + "will continue but this indicates a bug in the assignment."); } } } } } private int assignNewPartition(TopicPartition unassignedPartition, String consumer) { List<TopicPartition> consumerAssignment = assignment.get(consumer); consumerAssignment.add(unassignedPartition); // We already assigned all possible ownedPartitions, so we know this must be newly assigned to this consumer // or else the partition was actually claimed by multiple previous owners and had to be invalidated from all // members claimed ownedPartitions if (allRevokedPartitions.contains(unassignedPartition) || partitionsWithMultiplePreviousOwners.contains(unassignedPartition)) partitionsTransferringOwnership.put(unassignedPartition, consumer); return consumerAssignment.size(); } private void verifyUnfilledMembers() { if (!unfilledMembersWithUnderMinQuotaPartitions.isEmpty()) { // we expected all the remaining unfilled members have minQuota partitions and we're already at the expected number // of members with more than the minQuota partitions. Otherwise, there must be error here. 
if (currentNumMembersWithOverMinQuotaPartitions != expectedNumMembersWithOverMinQuotaPartitions) { log.error("Current number of members with more than the minQuota partitions: {}, is less than the expected number " + "of members with more than the minQuota partitions: {}, and no more partitions to be assigned to the remaining unfilled consumers: {}", currentNumMembersWithOverMinQuotaPartitions, expectedNumMembersWithOverMinQuotaPartitions, unfilledMembersWithUnderMinQuotaPartitions); throw new IllegalStateException("We haven't reached the expected number of members with " + "more than the minQuota partitions, but no more partitions to be assigned"); } else { for (String unfilledMember : unfilledMembersWithUnderMinQuotaPartitions) { int assignedPartitionsCount = assignment.get(unfilledMember).size(); if (assignedPartitionsCount != minQuota) { log.error("Consumer: [{}] should have {} partitions, but got {} partitions, and no more partitions " + "to be assigned. The remaining unfilled consumers are: {}", unfilledMember, minQuota, assignedPartitionsCount, unfilledMembersWithUnderMinQuotaPartitions); throw new IllegalStateException(String.format("Consumer: [%s] doesn't reach minQuota partitions, " + "and no more partitions to be assigned", unfilledMember)); } else { log.trace("skip over this unfilled member: [{}] because we've reached the expected number of " + "members with more than the minQuota partitions, and this member already has minQuota partitions", unfilledMember); } } } } } /** * get the unassigned partition list by computing the difference set of all sorted partitions * and sortedAssignedPartitions. If no assigned partitions, we'll just return all sorted topic partitions. 
* * To compute the difference set, we use two pointers technique here: * * We loop through the all sorted topics, and then iterate all partitions the topic has, * compared with the ith element in sortedAssignedPartitions(i starts from 0): * - if not equal to the ith element, add to unassignedPartitions * - if equal to the ith element, get next element from sortedAssignedPartitions * * @param sortedAssignedPartitions sorted partitions, all are included in the sortedPartitions * @return the partitions not yet assigned to any consumers */ private List<TopicPartition> getUnassignedPartitions(List<TopicPartition> sortedAssignedPartitions) { List<String> sortedAllTopics = new ArrayList<>(partitionsPerTopic.keySet()); // sort all topics first, then we can have sorted all topic partitions by adding partitions starting from 0 Collections.sort(sortedAllTopics); if (sortedAssignedPartitions.isEmpty()) { // no assigned partitions means all partitions are unassigned partitions return getAllTopicPartitions(sortedAllTopics); } List<TopicPartition> unassignedPartitions = new ArrayList<>(totalPartitionsCount - sortedAssignedPartitions.size()); Collections.sort(sortedAssignedPartitions, Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition)); boolean shouldAddDirectly = false; Iterator<TopicPartition> sortedAssignedPartitionsIter = sortedAssignedPartitions.iterator(); TopicPartition nextAssignedPartition = sortedAssignedPartitionsIter.next(); for (String topic : sortedAllTopics) { int partitionCount = partitionsPerTopic.get(topic).size(); for (int i = 0; i < partitionCount; i++) { if (shouldAddDirectly || !(nextAssignedPartition.topic().equals(topic) && nextAssignedPartition.partition() == i)) { unassignedPartitions.add(new TopicPartition(topic, i)); } else { // this partition is in assignedPartitions, don't add to unassignedPartitions, just get next assigned partition if (sortedAssignedPartitionsIter.hasNext()) { nextAssignedPartition = 
sortedAssignedPartitionsIter.next(); } else { // add the remaining directly since there is no more sortedAssignedPartitions shouldAddDirectly = true; } } } } return unassignedPartitions; } } /** * This general assignment algorithm guarantees the assignment that is as balanced as possible. * This method includes the following steps: * * 1. Preserving all the existing partition assignments. If rack-aware algorithm is used, only assignments * within racks are preserved. * 2. Removing all the partition assignments that have become invalid due to the change that triggers the reassignment. * Partition assignments with mismatched racks are also removed. * 3. Assigning the unassigned partitions in a way that balances out the overall assignments of partitions to consumers. * while preserving rack-alignment. This step is used only for rack-aware assignment. * 4. Assigning the remaining unassigned partitions in a way that balances out the overall assignments of partitions to consumers. * For rack-aware algorithm, these are partitions that could not be aligned on racks within the balancing constraints. * 5. Further balancing out the resulting assignment by finding the partitions that can be reassigned * to another consumer towards an overall more balanced assignment. For rack-aware algorithm, attempt * to retain rack alignment if possible. 
* */ private class GeneralAssignmentBuilder extends AbstractAssignmentBuilder { private final Map<String, Subscription> subscriptions; // a mapping of all topics to all consumers that can be assigned to them private final Map<String, List<String>> topic2AllPotentialConsumers; // a mapping of all consumers to all potential topics that can be assigned to them private final Map<String, List<String>> consumer2AllPotentialTopics; // a mapping of partition to current consumer private final Map<TopicPartition, String> currentPartitionConsumer; private final List<TopicPartition> sortedAllPartitions; // an ascending sorted set of consumers based on how many topic partitions are already assigned to them private final TreeSet<String> sortedCurrentSubscriptions; private boolean revocationRequired; /** * Constructs a general assignment builder. * * @param partitionsPerTopic The partitions for each subscribed topic. * @param subscriptions Map from the member id to their respective topic subscription * @param currentAssignment Each consumer's previously owned and still-subscribed partitions * @param rackInfo Rack information for consumers and partitions */ GeneralAssignmentBuilder(Map<String, List<PartitionInfo>> partitionsPerTopic, RackInfo rackInfo, Map<String, List<TopicPartition>> currentAssignment, Map<String, Subscription> subscriptions) { super(partitionsPerTopic, rackInfo, currentAssignment); this.subscriptions = subscriptions; topic2AllPotentialConsumers = new HashMap<>(partitionsPerTopic.keySet().size()); consumer2AllPotentialTopics = new HashMap<>(subscriptions.keySet().size()); // initialize topic2AllPotentialConsumers and consumer2AllPotentialTopics partitionsPerTopic.keySet().forEach( topicName -> topic2AllPotentialConsumers.put(topicName, new ArrayList<>())); subscriptions.forEach((consumerId, subscription) -> { List<String> subscribedTopics = new ArrayList<>(subscription.topics().size()); consumer2AllPotentialTopics.put(consumerId, subscribedTopics); 
subscription.topics().stream().filter(topic -> partitionsPerTopic.get(topic) != null).forEach(topic -> { subscribedTopics.add(topic); topic2AllPotentialConsumers.get(topic).add(consumerId); }); // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist if (!currentAssignment.containsKey(consumerId)) currentAssignment.put(consumerId, new ArrayList<>()); }); currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<String> sortedAllTopics = new ArrayList<>(topic2AllPotentialConsumers.keySet()); Collections.sort(sortedAllTopics, new TopicComparator(topic2AllPotentialConsumers)); sortedAllPartitions = getAllTopicPartitions(sortedAllTopics); sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); } @Override Map<String, List<TopicPartition>> build() { if (log.isDebugEnabled()) { log.debug("performing general assign. partitionsPerTopic: {}, subscriptions: {}, currentAssignment: {}, rackInfo: {}", partitionsPerTopic, subscriptions, currentAssignment, rackInfo); } Map<TopicPartition, ConsumerGenerationPair> prevAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(prevAssignment); // the partitions already assigned in current assignment List<TopicPartition> assignedPartitions = assignOwnedPartitions(); // all partitions that still need to be assigned List<TopicPartition> unassignedPartitions = getUnassignedPartitions(assignedPartitions); if (log.isDebugEnabled()) { log.debug("unassigned Partitions: {}", unassignedPartitions); } // at this point we have preserved all valid topic partition to consumer assignments and removed // all invalid topic partitions and invalid consumers. 
Now we need to assign unassignedPartitions // to consumers so that the topic partition assignments are as balanced as possible. sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(prevAssignment, unassignedPartitions); log.info("Final assignment of partitions to consumers: \n{}", currentAssignment); return currentAssignment; } private List<TopicPartition> assignOwnedPartitions() { List<TopicPartition> assignedPartitions = new ArrayList<>(); for (Iterator<Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); String consumer = entry.getKey(); Subscription consumerSubscription = subscriptions.get(consumer); if (consumerSubscription == null) { // if a consumer that existed before (and had some partition assignments) is now removed, remove it from currentAssignment for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { // otherwise (the consumer still exists) for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!topic2AllPotentialConsumers.containsKey(partition.topic())) { // if this topic partition of this consumer no longer exists, remove it from currentAssignment of the consumer partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!consumerSubscription.topics().contains(partition.topic()) || rackInfo.racksMismatch(consumer, partition)) { // if the consumer is no longer subscribed to its topic or if racks don't match for rack-aware assignment, // remove it from currentAssignment of the consumer partitionIter.remove(); revocationRequired = true; } else { // otherwise, remove the topic partition from those that need to be assigned only if // its current consumer is still subscribed to its topic (because it is already assigned // and we would want to 
preserve that assignment as much as possible) assignedPartitions.add(partition); } } } } return assignedPartitions; } /** * get the unassigned partition list by computing the difference set of the sortedPartitions(all partitions) * and sortedAssignedPartitions. If no assigned partitions, we'll just return all sorted topic partitions. * * We loop the sortedPartition, and compare the ith element in sortedAssignedPartitions(i start from 0): * - if not equal to the ith element, add to unassignedPartitions * - if equal to the ith element, get next element from sortedAssignedPartitions * * @param sortedAssignedPartitions: sorted partitions, all are included in the sortedPartitions * @return partitions that aren't assigned to any current consumer */ private List<TopicPartition> getUnassignedPartitions(List<TopicPartition> sortedAssignedPartitions) { if (sortedAssignedPartitions.isEmpty()) { return sortedAllPartitions; } List<TopicPartition> unassignedPartitions = new ArrayList<>(); Collections.sort(sortedAssignedPartitions, new PartitionComparator(topic2AllPotentialConsumers)); boolean shouldAddDirectly = false; Iterator<TopicPartition> sortedAssignedPartitionsIter = sortedAssignedPartitions.iterator(); TopicPartition nextAssignedPartition = sortedAssignedPartitionsIter.next(); for (TopicPartition topicPartition : sortedAllPartitions) { if (shouldAddDirectly || !nextAssignedPartition.equals(topicPartition)) { unassignedPartitions.add(topicPartition); } else { // this partition is in assignedPartitions, don't add to unassignedPartitions, just get next assigned partition if (sortedAssignedPartitionsIter.hasNext()) { nextAssignedPartition = sortedAssignedPartitionsIter.next(); } else { // add the remaining directly since there is no more sortedAssignedPartitions shouldAddDirectly = true; } } } return unassignedPartitions; } /** * update the prevAssignment with the partitions, consumer and generation in parameters * * @param partitions: The partitions to be updated the 
prevAssignment * @param consumer: The consumer Id * @param prevAssignment: The assignment contains the assignment with the 2nd largest generation * @param generation: The generation of this assignment (partitions) */ private void updatePrevAssignment(Map<TopicPartition, ConsumerGenerationPair> prevAssignment, List<TopicPartition> partitions, String consumer, int generation) { for (TopicPartition partition: partitions) { if (prevAssignment.containsKey(partition)) { // only keep the latest previous assignment if (generation > prevAssignment.get(partition).generation) { prevAssignment.put(partition, new ConsumerGenerationPair(consumer, generation)); } } else { prevAssignment.put(partition, new ConsumerGenerationPair(consumer, generation)); } } } /** * filling in the prevAssignment from the subscriptions. * * @param prevAssignment: The assignment contains the assignment with the 2nd largest generation */ private void prepopulateCurrentAssignments(Map<TopicPartition, ConsumerGenerationPair> prevAssignment) { // we need to process subscriptions' user data with each consumer's reported generation in mind // higher generations overwrite lower generations in case of a conflict // note that a conflict could exist only if user data is for different generations for (Map.Entry<String, Subscription> subscriptionEntry: subscriptions.entrySet()) { String consumer = subscriptionEntry.getKey(); Subscription subscription = subscriptionEntry.getValue(); if (subscription.userData() != null) { // since this is our 2nd time to deserialize memberData, rewind userData is necessary subscription.userData().rewind(); } MemberData memberData = memberData(subscription); // we already have the maxGeneration info, so just compare the current generation of memberData, and put into prevAssignment if (memberData.generation.isPresent() && memberData.generation.get() < maxGeneration) { // if the current member's generation is lower than maxGeneration, put into prevAssignment if needed 
updatePrevAssignment(prevAssignment, memberData.partitions, consumer, memberData.generation.get()); } else if (!memberData.generation.isPresent() && maxGeneration > DEFAULT_GENERATION) { // if maxGeneration is larger than DEFAULT_GENERATION // put all (no generation) partitions as DEFAULT_GENERATION into prevAssignment if needed updatePrevAssignment(prevAssignment, memberData.partitions, consumer, DEFAULT_GENERATION); } } } /** * determine if the current assignment is a balanced one * * @return true if the given assignment is balanced; false otherwise */ private boolean isBalanced() { int min = currentAssignment.get(sortedCurrentSubscriptions.first()).size(); int max = currentAssignment.get(sortedCurrentSubscriptions.last()).size(); if (min >= max - 1) // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true return true; // create a mapping from partitions to the consumer assigned to them final Map<TopicPartition, String> allPartitions = new HashMap<>(); Set<Entry<String, List<TopicPartition>>> assignments = currentAssignment.entrySet(); for (Map.Entry<String, List<TopicPartition>> entry: assignments) { List<TopicPartition> topicPartitions = entry.getValue(); for (TopicPartition topicPartition: topicPartitions) { if (allPartitions.containsKey(topicPartition)) log.error("{} is assigned to more than one consumer.", topicPartition); allPartitions.put(topicPartition, entry.getKey()); } } // for each consumer that does not have all the topic partitions it can get make sure none of the topic partitions it // could but did not get cannot be moved to it (because that would break the balance) for (String consumer: sortedCurrentSubscriptions) { List<TopicPartition> consumerPartitions = currentAssignment.get(consumer); int consumerPartitionCount = consumerPartitions.size(); // skip if this consumer already has all the topic partitions it can get List<String> allSubscribedTopics = consumer2AllPotentialTopics.get(consumer); int 
maxAssignmentSize = getMaxAssignmentSize(allSubscribedTopics); if (consumerPartitionCount == maxAssignmentSize) continue; // otherwise make sure it cannot get any more for (String topic: allSubscribedTopics) { int partitionCount = partitionsPerTopic.get(topic).size(); for (int i = 0; i < partitionCount; i++) { TopicPartition topicPartition = new TopicPartition(topic, i); if (!currentAssignment.get(consumer).contains(topicPartition)) { String otherConsumer = allPartitions.get(topicPartition); int otherConsumerPartitionCount = currentAssignment.get(otherConsumer).size(); if (consumerPartitionCount < otherConsumerPartitionCount) { log.debug("{} can be moved from consumer {} to consumer {} for a more balanced assignment.", topicPartition, otherConsumer, consumer); return false; } } } } } return true; } /** * get the maximum assigned partition size of the {@code allSubscribedTopics} * * @param allSubscribedTopics the subscribed topics of a consumer * @return maximum assigned partition size */ private int getMaxAssignmentSize(List<String> allSubscribedTopics) { int maxAssignmentSize; if (allSubscribedTopics.size() == partitionsPerTopic.size()) { maxAssignmentSize = totalPartitionsCount; } else { maxAssignmentSize = allSubscribedTopics.stream().map(partitionsPerTopic::get).map(List::size).reduce(0, Integer::sum); } return maxAssignmentSize; } /** * @return the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs. * A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0. * Lower balance score indicates a more balanced assignment. 
*/ private int getBalanceScore(Map<String, List<TopicPartition>> assignment) { int score = 0; Map<String, Integer> consumer2AssignmentSize = new HashMap<>(); for (Entry<String, List<TopicPartition>> entry: assignment.entrySet()) consumer2AssignmentSize.put(entry.getKey(), entry.getValue().size()); Iterator<Entry<String, Integer>> it = consumer2AssignmentSize.entrySet().iterator(); while (it.hasNext()) { Entry<String, Integer> entry = it.next(); int consumerAssignmentSize = entry.getValue(); it.remove(); for (Entry<String, Integer> otherEntry: consumer2AssignmentSize.entrySet()) score += Math.abs(consumerAssignmentSize - otherEntry.getValue()); } return score; } /** * The assignment should improve the overall balance of the partition assignments to consumers. */ private boolean maybeAssignPartition(TopicPartition partition, RackInfo rackInfo) { for (String consumer: sortedCurrentSubscriptions) { if (consumer2AllPotentialTopics.get(consumer).contains(partition.topic()) && (rackInfo == null || !rackInfo.racksMismatch(consumer, partition))) { sortedCurrentSubscriptions.remove(consumer); currentAssignment.get(consumer).add(partition); currentPartitionConsumer.put(partition, consumer); sortedCurrentSubscriptions.add(consumer); return true; } } return false; } /** * attempt to assign all unassigned partitions * * @param unassignedPartitions partitions that are still unassigned * @param rackInfo rack information used to match racks. 
If null, no rack-matching is performed * @param removeAssigned flag that indicates if assigned partitions should be removed from `unassignedPartitions` */ private void maybeAssign(List<TopicPartition> unassignedPartitions, RackInfo rackInfo, boolean removeAssigned) { // assign all unassigned partitions for (Iterator<TopicPartition> iter = unassignedPartitions.iterator(); iter.hasNext();) { TopicPartition partition = iter.next(); // skip if there is no potential consumer for the topic if (topic2AllPotentialConsumers.get(partition.topic()).isEmpty()) continue; if (maybeAssignPartition(partition, rackInfo) && removeAssigned) iter.remove(); } } private boolean canTopicParticipateInReassignment(String topic) { // if a topic has two or more potential consumers it is subject to reassignment. return topic2AllPotentialConsumers.get(topic).size() >= 2; } private boolean canConsumerParticipateInReassignment(String consumer) { List<TopicPartition> currentPartitions = currentAssignment.get(consumer); int currentAssignmentSize = currentPartitions.size(); List<String> allSubscribedTopics = consumer2AllPotentialTopics.get(consumer); int maxAssignmentSize = getMaxAssignmentSize(allSubscribedTopics); if (currentAssignmentSize > maxAssignmentSize) log.error("The consumer {} is assigned more partitions than the maximum possible.", consumer); if (currentAssignmentSize < maxAssignmentSize) // if a consumer is not assigned all its potential partitions it is subject to reassignment return true; for (TopicPartition partition: currentPartitions) // if any of the partitions assigned to a consumer is subject to reassignment the consumer itself // is subject to reassignment if (canTopicParticipateInReassignment(partition.topic())) return true; return false; } /** * Balance the current assignment using the data structures created in the assignPartitions(...) method above. 
*/ private void balance(Map<TopicPartition, ConsumerGenerationPair> prevAssignment, List<TopicPartition> unassignedPartitions) { boolean initializing = currentAssignment.get(sortedCurrentSubscriptions.last()).isEmpty(); // First assign with rack matching and then assign any remaining without rack matching List<TopicPartition> partitionsToAssign = unassignedPartitions; if (!rackInfo.consumerRacks.isEmpty()) { partitionsToAssign = new LinkedList<>(unassignedPartitions); maybeAssign(partitionsToAssign, rackInfo, true); } maybeAssign(partitionsToAssign, null, false); // narrow down the reassignment scope to only those partitions that can actually be reassigned Set<TopicPartition> fixedPartitions = new HashSet<>(); for (String topic: topic2AllPotentialConsumers.keySet()) if (!canTopicParticipateInReassignment(topic)) { for (int i = 0; i < partitionsPerTopic.get(topic).size(); i++) { fixedPartitions.add(new TopicPartition(topic, i)); } } sortedAllPartitions.removeAll(fixedPartitions); unassignedPartitions.removeAll(fixedPartitions); // narrow down the reassignment scope to only those consumers that are subject to reassignment Map<String, List<TopicPartition>> fixedAssignments = new HashMap<>(); for (String consumer: consumer2AllPotentialTopics.keySet()) if (!canConsumerParticipateInReassignment(consumer)) { sortedCurrentSubscriptions.remove(consumer); fixedAssignments.put(consumer, currentAssignment.remove(consumer)); } // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later Map<String, List<TopicPartition>> preBalanceAssignment = deepCopy(currentAssignment); Map<TopicPartition, String> preBalancePartitionConsumers = new HashMap<>(currentPartitionConsumer); // if we don't already need to revoke something due to subscription changes, first try to balance by only moving newly added partitions if (!revocationRequired) { performReassignments(unassignedPartitions, prevAssignment); } boolean reassignmentPerformed 
= performReassignments(sortedAllPartitions, prevAssignment); // if we are not preserving existing assignments and we have made changes to the current assignment // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment if (!initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment)) { deepCopy(preBalanceAssignment, currentAssignment); currentPartitionConsumer.clear(); currentPartitionConsumer.putAll(preBalancePartitionConsumers); } // add the fixed assignments (those that could not change) back for (Entry<String, List<TopicPartition>> entry: fixedAssignments.entrySet()) { String consumer = entry.getKey(); currentAssignment.put(consumer, entry.getValue()); sortedCurrentSubscriptions.add(consumer); } fixedAssignments.clear(); } private boolean performReassignments(List<TopicPartition> reassignablePartitions, Map<TopicPartition, ConsumerGenerationPair> prevAssignment) { boolean reassignmentPerformed = false; boolean modified; // repeat reassignment until no partition can be moved to improve the balance do { modified = false; // reassign all reassignable partitions (starting from the partition with least potential consumers and if needed) // until the full list is processed or a balance is achieved Iterator<TopicPartition> partitionIterator = reassignablePartitions.iterator(); while (partitionIterator.hasNext() && !isBalanced()) { TopicPartition partition = partitionIterator.next(); // the partition must have at least two consumers if (topic2AllPotentialConsumers.get(partition.topic()).size() <= 1) log.error("Expected more than one potential consumer for partition '{}'", partition); // the partition must have a current consumer String consumer = currentPartitionConsumer.get(partition); if (consumer == null) log.error("Expected partition '{}' to be assigned to a consumer", partition); if (prevAssignment.containsKey(partition) && currentAssignment.get(consumer).size() > 
currentAssignment.get(prevAssignment.get(partition).consumer).size() + 1) { reassignPartition(partition, prevAssignment.get(partition).consumer); reassignmentPerformed = true; modified = true; continue; } // check if a better-suited consumer exist for the partition; if so, reassign it // Use consumer within rack if possible String consumerRack = rackInfo.consumerRacks.get(consumer); Set<String> partitionRacks = rackInfo.partitionRacks.get(partition); boolean foundRackConsumer = false; if (consumerRack != null && !partitionRacks.isEmpty() && partitionRacks.contains(consumerRack)) { for (String otherConsumer : topic2AllPotentialConsumers.get(partition.topic())) { String otherConsumerRack = rackInfo.consumerRacks.get(otherConsumer); if (otherConsumerRack == null || !partitionRacks.contains(otherConsumerRack)) continue; if (currentAssignment.get(consumer).size() > currentAssignment.get(otherConsumer).size() + 1) { reassignPartition(partition); reassignmentPerformed = true; modified = true; foundRackConsumer = true; break; } } } if (!foundRackConsumer) { for (String otherConsumer : topic2AllPotentialConsumers.get(partition.topic())) { if (currentAssignment.get(consumer).size() > currentAssignment.get(otherConsumer).size() + 1) { reassignPartition(partition); reassignmentPerformed = true; modified = true; break; } } } } } while (modified); return reassignmentPerformed; } private void reassignPartition(TopicPartition partition) { // find the new consumer String newConsumer = null; for (String anotherConsumer: sortedCurrentSubscriptions) { if (consumer2AllPotentialTopics.get(anotherConsumer).contains(partition.topic())) { newConsumer = anotherConsumer; break; } } assert newConsumer != null; reassignPartition(partition, newConsumer); } private void reassignPartition(TopicPartition partition, String newConsumer) { String consumer = currentPartitionConsumer.get(partition); // find the correct partition movement considering the stickiness requirement TopicPartition 
partitionToBeMoved = partitionMovements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer); processPartitionMovement(partitionToBeMoved, newConsumer); } private void processPartitionMovement(TopicPartition partition, String newConsumer) { String oldConsumer = currentPartitionConsumer.get(partition); sortedCurrentSubscriptions.remove(oldConsumer); sortedCurrentSubscriptions.remove(newConsumer); partitionMovements.movePartition(partition, oldConsumer, newConsumer); currentAssignment.get(oldConsumer).remove(partition); currentAssignment.get(newConsumer).add(partition); currentPartitionConsumer.put(partition, newConsumer); sortedCurrentSubscriptions.add(newConsumer); sortedCurrentSubscriptions.add(oldConsumer); } private void deepCopy(Map<String, List<TopicPartition>> source, Map<String, List<TopicPartition>> dest) { dest.clear(); for (Entry<String, List<TopicPartition>> entry: source.entrySet()) dest.put(entry.getKey(), new ArrayList<>(entry.getValue())); } private Map<String, List<TopicPartition>> deepCopy(Map<String, List<TopicPartition>> assignment) { Map<String, List<TopicPartition>> copy = new HashMap<>(); deepCopy(assignment, copy); return copy; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/AsyncClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.common.Node; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; public abstract class AsyncClient<T1, Req extends AbstractRequest, Resp extends AbstractResponse, T2> { private final Logger log; private final ConsumerNetworkClient client; AsyncClient(ConsumerNetworkClient client, LogContext logContext) { this.client = client; this.log = logContext.logger(getClass()); } public RequestFuture<T2> sendAsyncRequest(Node node, T1 requestData) { AbstractRequest.Builder<Req> requestBuilder = prepareRequest(node, requestData); return client.send(node, requestBuilder).compose(new RequestFutureAdapter<ClientResponse, T2>() { @Override @SuppressWarnings("unchecked") public void onSuccess(ClientResponse value, RequestFuture<T2> future) { Resp resp; try { resp = (Resp) value.responseBody(); } catch (ClassCastException cce) { log.error("Could not cast response body", cce); future.raise(cce); return; } log.trace("Received {} {} from broker {}", 
resp.getClass().getSimpleName(), resp, node); try { future.complete(handleResponse(node, requestData, resp)); } catch (RuntimeException e) { if (!future.isDone()) { future.raise(e); } } } @Override public void onFailure(RuntimeException e, RequestFuture<T2> future1) { future1.raise(e); } }); } protected Logger logger() { return log; } protected abstract AbstractRequest.Builder<Req> prepareRequest(Node node, T1 requestData); protected abstract T2 handleResponse(Node node, T1 requestData, Resp response); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.slf4j.Logger; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import 
java.util.Objects; import java.util.Optional; import java.util.Queue; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; public class CommitRequestManager implements RequestManager { // TODO: current in ConsumerConfig but inaccessible in the internal package. private static final String THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED = "internal.throw.on.fetch.stable.offset.unsupported"; // TODO: We will need to refactor the subscriptionState private final SubscriptionState subscriptionState; private final Logger log; private final Optional<AutoCommitState> autoCommitState; private final CoordinatorRequestManager coordinatorRequestManager; private final GroupState groupState; private final long retryBackoffMs; private final boolean throwOnFetchStableOffsetUnsupported; final PendingRequests pendingRequests; public CommitRequestManager( final Time time, final LogContext logContext, final SubscriptionState subscriptionState, final ConsumerConfig config, final CoordinatorRequestManager coordinatorRequestManager, final GroupState groupState) { Objects.requireNonNull(coordinatorRequestManager, "Coordinator is needed upon committing offsets"); this.log = logContext.logger(getClass()); this.pendingRequests = new PendingRequests(); if (config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) { final long autoCommitInterval = Integer.toUnsignedLong(config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG)); this.autoCommitState = Optional.of(new AutoCommitState(time, autoCommitInterval)); } else { this.autoCommitState = Optional.empty(); } this.coordinatorRequestManager = coordinatorRequestManager; this.groupState = groupState; this.subscriptionState = subscriptionState; this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.throwOnFetchStableOffsetUnsupported = config.getBoolean(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED); } /** * Poll for the {@link OffsetFetchRequest} and {@link 
OffsetCommitRequest} request if there's any. The function will * also try to autocommit the offsets, if feature is enabled. */ @Override public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { maybeAutoCommit(); if (!pendingRequests.hasUnsentRequests()) { return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.emptyList()); } return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.unmodifiableList(pendingRequests.drain(currentTimeMs))); } private void maybeAutoCommit() { if (!autoCommitState.isPresent()) { return; } AutoCommitState autocommit = autoCommitState.get(); if (!autocommit.canSendAutocommit()) { return; } Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptionState.allConsumed(); sendAutoCommit(allConsumedOffsets); autocommit.resetTimer(); autocommit.setInflightCommitStatus(true); } /** * Handles {@link org.apache.kafka.clients.consumer.internals.events.CommitApplicationEvent}. It creates an * {@link OffsetCommitRequestState} and enqueue it to send later. */ public CompletableFuture<ClientResponse> addOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets) { return pendingRequests.addOffsetCommitRequest(offsets); } /** * Handles {@link org.apache.kafka.clients.consumer.internals.events.OffsetFetchApplicationEvent}. It creates an * {@link OffsetFetchRequestState} and enqueue it to send later. 
*/
public CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> addOffsetFetchRequest(final Set<TopicPartition> partitions) {
    return pendingRequests.addOffsetFetchRequest(partitions);
}

/**
 * Advances the auto-commit timer to {@code currentTimeMs}. No-op when auto-commit is
 * disabled (i.e. {@code autoCommitState} is empty).
 */
public void updateAutoCommitTimer(final long currentTimeMs) {
    this.autoCommitState.ifPresent(t -> t.ack(currentTimeMs));
}

// Visible for testing
List<OffsetFetchRequestState> unsentOffsetFetchRequests() {
    return pendingRequests.unsentOffsetFetches;
}

// Visible for testing
Queue<OffsetCommitRequestState> unsentOffsetCommitRequests() {
    return pendingRequests.unsentOffsetCommits;
}

/**
 * Enqueues an auto-commit of all consumed offsets and clears the inflight-commit flag once
 * the commit completes (successfully or not). Failures are logged but never propagated to
 * the caller: the returned future's exception is swallowed by {@code exceptionally}.
 */
// Visible for testing
CompletableFuture<ClientResponse> sendAutoCommit(final Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets) {
    log.debug("Enqueuing autocommit offsets: {}", allConsumedOffsets);
    return this.addOffsetCommitRequest(allConsumedOffsets)
        .whenComplete((response, throwable) -> {
            // The commit is no longer inflight regardless of the outcome.
            this.autoCommitState.ifPresent(autoCommitState -> autoCommitState.setInflightCommitStatus(false));
            if (throwable == null) {
                log.debug("Completed asynchronous auto-commit of offsets {}", allConsumedOffsets);
            }
        })
        .exceptionally(t -> {
            // NOTE(review): `t` delivered to exceptionally() may be a CompletionException
            // wrapping the commit error, in which case this instanceof check would not match
            // RetriableCommitFailedException — confirm against how the commit future is completed.
            if (t instanceof RetriableCommitFailedException) {
                log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", allConsumedOffsets, t);
            } else {
                log.warn("Asynchronous auto-commit of offsets {} failed: {}", allConsumedOffsets, t.getMessage());
            }
            return null;
        });
}

/**
 * Holds one pending OffsetCommit request: the offsets to commit, the group/generation
 * metadata to stamp on the request, and the future handler completed by the network layer.
 */
private class OffsetCommitRequestState {
    private final Map<TopicPartition, OffsetAndMetadata> offsets;
    private final String groupId;
    private final GroupState.Generation generation;
    private final String groupInstanceId;
    private final NetworkClientDelegate.FutureCompletionHandler future;

    public OffsetCommitRequestState(final Map<TopicPartition, OffsetAndMetadata> offsets,
                                    final String groupId,
                                    final String groupInstanceId,
                                    final GroupState.Generation generation) {
        this.offsets = offsets;
        this.future = new NetworkClientDelegate.FutureCompletionHandler();
        this.groupId = groupId;
        this.generation = generation;
        this.groupInstanceId = groupInstanceId;
    }

    public CompletableFuture<ClientResponse> future() {
        return future.future();
    }

    /**
     * Builds the OffsetCommit request payload, grouping the per-partition offsets by topic,
     * and wraps it in an UnsentRequest targeted at the current group coordinator.
     */
    public NetworkClientDelegate.UnsentRequest toUnsentRequest() {
        Map<String, OffsetCommitRequestData.OffsetCommitRequestTopic> requestTopicDataMap = new HashMap<>();
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            TopicPartition topicPartition = entry.getKey();
            OffsetAndMetadata offsetAndMetadata = entry.getValue();
            // Reuse the topic entry if we already created one for a previous partition of this topic.
            OffsetCommitRequestData.OffsetCommitRequestTopic topic = requestTopicDataMap
                .getOrDefault(topicPartition.topic(),
                    new OffsetCommitRequestData.OffsetCommitRequestTopic()
                        .setName(topicPartition.topic())
                );
            topic.partitions().add(new OffsetCommitRequestData.OffsetCommitRequestPartition()
                .setPartitionIndex(topicPartition.partition())
                .setCommittedOffset(offsetAndMetadata.offset())
                .setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
                .setCommittedMetadata(offsetAndMetadata.metadata())
            );
            requestTopicDataMap.put(topicPartition.topic(), topic);
        }
        OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder(
            new OffsetCommitRequestData()
                .setGroupId(this.groupId)
                .setGenerationId(generation.generationId)
                .setMemberId(generation.memberId)
                .setGroupInstanceId(groupInstanceId)
                .setTopics(new ArrayList<>(requestTopicDataMap.values())));
        return new NetworkClientDelegate.UnsentRequest(
            builder,
            coordinatorRequestManager.coordinator(),
            future);
    }
}

/**
 * Holds one pending OffsetFetch request. Extends {@link RequestState} so failed attempts
 * are retried with backoff (see {@code retry}). Requests for the same partitions and
 * generation are considered duplicates (see {@code sameRequest}) and share a future.
 */
private class OffsetFetchRequestState extends RequestState {
    public final Set<TopicPartition> requestedPartitions;
    public final GroupState.Generation requestedGeneration;
    public CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> future;

    public OffsetFetchRequestState(final Set<TopicPartition> partitions,
                                   final GroupState.Generation generation,
                                   final long retryBackoffMs) {
        super(retryBackoffMs);
        this.requestedPartitions = partitions;
        this.requestedGeneration = generation;
        this.future = new CompletableFuture<>();
    }

    // Two fetches are duplicates when they target the same partitions in the same generation.
    public boolean sameRequest(final OffsetFetchRequestState request) {
        return Objects.equals(requestedGeneration, request.requestedGeneration)
            && requestedPartitions.equals(request.requestedPartitions);
    }

    public NetworkClientDelegate.UnsentRequest toUnsentRequest(final long currentTimeMs) {
        OffsetFetchRequest.Builder builder = new OffsetFetchRequest.Builder(
            groupState.groupId,
            true,
            new ArrayList<>(this.requestedPartitions),
            throwOnFetchStableOffsetUnsupported);
        NetworkClientDelegate.UnsentRequest unsentRequest = new NetworkClientDelegate.UnsentRequest(
            builder,
            coordinatorRequestManager.coordinator());
        // Dispatch the response (or retry/complete-exceptionally) when the network future fires.
        unsentRequest.future().whenComplete((r, t) -> {
            onResponse(currentTimeMs, (OffsetFetchResponse) r.responseBody());
        });
        return unsentRequest;
    }

    public void onResponse(
        final long currentTimeMs,
        final OffsetFetchResponse response) {
        Errors responseError = response.groupLevelError(groupState.groupId);
        if (responseError != Errors.NONE) {
            onFailure(currentTimeMs, responseError);
            return;
        }
        onSuccess(currentTimeMs, response);
    }

    /**
     * Handles a group-level error: coordinator-load and not-coordinator errors are retried
     * (the latter after marking the coordinator unknown); authorization and unexpected
     * errors fail the future.
     */
    private void onFailure(final long currentTimeMs,
                           final Errors responseError) {
        log.debug("Offset fetch failed: {}", responseError.message());
        // TODO: should we retry on COORDINATOR_NOT_AVAILABLE as well ?
        if (responseError == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
            retry(currentTimeMs);
        } else if (responseError == Errors.NOT_COORDINATOR) {
            // re-discover the coordinator and retry
            // NOTE(review): uses Time.SYSTEM.milliseconds() here while the rest of this path
            // threads currentTimeMs through — confirm whether the wall clock is intended.
            coordinatorRequestManager.markCoordinatorUnknown(responseError.message(), Time.SYSTEM.milliseconds());
            retry(currentTimeMs);
        } else if (responseError == Errors.GROUP_AUTHORIZATION_FAILED) {
            future.completeExceptionally(GroupAuthorizationException.forGroupId(groupState.groupId));
        } else {
            future.completeExceptionally(new KafkaException("Unexpected error in fetch offset response: " + responseError.message()));
        }
    }

    // Record the failed attempt (for backoff) and re-enqueue this same request.
    private void retry(final long currentTimeMs) {
        onFailedAttempt(currentTimeMs);
        onSendAttempt(currentTimeMs);
        pendingRequests.addOffsetFetchRequest(this);
    }

    /**
     * Handles a group-level-success response, inspecting per-partition errors:
     * unknown topic/partition and unexpected errors fail the future immediately;
     * authorization failures are accumulated into one TopicAuthorizationException;
     * unstable (pending transactional) offsets trigger a retry of the whole request.
     */
    private void onSuccess(final long currentTimeMs,
                           final OffsetFetchResponse response) {
        Set<String> unauthorizedTopics = null;
        Map<TopicPartition, OffsetFetchResponse.PartitionData> responseData =
            response.partitionDataMap(groupState.groupId);
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(responseData.size());
        Set<TopicPartition> unstableTxnOffsetTopicPartitions = new HashSet<>();
        for (Map.Entry<TopicPartition, OffsetFetchResponse.PartitionData> entry : responseData.entrySet()) {
            TopicPartition tp = entry.getKey();
            OffsetFetchResponse.PartitionData partitionData = entry.getValue();
            if (partitionData.hasError()) {
                Errors error = partitionData.error;
                log.debug("Failed to fetch offset for partition {}: {}", tp, error.message());
                if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
                    future.completeExceptionally(new KafkaException("Topic or Partition " + tp + " does " +
                        "not " +
                        "exist"));
                    return;
                } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
                    if (unauthorizedTopics == null) {
                        unauthorizedTopics = new HashSet<>();
                    }
                    unauthorizedTopics.add(tp.topic());
                } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) {
                    unstableTxnOffsetTopicPartitions.add(tp);
                } else {
                    future.completeExceptionally(new KafkaException("Unexpected error in fetch offset " +
                        "response for partition " + tp + ": " + error.message()));
                    return;
                }
            } else if (partitionData.offset >= 0) {
                // record the position with the offset (-1 indicates no committed offset to fetch);
                // if there's no committed offset, record as null
                offsets.put(tp, new OffsetAndMetadata(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata));
            } else {
                log.info("Found no committed offset for partition {}", tp);
                offsets.put(tp, null);
            }
        }
        if (unauthorizedTopics != null) {
            future.completeExceptionally(new TopicAuthorizationException(unauthorizedTopics));
        } else if (!unstableTxnOffsetTopicPartitions.isEmpty()) {
            // TODO: Optimization question: Do we need to retry all partitions upon a single partition error?
            log.info("The following partitions still have unstable offsets " +
                "which are not cleared on the broker side: {}" +
                ", this could be either " +
                "transactional offsets waiting for completion, or " +
                "normal offsets waiting for replication after appending to local log", unstableTxnOffsetTopicPartitions);
            retry(currentTimeMs);
        } else {
            future.complete(offsets);
        }
    }

    // Forwards this request's completion (value or exception) to the given future.
    private CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> chainFuture(final CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> future) {
        return this.future.whenComplete((r, t) -> {
            if (t != null) {
                future.completeExceptionally(t);
            } else {
                future.complete(r);
            }
        });
    }
}

/**
 * <p>This is used to stage the unsent {@link OffsetCommitRequestState} and {@link OffsetFetchRequestState}.
 * <ul>
 * <li>unsentOffsetCommits holds the offset commit requests that have not been sent out</li>
 * <li>unsentOffsetFetches holds the offset fetch requests that have not been sent out</li>
 * <li>inflightOffsetFetches holds the offset fetch requests that have been sent out but incompleted</li>
 * </ul>
 *
 * {@code addOffsetFetchRequest} dedupes the requests to avoid sending the same requests.
 */
class PendingRequests {
    // Queue is used to ensure the sequence of commit
    Queue<OffsetCommitRequestState> unsentOffsetCommits = new LinkedList<>();
    List<OffsetFetchRequestState> unsentOffsetFetches = new ArrayList<>();
    List<OffsetFetchRequestState> inflightOffsetFetches = new ArrayList<>();

    public boolean hasUnsentRequests() {
        return !unsentOffsetCommits.isEmpty() || !unsentOffsetFetches.isEmpty();
    }

    public CompletableFuture<ClientResponse> addOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        // TODO: Dedupe committing the same offsets to the same partitions
        OffsetCommitRequestState request = new OffsetCommitRequestState(
            offsets,
            groupState.groupId,
            groupState.groupInstanceId.orElse(null),
            groupState.generation);
        unsentOffsetCommits.add(request);
        return request.future();
    }

    /**
     * <p>Adding an offset fetch request to the outgoing buffer. If the same request was made, we chain the future
     * to the existing one.
     *
     * <p>If the request is new, it invokes a callback to remove itself from the {@code inflightOffsetFetches}
     * upon completion.
     */
    private CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> addOffsetFetchRequest(final OffsetFetchRequestState request) {
        Optional<OffsetFetchRequestState> dupe =
            unsentOffsetFetches.stream().filter(r -> r.sameRequest(request)).findAny();
        Optional<OffsetFetchRequestState> inflight =
            inflightOffsetFetches.stream().filter(r -> r.sameRequest(request)).findAny();
        if (dupe.isPresent() || inflight.isPresent()) {
            log.info("Duplicated OffsetFetchRequest: " + request.requestedPartitions);
            // Piggy-back the new caller's future onto the request that is already queued/inflight.
            dupe.orElseGet(() -> inflight.get()).chainFuture(request.future);
        } else {
            // remove the request from the outbound buffer: inflightOffsetFetches
            request.future.whenComplete((r, t) -> {
                if (!inflightOffsetFetches.remove(request)) {
                    log.warn("A duplicated, inflight, request was identified, but unable to find it in the " +
                        "outbound buffer:" + request);
                }
            });
            this.unsentOffsetFetches.add(request);
        }
        return request.future;
    }

    private CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> addOffsetFetchRequest(final Set<TopicPartition> partitions) {
        OffsetFetchRequestState request = new OffsetFetchRequestState(
            partitions,
            groupState.generation,
            retryBackoffMs);
        return addOffsetFetchRequest(request);
    }

    /**
     * Clear {@code unsentOffsetCommits} and moves all the sendable request in {@code unsentOffsetFetches} to the
     * {@code inflightOffsetFetches} to bookkeep all of the inflight requests.
     *
     * Note: Sendable requests are determined by their timer as we are expecting backoff on failed attempt. See
     * {@link RequestState}.
     **/
    public List<NetworkClientDelegate.UnsentRequest> drain(final long currentTimeMs) {
        List<NetworkClientDelegate.UnsentRequest> unsentRequests = new ArrayList<>();

        // Add all unsent offset commit requests to the unsentRequests list
        unsentRequests.addAll(
            unsentOffsetCommits.stream()
                .map(OffsetCommitRequestState::toUnsentRequest)
                .collect(Collectors.toList()));

        // Partition the unsent offset fetch requests into sendable and non-sendable lists
        Map<Boolean, List<OffsetFetchRequestState>> partitionedBySendability =
            unsentOffsetFetches.stream()
                .collect(Collectors.partitioningBy(request -> request.canSendRequest(currentTimeMs)));

        // Add all sendable offset fetch requests to the unsentRequests list and to the inflightOffsetFetches list
        for (OffsetFetchRequestState request : partitionedBySendability.get(true)) {
            request.onSendAttempt(currentTimeMs);
            unsentRequests.add(request.toUnsentRequest(currentTimeMs));
            inflightOffsetFetches.add(request);
        }

        // Clear the unsent offset commit and fetch lists and add all non-sendable offset fetch requests to the unsentOffsetFetches list
        unsentOffsetCommits.clear();
        unsentOffsetFetches.clear();
        unsentOffsetFetches.addAll(partitionedBySendability.get(false));

        return Collections.unmodifiableList(unsentRequests);
    }
}

/**
 * Encapsulates the state of auto-committing and manages the auto-commit timer.
 */
private static class AutoCommitState {
    private final Timer timer;
    private final long autoCommitInterval;
    // True while an auto-commit request is outstanding; blocks sending another one.
    private boolean hasInflightCommit;

    public AutoCommitState(
        final Time time,
        final long autoCommitInterval) {
        this.autoCommitInterval = autoCommitInterval;
        this.timer = time.timer(autoCommitInterval);
        this.hasInflightCommit = false;
    }

    // Sendable only when the previous commit finished and the interval has elapsed.
    public boolean canSendAutocommit() {
        return !this.hasInflightCommit && this.timer.isExpired();
    }

    public void resetTimer() {
        this.timer.reset(autoCommitInterval);
    }

    public void ack(final long currentTimeMs) {
        this.timer.update(currentTimeMs);
    }

    public void setInflightCommitStatus(final boolean inflightCommitStatus) {
        this.hasInflightCommit = inflightCommitStatus;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/CompletedFetch.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.errors.RecordDeserializationException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.record.ControlRecordType; import org.apache.kafka.common.record.Record; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.requests.FetchResponse; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.CloseableIterator; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; 
import java.io.Closeable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Optional; import java.util.PriorityQueue; import java.util.Set; /** * {@link CompletedFetch} represents a {@link RecordBatch batch} of {@link Record records} that was returned from the * broker via a {@link FetchRequest}. It contains logic to maintain state between calls to {@link #fetchRecords(int)}. * * @param <K> Record key type * @param <V> Record value type */ class CompletedFetch<K, V> { final TopicPartition partition; final FetchResponseData.PartitionData partitionData; final short requestVersion; long nextFetchOffset; Optional<Integer> lastEpoch; boolean isConsumed = false; boolean initialized = false; private final Logger log; private final SubscriptionState subscriptions; private final FetchConfig<K, V> fetchConfig; private final BufferSupplier decompressionBufferSupplier; private final Iterator<? 
extends RecordBatch> batches; private final Set<Long> abortedProducerIds; private final PriorityQueue<FetchResponseData.AbortedTransaction> abortedTransactions; private final FetchMetricsAggregator metricAggregator; private int recordsRead; private int bytesRead; private RecordBatch currentBatch; private Record lastRecord; private CloseableIterator<Record> records; private Exception cachedRecordException = null; private boolean corruptLastRecord = false; CompletedFetch(LogContext logContext, SubscriptionState subscriptions, FetchConfig<K, V> fetchConfig, BufferSupplier decompressionBufferSupplier, TopicPartition partition, FetchResponseData.PartitionData partitionData, FetchMetricsAggregator metricAggregator, Long fetchOffset, short requestVersion) { this.log = logContext.logger(CompletedFetch.class); this.subscriptions = subscriptions; this.fetchConfig = fetchConfig; this.decompressionBufferSupplier = decompressionBufferSupplier; this.partition = partition; this.partitionData = partitionData; this.metricAggregator = metricAggregator; this.batches = FetchResponse.recordsOrFail(partitionData).batches().iterator(); this.nextFetchOffset = fetchOffset; this.requestVersion = requestVersion; this.lastEpoch = Optional.empty(); this.abortedProducerIds = new HashSet<>(); this.abortedTransactions = abortedTransactions(partitionData); } /** * After each partition is parsed, we update the current metric totals with the total bytes * and number of records parsed. After all partitions have reported, we write the metric. */ void recordAggregatedMetrics(int bytes, int records) { metricAggregator.record(partition, bytes, records); } /** * Draining a {@link CompletedFetch} will signal that the data has been consumed and the underlying resources * are closed. This is somewhat analogous to {@link Closeable#close() closing}, though no error will result if a * caller invokes {@link #fetchRecords(int)}; an empty {@link List list} will be returned instead. 
*/ void drain() { if (!isConsumed) { maybeCloseRecordStream(); cachedRecordException = null; this.isConsumed = true; recordAggregatedMetrics(bytesRead, recordsRead); // we move the partition to the end if we received some bytes. This way, it's more likely that partitions // for the same topic can remain together (allowing for more efficient serialization). if (bytesRead > 0) subscriptions.movePartitionToEnd(partition); } } private void maybeEnsureValid(RecordBatch batch) { if (fetchConfig.checkCrcs && batch.magic() >= RecordBatch.MAGIC_VALUE_V2) { try { batch.ensureValid(); } catch (CorruptRecordException e) { throw new KafkaException("Record batch for partition " + partition + " at offset " + batch.baseOffset() + " is invalid, cause: " + e.getMessage()); } } } private void maybeEnsureValid(Record record) { if (fetchConfig.checkCrcs) { try { record.ensureValid(); } catch (CorruptRecordException e) { throw new KafkaException("Record for partition " + partition + " at offset " + record.offset() + " is invalid, cause: " + e.getMessage()); } } } private void maybeCloseRecordStream() { if (records != null) { records.close(); records = null; } } private Record nextFetchedRecord() { while (true) { if (records == null || !records.hasNext()) { maybeCloseRecordStream(); if (!batches.hasNext()) { // Message format v2 preserves the last offset in a batch even if the last record is removed // through compaction. By using the next offset computed from the last offset in the batch, // we ensure that the offset of the next fetch will point to the next batch, which avoids // unnecessary re-fetching of the same batch (in the worst case, the consumer could get stuck // fetching the same batch repeatedly). 
if (currentBatch != null) nextFetchOffset = currentBatch.nextOffset(); drain(); return null; } currentBatch = batches.next(); lastEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch()); maybeEnsureValid(currentBatch); if (fetchConfig.isolationLevel == IsolationLevel.READ_COMMITTED && currentBatch.hasProducerId()) { // remove from the aborted transaction queue all aborted transactions which have begun // before the current batch's last offset and add the associated producerIds to the // aborted producer set consumeAbortedTransactionsUpTo(currentBatch.lastOffset()); long producerId = currentBatch.producerId(); if (containsAbortMarker(currentBatch)) { abortedProducerIds.remove(producerId); } else if (isBatchAborted(currentBatch)) { log.debug("Skipping aborted record batch from partition {} with producerId {} and " + "offsets {} to {}", partition, producerId, currentBatch.baseOffset(), currentBatch.lastOffset()); nextFetchOffset = currentBatch.nextOffset(); continue; } } records = currentBatch.streamingIterator(decompressionBufferSupplier); } else { Record record = records.next(); // skip any records out of range if (record.offset() >= nextFetchOffset) { // we only do validation when the message should not be skipped. maybeEnsureValid(record); // control records are not returned to the user if (!currentBatch.isControlBatch()) { return record; } else { // Increment the next fetch offset when we skip a control batch. nextFetchOffset = record.offset() + 1; } } } } } /** * The {@link RecordBatch batch} of {@link Record records} is converted to a {@link List list} of * {@link ConsumerRecord consumer records} and returned. {@link BufferSupplier Decompression} and * {@link Deserializer deserialization} of the {@link Record record's} key and value are performed in * this step. 
* * @param maxRecords The number of records to return; the number returned may be {@code 0 <= maxRecords} * @return {@link ConsumerRecord Consumer records} */ List<ConsumerRecord<K, V>> fetchRecords(int maxRecords) { // Error when fetching the next record before deserialization. if (corruptLastRecord) throw new KafkaException("Received exception when fetching the next record from " + partition + ". If needed, please seek past the record to " + "continue consumption.", cachedRecordException); if (isConsumed) return Collections.emptyList(); List<ConsumerRecord<K, V>> records = new ArrayList<>(); try { for (int i = 0; i < maxRecords; i++) { // Only move to next record if there was no exception in the last fetch. Otherwise, we should // use the last record to do deserialization again. if (cachedRecordException == null) { corruptLastRecord = true; lastRecord = nextFetchedRecord(); corruptLastRecord = false; } if (lastRecord == null) break; Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch()); TimestampType timestampType = currentBatch.timestampType(); ConsumerRecord<K, V> record = parseRecord(partition, leaderEpoch, timestampType, lastRecord); records.add(record); recordsRead++; bytesRead += lastRecord.sizeInBytes(); nextFetchOffset = lastRecord.offset() + 1; // In some cases, the deserialization may have thrown an exception and the retry may succeed, // we allow user to move forward in this case. cachedRecordException = null; } } catch (SerializationException se) { cachedRecordException = se; if (records.isEmpty()) throw se; } catch (KafkaException e) { cachedRecordException = e; if (records.isEmpty()) throw new KafkaException("Received exception when fetching the next record from " + partition + ". 
If needed, please seek past the record to " + "continue consumption.", e); } return records; } /** * Parse the record entry, deserializing the key / value fields if necessary */ ConsumerRecord<K, V> parseRecord(TopicPartition partition, Optional<Integer> leaderEpoch, TimestampType timestampType, Record record) { try { long offset = record.offset(); long timestamp = record.timestamp(); Headers headers = new RecordHeaders(record.headers()); ByteBuffer keyBytes = record.key(); byte[] keyByteArray = keyBytes == null ? null : org.apache.kafka.common.utils.Utils.toArray(keyBytes); K key = keyBytes == null ? null : fetchConfig.keyDeserializer.deserialize(partition.topic(), headers, keyByteArray); ByteBuffer valueBytes = record.value(); byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes); V value = valueBytes == null ? null : fetchConfig.valueDeserializer.deserialize(partition.topic(), headers, valueByteArray); return new ConsumerRecord<>(partition.topic(), partition.partition(), offset, timestamp, timestampType, keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length, valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length, key, value, headers, leaderEpoch); } catch (RuntimeException e) { throw new RecordDeserializationException(partition, record.offset(), "Error deserializing key/value for partition " + partition + " at offset " + record.offset() + ". If needed, please seek past the record to continue consumption.", e); } } private Optional<Integer> maybeLeaderEpoch(int leaderEpoch) { return leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH ? 
Optional.empty() : Optional.of(leaderEpoch); } private void consumeAbortedTransactionsUpTo(long offset) { if (abortedTransactions == null) return; while (!abortedTransactions.isEmpty() && abortedTransactions.peek().firstOffset() <= offset) { FetchResponseData.AbortedTransaction abortedTransaction = abortedTransactions.poll(); abortedProducerIds.add(abortedTransaction.producerId()); } } private boolean isBatchAborted(RecordBatch batch) { return batch.isTransactional() && abortedProducerIds.contains(batch.producerId()); } private PriorityQueue<FetchResponseData.AbortedTransaction> abortedTransactions(FetchResponseData.PartitionData partition) { if (partition.abortedTransactions() == null || partition.abortedTransactions().isEmpty()) return null; PriorityQueue<FetchResponseData.AbortedTransaction> abortedTransactions = new PriorityQueue<>( partition.abortedTransactions().size(), Comparator.comparingLong(FetchResponseData.AbortedTransaction::firstOffset) ); abortedTransactions.addAll(partition.abortedTransactions()); return abortedTransactions; } private boolean containsAbortMarker(RecordBatch batch) { if (!batch.isControlBatch()) return false; Iterator<Record> batchIterator = batch.iterator(); if (!batchIterator.hasNext()) return false; Record firstRecord = batchIterator.next(); return ControlRecordType.ABORT == ControlRecordType.parse(firstRecord.key()); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import java.util.Arrays; import java.util.SortedSet; import java.util.TreeSet; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.RebalanceProtocol; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.clients.consumer.internals.Utils.TopicPartitionComparator; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import 
org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.UnstableOffsetCommitException; import org.apache.kafka.common.errors.RebalanceInProgressException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.message.JoinGroupRequestData; import org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; 
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

import static org.apache.kafka.clients.consumer.ConsumerConfig.ASSIGN_FROM_SUBSCRIBED_ASSIGNORS;
import static org.apache.kafka.clients.consumer.CooperativeStickyAssignor.COOPERATIVE_STICKY_ASSIGNOR_NAME;

/**
 * This class manages the coordination process with the consumer coordinator.
 */
public final class ConsumerCoordinator extends AbstractCoordinator {
    // deterministic ordering used for every sorted partition set built in this class
    private final static TopicPartitionComparator COMPARATOR = new TopicPartitionComparator();

    private final GroupRebalanceConfig rebalanceConfig;
    private final Logger log;
    private final List<ConsumerPartitionAssignor> assignors;
    private final ConsumerMetadata metadata;
    private final ConsumerCoordinatorMetrics sensors;
    private final SubscriptionState subscriptions;
    private final OffsetCommitCallback defaultOffsetCommitCallback;
    private final boolean autoCommitEnabled;
    private final int autoCommitIntervalMs;
    private final ConsumerInterceptors<?, ?> interceptors;
    // count of async offset commits currently in flight (incremented on send,
    // decremented on completion — presumably; confirm against commitOffsetsAsync)
    private final AtomicInteger pendingAsyncCommits;

    // this collection must be thread-safe because it is modified from the response handler
    // of offset commit requests, which may be invoked from the heartbeat thread
    private final ConcurrentLinkedQueue<OffsetCommitCompletion> completedOffsetCommits;

    private boolean isLeader = false;
    private Set<String> joinedSubscription;
    private MetadataSnapshot metadataSnapshot;
    private MetadataSnapshot assignmentSnapshot;
    private Timer nextAutoCommitTimer;
    // NOTE(review): flag raised from an async-commit response path when the member is
    // fenced (see FencedInstanceIdException import) — confirm against the commit handler
    private AtomicBoolean asyncCommitFenced;
    private ConsumerGroupMetadata groupMetadata;
    private final boolean throwOnFetchStableOffsetsUnsupported;
    private final Optional<String> rackId;

    // hold onto request&future for committed offset requests to enable async calls.
    private PendingCommittedOffsetRequest pendingCommittedOffsetRequest = null;

    // Snapshot of an in-flight committed-offset fetch: the partitions asked for, the
    // generation at send time, and the future carrying the response.
    private static class PendingCommittedOffsetRequest {
        private final Set<TopicPartition> requestedPartitions;
        private final Generation requestedGeneration;
        private final RequestFuture<Map<TopicPartition, OffsetAndMetadata>> response;

        private PendingCommittedOffsetRequest(final Set<TopicPartition> requestedPartitions,
                                              final Generation generationAtRequestTime,
                                              final RequestFuture<Map<TopicPartition, OffsetAndMetadata>> response) {
            this.requestedPartitions = Objects.requireNonNull(requestedPartitions);
            this.response = Objects.requireNonNull(response);
            this.requestedGeneration = generationAtRequestTime;
        }

        // true iff the pending request covers exactly the same partitions and generation
        private boolean sameRequest(final Set<TopicPartition> currentRequest, final Generation currentGeneration) {
            return Objects.equals(requestedGeneration, currentGeneration) && requestedPartitions.equals(currentRequest);
        }
    }

    private final RebalanceProtocol protocol;
    // pending commit offset request in onJoinPrepare
    private RequestFuture<Void> autoCommitOffsetRequestFuture = null;
    // a timer for join prepare to know when to stop.
    // it'll set to rebalance timeout so that the member can join the group successfully
    // even though offset commit failed.
    private Timer joinPrepareTimer = null;

    /**
     * Initialize the coordination manager.
*/
    public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig,
                               LogContext logContext,
                               ConsumerNetworkClient client,
                               List<ConsumerPartitionAssignor> assignors,
                               ConsumerMetadata metadata,
                               SubscriptionState subscriptions,
                               Metrics metrics,
                               String metricGrpPrefix,
                               Time time,
                               boolean autoCommitEnabled,
                               int autoCommitIntervalMs,
                               ConsumerInterceptors<?, ?> interceptors,
                               boolean throwOnFetchStableOffsetsUnsupported,
                               String rackId) {
        super(rebalanceConfig, logContext, client, metrics, metricGrpPrefix, time);
        this.rebalanceConfig = rebalanceConfig;
        this.log = logContext.logger(ConsumerCoordinator.class);
        this.metadata = metadata;
        // normalize a null/blank rack id to Optional.empty()
        this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId);
        this.metadataSnapshot = new MetadataSnapshot(this.rackId, subscriptions, metadata.fetch(), metadata.updateVersion());
        this.subscriptions = subscriptions;
        this.defaultOffsetCommitCallback = new DefaultOffsetCommitCallback();
        this.autoCommitEnabled = autoCommitEnabled;
        this.autoCommitIntervalMs = autoCommitIntervalMs;
        this.assignors = assignors;
        this.completedOffsetCommits = new ConcurrentLinkedQueue<>();
        this.sensors = new ConsumerCoordinatorMetrics(metrics, metricGrpPrefix);
        this.interceptors = interceptors;
        this.pendingAsyncCommits = new AtomicInteger();
        this.asyncCommitFenced = new AtomicBoolean(false);
        // start with an unknown generation/member id; updated once we join the group
        this.groupMetadata = new ConsumerGroupMetadata(rebalanceConfig.groupId,
            JoinGroupRequest.UNKNOWN_GENERATION_ID, JoinGroupRequest.UNKNOWN_MEMBER_ID, rebalanceConfig.groupInstanceId);
        this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported;

        if (autoCommitEnabled)
            this.nextAutoCommitTimer = time.timer(autoCommitIntervalMs);

        // select the rebalance protocol such that:
        //   1. only consider protocols that are supported by all the assignors. If there is no common protocols supported
        //      across all the assignors, throw an exception.
        //   2. if there are multiple protocols that are commonly supported, select the one with the highest id (i.e. the
        //      id number indicates how advanced the protocol is).
        // we know there are at least one assignor in the list, no need to double check for NPE
        if (!assignors.isEmpty()) {
            List<RebalanceProtocol> supportedProtocols = new ArrayList<>(assignors.get(0).supportedProtocols());

            // keep only the protocols supported by every configured assignor
            for (ConsumerPartitionAssignor assignor : assignors) {
                supportedProtocols.retainAll(assignor.supportedProtocols());
            }

            if (supportedProtocols.isEmpty()) {
                throw new IllegalArgumentException("Specified assignors " +
                    assignors.stream().map(ConsumerPartitionAssignor::name).collect(Collectors.toSet()) +
                    " do not have commonly supported rebalance protocol");
            }

            // sort ascending and take the last element, i.e. the most advanced protocol
            Collections.sort(supportedProtocols);

            protocol = supportedProtocols.get(supportedProtocols.size() - 1);
        } else {
            // no assignors configured: protocol stays null (manual assignment only)
            protocol = null;
        }

        this.metadata.requestUpdate();
    }

    // package private for testing
    boolean isLeader() {
        return this.isLeader;
    }

    // package private for testing
    SubscriptionState subscriptionState() {
        return this.subscriptions;
    }

    @Override
    public String protocolType() {
        return ConsumerProtocol.PROTOCOL_TYPE;
    }

    /**
     * Build the JoinGroup protocol set: one serialized {@link Subscription} per configured
     * assignor, carrying the current topics, user data, owned partitions, generation and rack.
     */
    @Override
    protected JoinGroupRequestData.JoinGroupRequestProtocolCollection metadata() {
        log.debug("Joining group with current subscription: {}", subscriptions.subscription());
        this.joinedSubscription = subscriptions.subscription();
        JoinGroupRequestData.JoinGroupRequestProtocolCollection protocolSet = new JoinGroupRequestData.JoinGroupRequestProtocolCollection();

        List<String> topics = new ArrayList<>(joinedSubscription);
        for (ConsumerPartitionAssignor assignor : assignors) {
            Subscription subscription = new Subscription(topics,
                                                         assignor.subscriptionUserData(joinedSubscription),
                                                         subscriptions.assignedPartitionsList(),
                                                         generation().generationId,
                                                         rackId);
            ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription);

            protocolSet.add(new JoinGroupRequestData.JoinGroupRequestProtocol()
                    .setName(assignor.name())
                    .setMetadata(Utils.toArray(metadata)));
        }
        return protocolSet;
    }

    public void updatePatternSubscription(Cluster
cluster) {
        // re-evaluate the subscribed pattern against the cluster's current topic set
        final Set<String> topicsToSubscribe = cluster.topics().stream()
                .filter(subscriptions::matchesSubscribedPattern)
                .collect(Collectors.toSet());
        if (subscriptions.subscribeFromPattern(topicsToSubscribe))
            metadata.requestUpdateForNewTopics();
    }

    // Find the configured assignor with the given name, or null if none matches.
    private ConsumerPartitionAssignor lookupAssignor(String name) {
        for (ConsumerPartitionAssignor assignor : this.assignors) {
            if (assignor.name().equals(name))
                return assignor;
        }
        return null;
    }

    private void maybeUpdateJoinedSubscription(Set<TopicPartition> assignedPartitions) {
        if (subscriptions.hasPatternSubscription()) {
            // Check if the assignment contains some topics that were not in the original
            // subscription, if yes we will obey what leader has decided and add these topics
            // into the subscriptions as long as they still match the subscribed pattern
            Set<String> addedTopics = new HashSet<>();
            // this is a copy because its handed to listener below
            for (TopicPartition tp : assignedPartitions) {
                if (!joinedSubscription.contains(tp.topic()))
                    addedTopics.add(tp.topic());
            }

            if (!addedTopics.isEmpty()) {
                Set<String> newSubscription = new HashSet<>(subscriptions.subscription());
                Set<String> newJoinedSubscription = new HashSet<>(joinedSubscription);
                newSubscription.addAll(addedTopics);
                newJoinedSubscription.addAll(addedTopics);

                if (this.subscriptions.subscribeFromPattern(newSubscription))
                    metadata.requestUpdateForNewTopics();
                this.joinedSubscription = newJoinedSubscription;
            }
        }
    }

    // Invoke the assignor's onAssignment callback; a thrown exception is returned
    // (not rethrown) so the caller can finish the rebalance before propagating it.
    private Exception invokeOnAssignment(final ConsumerPartitionAssignor assignor, final Assignment assignment) {
        log.info("Notifying assignor about the new {}", assignment);

        try {
            assignor.onAssignment(assignment, groupMetadata);
        } catch (Exception e) {
            return e;
        }

        return null;
    }

    // Invoke the user's onPartitionsAssigned callback, recording its latency; wakeup and
    // interrupt are rethrown, any other exception is returned to the caller.
    private Exception invokePartitionsAssigned(final SortedSet<TopicPartition> assignedPartitions) {
        log.info("Adding newly assigned partitions: {}", Utils.join(assignedPartitions, ", "));

        ConsumerRebalanceListener listener = subscriptions.rebalanceListener();
        try {
            final long startMs = time.milliseconds();
            listener.onPartitionsAssigned(assignedPartitions);
            sensors.assignCallbackSensor.record(time.milliseconds() - startMs);
        } catch (WakeupException | InterruptException e) {
            throw e;
        } catch (Exception e) {
            log.error("User provided listener {} failed on invocation of onPartitionsAssigned for partitions {}",
                listener.getClass().getName(), assignedPartitions, e);
            return e;
        }

        return null;
    }

    // Invoke the user's onPartitionsRevoked callback (clearing any pause flags first),
    // recording its latency; same exception contract as invokePartitionsAssigned.
    private Exception invokePartitionsRevoked(final SortedSet<TopicPartition> revokedPartitions) {
        log.info("Revoke previously assigned partitions {}", Utils.join(revokedPartitions, ", "));
        Set<TopicPartition> revokePausedPartitions = subscriptions.pausedPartitions();
        revokePausedPartitions.retainAll(revokedPartitions);
        if (!revokePausedPartitions.isEmpty())
            log.info("The pause flag in partitions [{}] will be removed due to revocation.", Utils.join(revokePausedPartitions, ", "));

        ConsumerRebalanceListener listener = subscriptions.rebalanceListener();
        try {
            final long startMs = time.milliseconds();
            listener.onPartitionsRevoked(revokedPartitions);
            sensors.revokeCallbackSensor.record(time.milliseconds() - startMs);
        } catch (WakeupException | InterruptException e) {
            throw e;
        } catch (Exception e) {
            log.error("User provided listener {} failed on invocation of onPartitionsRevoked for partitions {}",
                listener.getClass().getName(), revokedPartitions, e);
            return e;
        }

        return null;
    }

    // Invoke the user's onPartitionsLost callback (clearing any pause flags first),
    // recording its latency; same exception contract as invokePartitionsAssigned.
    private Exception invokePartitionsLost(final SortedSet<TopicPartition> lostPartitions) {
        log.info("Lost previously assigned partitions {}", Utils.join(lostPartitions, ", "));
        Set<TopicPartition> lostPausedPartitions = subscriptions.pausedPartitions();
        lostPausedPartitions.retainAll(lostPartitions);
        if (!lostPausedPartitions.isEmpty())
            log.info("The pause flag in partitions [{}] will be removed due to partition lost.", Utils.join(lostPausedPartitions, ", "));

        ConsumerRebalanceListener listener = subscriptions.rebalanceListener();
        try {
            final long startMs = time.milliseconds();
listener.onPartitionsLost(lostPartitions);
            sensors.loseCallbackSensor.record(time.milliseconds() - startMs);
        } catch (WakeupException | InterruptException e) {
            throw e;
        } catch (Exception e) {
            log.error("User provided listener {} failed on invocation of onPartitionsLost for partitions {}",
                listener.getClass().getName(), lostPartitions, e);
            return e;
        }

        return null;
    }

    @Override
    protected void onJoinComplete(int generation,
                                  String memberId,
                                  String assignmentStrategy,
                                  ByteBuffer assignmentBuffer) {
        log.debug("Executing onJoinComplete with generation {} and memberId {}", generation, memberId);

        // Only the leader is responsible for monitoring for metadata changes (i.e. partition changes)
        if (!isLeader)
            assignmentSnapshot = null;

        ConsumerPartitionAssignor assignor = lookupAssignor(assignmentStrategy);
        if (assignor == null)
            throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

        // Give the assignor a chance to update internal state based on the received assignment
        groupMetadata = new ConsumerGroupMetadata(rebalanceConfig.groupId, generation, memberId, rebalanceConfig.groupInstanceId);

        SortedSet<TopicPartition> ownedPartitions = new TreeSet<>(COMPARATOR);
        ownedPartitions.addAll(subscriptions.assignedPartitions());

        // should at least encode the short version
        if (assignmentBuffer.remaining() < 2)
            throw new IllegalStateException("There are insufficient bytes available to read assignment from the sync-group response (" +
                "actual byte size " + assignmentBuffer.remaining() + ") , this is not expected; " +
                "it is possible that the leader's assign function is buggy and did not return any assignment for this member, " +
                "or because static member is configured and the protocol is buggy hence did not get the assignment for this member");

        Assignment assignment = ConsumerProtocol.deserializeAssignment(assignmentBuffer);

        SortedSet<TopicPartition> assignedPartitions = new TreeSet<>(COMPARATOR);
        assignedPartitions.addAll(assignment.partitions());

        if (!subscriptions.checkAssignmentMatchedSubscription(assignedPartitions)) {
            final String fullReason = String.format("received assignment %s does not match the current subscription %s; " +
                    "it is likely that the subscription has changed since we joined the group, will re-join with current subscription",
                    assignment.partitions(), subscriptions.prettyString());
            requestRejoin("received assignment does not match the current subscription", fullReason);

            return;
        }

        // remember only the FIRST callback failure; remaining callbacks still run
        final AtomicReference<Exception> firstException = new AtomicReference<>(null);
        SortedSet<TopicPartition> addedPartitions = new TreeSet<>(COMPARATOR);
        addedPartitions.addAll(assignedPartitions);
        addedPartitions.removeAll(ownedPartitions);

        if (protocol == RebalanceProtocol.COOPERATIVE) {
            SortedSet<TopicPartition> revokedPartitions = new TreeSet<>(COMPARATOR);
            revokedPartitions.addAll(ownedPartitions);
            revokedPartitions.removeAll(assignedPartitions);

            log.info("Updating assignment with\n" +
                    "\tAssigned partitions: {}\n" +
                    "\tCurrent owned partitions: {}\n" +
                    "\tAdded partitions (assigned - owned): {}\n" +
                    "\tRevoked partitions (owned - assigned): {}\n",
                assignedPartitions,
                ownedPartitions,
                addedPartitions,
                revokedPartitions
            );

            if (!revokedPartitions.isEmpty()) {
                // Revoke partitions that were previously owned but no longer assigned;
                // note that we should only change the assignment (or update the assignor's state)
                // AFTER we've triggered the revoke callback
                firstException.compareAndSet(null, invokePartitionsRevoked(revokedPartitions));

                // If revoked any partitions, need to re-join the group afterwards
                final String fullReason = String.format("need to revoke partitions %s as indicated " +
                        "by the current assignment and re-join", revokedPartitions);
                requestRejoin("need to revoke partitions and re-join", fullReason);
            }
        }

        // The leader may have assigned partitions which match our subscription pattern, but which
        // were not explicitly requested, so we update the joined subscription here.
        maybeUpdateJoinedSubscription(assignedPartitions);

        // Catch any exception here to make sure we could complete the user callback.
        firstException.compareAndSet(null, invokeOnAssignment(assignor, assignment));

        // Reschedule the auto commit starting from now
        if (autoCommitEnabled)
            this.nextAutoCommitTimer.updateAndReset(autoCommitIntervalMs);

        subscriptions.assignFromSubscribed(assignedPartitions);

        // Add partitions that were not previously owned but are now assigned
        firstException.compareAndSet(null, invokePartitionsAssigned(addedPartitions));

        if (firstException.get() != null) {
            if (firstException.get() instanceof KafkaException) {
                throw (KafkaException) firstException.get();
            } else {
                throw new KafkaException("User rebalance callback throws an error", firstException.get());
            }
        }
    }

    // Refresh the pattern subscription and the cached metadata snapshot if the
    // metadata version has advanced since the snapshot was taken.
    void maybeUpdateSubscriptionMetadata() {
        int version = metadata.updateVersion();
        if (version > metadataSnapshot.version) {
            Cluster cluster = metadata.fetch();

            if (subscriptions.hasPatternSubscription())
                updatePatternSubscription(cluster);

            // Update the current snapshot, which will be used to check for subscription
            // changes that would require a rebalance (e.g. new partitions).
            metadataSnapshot = new MetadataSnapshot(rackId, subscriptions, cluster, version);
        }
    }

    // true iff the coordinator is unknown and could not be made ready within the timer
    private boolean coordinatorUnknownAndUnreadySync(Timer timer) {
        return coordinatorUnknown() && !ensureCoordinatorReady(timer);
    }

    // non-blocking variant of the above
    private boolean coordinatorUnknownAndUnreadyAsync() {
        return coordinatorUnknown() && !ensureCoordinatorReadyAsync();
    }

    /**
     * Poll for coordinator events. This ensures that the coordinator is known and that the consumer
     * has joined the group (if it is using group management). This also handles periodic offset commits
     * if they are enabled.
* <p>
     * Returns early if the timeout expires or if waiting on rejoin is not required
     *
     * @param timer Timer bounding how long this method can block
     * @param waitForJoinGroup Boolean flag indicating if we should wait until re-join group completes
     * @throws KafkaException if the rebalance callback throws an exception
     * @return true iff the operation succeeded
     */
    public boolean poll(Timer timer, boolean waitForJoinGroup) {
        maybeUpdateSubscriptionMetadata();

        invokeCompletedOffsetCommitCallbacks();

        if (subscriptions.hasAutoAssignedPartitions()) {
            if (protocol == null) {
                throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
                    " to empty while trying to subscribe for group protocol to auto assign partitions");
            }
            // Always update the heartbeat last poll time so that the heartbeat thread does not leave the
            // group proactively due to application inactivity even if (say) the coordinator cannot be found.
            pollHeartbeat(timer.currentTimeMs());
            if (coordinatorUnknownAndUnreadySync(timer)) {
                return false;
            }

            if (rejoinNeededOrPending()) {
                // due to a race condition between the initial metadata fetch and the initial rebalance,
                // we need to ensure that the metadata is fresh before joining initially. This ensures
                // that we have matched the pattern against the cluster's topics at least once before joining.
                if (subscriptions.hasPatternSubscription()) {
                    // For consumer group that uses pattern-based subscription, after a topic is created,
                    // any consumer that discovers the topic after metadata refresh can trigger rebalance
                    // across the entire consumer group. Multiple rebalances can be triggered after one topic
                    // creation if consumers refresh metadata at vastly different times. We can significantly
                    // reduce the number of rebalances caused by single topic creation by asking consumer to
                    // refresh metadata before re-joining the group as long as the refresh backoff time has
                    // passed.
                    if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
                        this.metadata.requestUpdate();
                    }

                    if (!client.ensureFreshMetadata(timer)) {
                        return false;
                    }

                    maybeUpdateSubscriptionMetadata();
                }

                // if not wait for join group, we would just use a timer of 0
                if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
                    // since we may use a different timer in the callee, we'd still need
                    // to update the original timer's current time after the call
                    timer.update(time.milliseconds());

                    return false;
                }
            }
        } else {
            // For manually assigned partitions, we do not try to pro-actively lookup coordinator;
            // instead we only try to refresh metadata when necessary.
            // If connections to all nodes fail, wakeups triggered while attempting to send fetch
            // requests result in polls returning immediately, causing a tight loop of polls. Without
            // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
            // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
            if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
                client.awaitMetadataUpdate(timer);
            }

            // if there is pending coordinator requests, ensure they have a chance to be transmitted.
            client.pollNoWakeup();
        }

        maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
        return true;
    }

    /**
     * Return the time to the next needed invocation of {@link ConsumerNetworkClient#poll(Timer)}.
* @param now current time in milliseconds
     * @return the maximum time in milliseconds the caller should wait before the next invocation of poll()
     */
    public long timeToNextPoll(long now) {
        if (!autoCommitEnabled)
            return timeToNextHeartbeat(now);

        // the earlier of the next auto-commit deadline and the next heartbeat deadline
        return Math.min(nextAutoCommitTimer.remainingMs(), timeToNextHeartbeat(now));
    }

    // Subscribe the group (leader-side) to the given topics and block until fresh
    // metadata is available, then refresh the cached metadata snapshot.
    private void updateGroupSubscription(Set<String> topics) {
        // the leader will begin watching for changes to any of the topics the group is interested in,
        // which ensures that all metadata changes will eventually be seen
        if (this.subscriptions.groupSubscribe(topics))
            metadata.requestUpdateForNewTopics();

        // update metadata (if needed) and keep track of the metadata used for assignment so that
        // we can check after rebalance completion whether anything has changed
        if (!client.ensureFreshMetadata(time.timer(Long.MAX_VALUE)))
            throw new TimeoutException();

        maybeUpdateSubscriptionMetadata();
    }

    // true iff the named assignor is one of the built-in subscription-based assignors
    private boolean isAssignFromSubscribedTopicsAssignor(String name) {
        return ASSIGN_FROM_SUBSCRIBED_ASSIGNORS.contains(name);
    }

    /**
     * user-customized assignor may have created some topics that are not in the subscription list
     * and assign their partitions to the members; in this case we would like to update the leader's
     * own metadata with the newly added topics so that it will not trigger a subsequent rebalance
     * when these topics gets updated from metadata refresh.
     *
     * We skip the check for in-product assignors since this will not happen in in-product assignors.
     *
     * TODO: this is a hack and not something we want to support long-term unless we push regex into the protocol
     *       we may need to modify the ConsumerPartitionAssignor API to better support this case.
     *
     * @param assignorName the selected assignor name
     * @param assignments the assignments after assignor assigned
     * @param allSubscribedTopics all consumers' subscribed topics
     */
    private void maybeUpdateGroupSubscription(String assignorName,
                                              Map<String, Assignment> assignments,
                                              Set<String> allSubscribedTopics) {
        if (!isAssignFromSubscribedTopicsAssignor(assignorName)) {
            Set<String> assignedTopics = new HashSet<>();
            for (Assignment assigned : assignments.values()) {
                for (TopicPartition tp : assigned.partitions())
                    assignedTopics.add(tp.topic());
            }

            if (!assignedTopics.containsAll(allSubscribedTopics)) {
                SortedSet<String> notAssignedTopics = new TreeSet<>(allSubscribedTopics);
                notAssignedTopics.removeAll(assignedTopics);
                log.warn("The following subscribed topics are not assigned to any members: {} ", notAssignedTopics);
            }

            if (!allSubscribedTopics.containsAll(assignedTopics)) {
                SortedSet<String> newlyAddedTopics = new TreeSet<>(assignedTopics);
                newlyAddedTopics.removeAll(allSubscribedTopics);
                log.info("The following not-subscribed topics are assigned, and their metadata will be " +
                    "fetched from the brokers: {}", newlyAddedTopics);

                allSubscribedTopics.addAll(newlyAddedTopics);
                updateGroupSubscription(allSubscribedTopics);
            }
        }
    }

    @Override
    protected Map<String, ByteBuffer> onLeaderElected(String leaderId,
                                                      String assignmentStrategy,
                                                      List<JoinGroupResponseData.JoinGroupResponseMember> allSubscriptions,
                                                      boolean skipAssignment) {
        ConsumerPartitionAssignor assignor = lookupAssignor(assignmentStrategy);
        if (assignor == null)
            throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);
        String assignorName = assignor.name();

        Set<String> allSubscribedTopics = new HashSet<>();
        Map<String, Subscription> subscriptions = new HashMap<>();

        // collect all the owned partitions
        Map<String, List<TopicPartition>> ownedPartitions = new HashMap<>();

        for (JoinGroupResponseData.JoinGroupResponseMember memberSubscription : allSubscriptions) {
Subscription subscription = ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberSubscription.metadata()));
            subscription.setGroupInstanceId(Optional.ofNullable(memberSubscription.groupInstanceId()));
            subscriptions.put(memberSubscription.memberId(), subscription);
            allSubscribedTopics.addAll(subscription.topics());
            ownedPartitions.put(memberSubscription.memberId(), subscription.ownedPartitions());
        }

        // the leader will begin watching for changes to any of the topics the group is interested in,
        // which ensures that all metadata changes will eventually be seen
        updateGroupSubscription(allSubscribedTopics);

        isLeader = true;

        if (skipAssignment) {
            log.info("Skipped assignment for returning static leader at generation {}. The static leader " +
                "will continue with its existing assignment.", generation().generationId);
            assignmentSnapshot = metadataSnapshot;
            return Collections.emptyMap();
        }

        log.debug("Performing assignment using strategy {} with subscriptions {}", assignorName, subscriptions);

        Map<String, Assignment> assignments = assignor.assign(metadata.fetch(), new GroupSubscription(subscriptions)).groupAssignment();

        // skip the validation for built-in cooperative sticky assignor since we've considered
        // the "generation" of ownedPartition inside the assignor
        if (protocol == RebalanceProtocol.COOPERATIVE && !assignorName.equals(COOPERATIVE_STICKY_ASSIGNOR_NAME)) {
            validateCooperativeAssignment(ownedPartitions, assignments);
        }

        maybeUpdateGroupSubscription(assignorName, assignments, allSubscribedTopics);

        // metadataSnapshot could be updated when the subscription is updated therefore
        // we must take the assignment snapshot after.
        assignmentSnapshot = metadataSnapshot;

        log.info("Finished assignment for group at generation {}: {}", generation().generationId, assignments);

        // serialize each member's assignment for the SyncGroup response
        Map<String, ByteBuffer> groupAssignment = new HashMap<>();
        for (Map.Entry<String, Assignment> assignmentEntry : assignments.entrySet()) {
            ByteBuffer buffer = ConsumerProtocol.serializeAssignment(assignmentEntry.getValue());
            groupAssignment.put(assignmentEntry.getKey(), buffer);
        }

        return groupAssignment;
    }

    /**
     * Used by COOPERATIVE rebalance protocol only.
     *
     * Validate the assignments returned by the assignor such that no owned partitions are going to
     * be reassigned to a different consumer directly: if the assignor wants to reassign an owned partition,
     * it must first remove it from the new assignment of the current owner so that it is not assigned to any
     * member, and then in the next rebalance it can finally reassign those partitions not owned by anyone to consumers.
     */
    private void validateCooperativeAssignment(final Map<String, List<TopicPartition>> ownedPartitions,
                                               final Map<String, Assignment> assignments) {
        Set<TopicPartition> totalRevokedPartitions = new HashSet<>();
        SortedSet<TopicPartition> totalAddedPartitions = new TreeSet<>(COMPARATOR);
        for (final Map.Entry<String, Assignment> entry : assignments.entrySet()) {
            final Assignment assignment = entry.getValue();
            // partitions this member gains (assigned but not previously owned)
            final Set<TopicPartition> addedPartitions = new HashSet<>(assignment.partitions());
            addedPartitions.removeAll(ownedPartitions.get(entry.getKey()));
            // partitions this member loses (previously owned but not assigned)
            final Set<TopicPartition> revokedPartitions = new HashSet<>(ownedPartitions.get(entry.getKey()));
            revokedPartitions.removeAll(assignment.partitions());

            totalAddedPartitions.addAll(addedPartitions);
            totalRevokedPartitions.addAll(revokedPartitions);
        }

        // if there are overlap between revoked partitions and added partitions, it means some partitions
        // immediately gets re-assigned to another member while it is still claimed by some member
        totalAddedPartitions.retainAll(totalRevokedPartitions);
        if
(!totalAddedPartitions.isEmpty()) {
            log.error("With the COOPERATIVE protocol, owned partitions cannot be " +
                "reassigned to other members; however the assignor has reassigned partitions {} which are still owned " +
                "by some members", totalAddedPartitions);

            throw new IllegalStateException("Assignor supporting the COOPERATIVE protocol violates its requirements");
        }
    }

    @Override
    protected boolean onJoinPrepare(Timer timer, int generation, String memberId) {
        log.debug("Executing onJoinPrepare with generation {} and memberId {}", generation, memberId);
        if (joinPrepareTimer == null) {
            // We should complete onJoinPrepare before rebalanceTimeout,
            // and continue to join group to avoid member got kicked out from group
            joinPrepareTimer = time.timer(rebalanceConfig.rebalanceTimeoutMs);
        } else {
            joinPrepareTimer.update();
        }

        // async commit offsets prior to rebalance if auto-commit enabled
        // and there is no in-flight offset commit request
        if (autoCommitEnabled && autoCommitOffsetRequestFuture == null) {
            maybeMarkPartitionsPendingRevocation();
            autoCommitOffsetRequestFuture = maybeAutoCommitOffsetsAsync();
        }

        // wait for commit offset response before timer expired
        if (autoCommitOffsetRequestFuture != null) {
            // poll with whichever timer expires first: the caller's or the join-prepare timer
            Timer pollTimer = timer.remainingMs() < joinPrepareTimer.remainingMs() ?
                timer : joinPrepareTimer;
            client.poll(autoCommitOffsetRequestFuture, pollTimer);
            joinPrepareTimer.update();

            // Keep retrying/waiting the offset commit when:
            // 1. offset commit haven't done (and joinPrepareTimer not expired)
            // 2. failed with retryable exception (and joinPrepareTimer not expired)
            // Otherwise, continue to revoke partitions, ex:
            // 1. if joinPrepareTime has expired
            // 2. if offset commit failed with no-retryable exception
            // 3. if offset commit success
            boolean onJoinPrepareAsyncCommitCompleted = true;
            if (joinPrepareTimer.isExpired()) {
                log.error("Asynchronous auto-commit of offsets failed: joinPrepare timeout. Will continue to join group");
            } else if (!autoCommitOffsetRequestFuture.isDone()) {
                onJoinPrepareAsyncCommitCompleted = false;
            } else if (autoCommitOffsetRequestFuture.failed() && autoCommitOffsetRequestFuture.isRetriable()) {
                log.debug("Asynchronous auto-commit of offsets failed with retryable error: {}. Will retry it.",
                    autoCommitOffsetRequestFuture.exception().getMessage());
                onJoinPrepareAsyncCommitCompleted = false;
            } else if (autoCommitOffsetRequestFuture.failed() && !autoCommitOffsetRequestFuture.isRetriable()) {
                log.error("Asynchronous auto-commit of offsets failed: {}. Will continue to join group.",
                    autoCommitOffsetRequestFuture.exception().getMessage());
            }
            if (autoCommitOffsetRequestFuture.isDone()) {
                autoCommitOffsetRequestFuture = null;
            }
            if (!onJoinPrepareAsyncCommitCompleted) {
                // back off briefly before the caller retries onJoinPrepare
                pollTimer.sleep(Math.min(pollTimer.remainingMs(), rebalanceConfig.retryBackoffMs));
                timer.update();
                return false;
            }
        }

        // the generation / member-id can possibly be reset by the heartbeat thread
        // upon getting errors or heartbeat timeouts; in this case whatever is previously
        // owned partitions would be lost, we should trigger the callback and cleanup the assignment;
        // otherwise we can proceed normally and revoke the partitions depending on the protocol,
        // and in that case we should only change the assignment AFTER the revoke callback is triggered
        // so that users can still access the previously owned partitions to commit offsets etc.
        Exception exception = null;
        final SortedSet<TopicPartition> revokedPartitions = new TreeSet<>(COMPARATOR);
        if (generation == Generation.NO_GENERATION.generationId ||
            memberId.equals(Generation.NO_GENERATION.memberId)) {
            revokedPartitions.addAll(subscriptions.assignedPartitions());

            if (!revokedPartitions.isEmpty()) {
                log.info("Giving away all assigned partitions as lost since generation/memberID has been reset," +
                    "indicating that consumer is in old state or no longer part of the group");
                exception = invokePartitionsLost(revokedPartitions);

                subscriptions.assignFromSubscribed(Collections.emptySet());
            }
        } else {
            switch (protocol) {
                case EAGER:
                    // revoke all partitions
                    revokedPartitions.addAll(subscriptions.assignedPartitions());
                    exception = invokePartitionsRevoked(revokedPartitions);

                    subscriptions.assignFromSubscribed(Collections.emptySet());

                    break;

                case COOPERATIVE:
                    // only revoke those partitions that are not in the subscription any more.
                    Set<TopicPartition> ownedPartitions = new HashSet<>(subscriptions.assignedPartitions());
                    revokedPartitions.addAll(ownedPartitions.stream()
                        .filter(tp -> !subscriptions.subscription().contains(tp.topic()))
                        .collect(Collectors.toSet()));

                    if (!revokedPartitions.isEmpty()) {
                        exception = invokePartitionsRevoked(revokedPartitions);

                        ownedPartitions.removeAll(revokedPartitions);
                        subscriptions.assignFromSubscribed(ownedPartitions);
                    }

                    break;
            }
        }

        isLeader = false;
        subscriptions.resetGroupSubscription();
        joinPrepareTimer = null;
        autoCommitOffsetRequestFuture = null;
        timer.update();

        if (exception != null) {
            throw new KafkaException("User rebalance callback throws an error", exception);
        }
        return true;
    }

    private void maybeMarkPartitionsPendingRevocation() {
        // only relevant for EAGER: COOPERATIVE does not revoke everything up front
        if (protocol != RebalanceProtocol.EAGER) {
            return;
        }

        // When asynchronously committing offsets prior to the revocation of a set of partitions, there will be a
        // window of time between when the offset commit is sent and when it returns and revocation completes.
It is // possible for pending fetches for these partitions to return during this time, which means the application's // position may get ahead of the committed position prior to revocation. This can cause duplicate consumption. // To prevent this, we mark the partitions as "pending revocation," which stops the Fetcher from sending new // fetches or returning data from previous fetches to the user. Set<TopicPartition> partitions = subscriptions.assignedPartitions(); log.debug("Marking assigned partitions pending for revocation: {}", partitions); subscriptions.markPendingRevocation(partitions); } @Override public void onLeavePrepare() { // Save the current Generation, as the hb thread can change it at any time final Generation currentGeneration = generation(); log.debug("Executing onLeavePrepare with generation {}", currentGeneration); // we should reset assignment and trigger the callback before leaving group SortedSet<TopicPartition> droppedPartitions = new TreeSet<>(COMPARATOR); droppedPartitions.addAll(subscriptions.assignedPartitions()); if (subscriptions.hasAutoAssignedPartitions() && !droppedPartitions.isEmpty()) { final Exception e; if ((currentGeneration.generationId == Generation.NO_GENERATION.generationId || currentGeneration.memberId.equals(Generation.NO_GENERATION.memberId)) || rebalanceInProgress()) { e = invokePartitionsLost(droppedPartitions); } else { e = invokePartitionsRevoked(droppedPartitions); } subscriptions.assignFromSubscribed(Collections.emptySet()); if (e != null) { throw new KafkaException("User rebalance callback throws an error", e); } } } /** * @throws KafkaException if the callback throws exception */ @Override public boolean rejoinNeededOrPending() { if (!subscriptions.hasAutoAssignedPartitions()) return false; // we need to rejoin if we performed the assignment and metadata has changed; // also for those owned-but-no-longer-existed partitions we should drop them as lost if (assignmentSnapshot != null && 
!assignmentSnapshot.matches(metadataSnapshot)) { final String fullReason = String.format("cached metadata has changed from %s at the beginning of the rebalance to %s", assignmentSnapshot, metadataSnapshot); requestRejoinIfNecessary("cached metadata has changed", fullReason); return true; } // we need to join if our subscription has changed since the last join if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) { final String fullReason = String.format("subscription has changed from %s at the beginning of the rebalance to %s", joinedSubscription, subscriptions.subscription()); requestRejoinIfNecessary("subscription has changed", fullReason); return true; } return super.rejoinNeededOrPending(); } /** * Refresh the committed offsets for provided partitions. * * @param timer Timer bounding how long this method can block * @return true iff the operation completed within the timeout */ public boolean refreshCommittedOffsetsIfNeeded(Timer timer) { final Set<TopicPartition> initializingPartitions = subscriptions.initializingPartitions(); final Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(initializingPartitions, timer); if (offsets == null) return false; for (final Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { final TopicPartition tp = entry.getKey(); final OffsetAndMetadata offsetAndMetadata = entry.getValue(); if (offsetAndMetadata != null) { // first update the epoch if necessary entry.getValue().leaderEpoch().ifPresent(epoch -> this.metadata.updateLastSeenEpochIfNewer(entry.getKey(), epoch)); // it's possible that the partition is no longer assigned when the response is received, // so we need to ignore seeking if that's the case if (this.subscriptions.isAssigned(tp)) { final ConsumerMetadata.LeaderAndEpoch leaderAndEpoch = metadata.currentLeader(tp); final SubscriptionState.FetchPosition position = new SubscriptionState.FetchPosition( offsetAndMetadata.offset(), 
offsetAndMetadata.leaderEpoch(), leaderAndEpoch); this.subscriptions.seekUnvalidated(tp, position); log.info("Setting offset for partition {} to the committed offset {}", tp, position); } else { log.info("Ignoring the returned {} since its partition {} is no longer assigned", offsetAndMetadata, tp); } } } return true; } /** * Fetch the current committed offsets from the coordinator for a set of partitions. * * @param partitions The partitions to fetch offsets for * @return A map from partition to the committed offset or null if the operation timed out */ public Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(final Set<TopicPartition> partitions, final Timer timer) { if (partitions.isEmpty()) return Collections.emptyMap(); final Generation generationForOffsetRequest = generationIfStable(); if (pendingCommittedOffsetRequest != null && !pendingCommittedOffsetRequest.sameRequest(partitions, generationForOffsetRequest)) { // if we were waiting for a different request, then just clear it. pendingCommittedOffsetRequest = null; } do { if (!ensureCoordinatorReady(timer)) return null; // contact coordinator to fetch committed offsets final RequestFuture<Map<TopicPartition, OffsetAndMetadata>> future; if (pendingCommittedOffsetRequest != null) { future = pendingCommittedOffsetRequest.response; } else { future = sendOffsetFetchRequest(partitions); pendingCommittedOffsetRequest = new PendingCommittedOffsetRequest(partitions, generationForOffsetRequest, future); } client.poll(future, timer); if (future.isDone()) { pendingCommittedOffsetRequest = null; if (future.succeeded()) { return future.value(); } else if (!future.isRetriable()) { throw future.exception(); } else { timer.sleep(rebalanceConfig.retryBackoffMs); } } else { return null; } } while (timer.notExpired()); return null; } /** * Return the consumer group metadata. 
* * @return the current consumer group metadata */ public ConsumerGroupMetadata groupMetadata() { return groupMetadata; } /** * @throws KafkaException if the rebalance callback throws exception */ public void close(final Timer timer) { // we do not need to re-enable wakeups since we are closing already client.disableWakeups(); try { maybeAutoCommitOffsetsSync(timer); while (pendingAsyncCommits.get() > 0 && timer.notExpired()) { ensureCoordinatorReady(timer); client.poll(timer); invokeCompletedOffsetCommitCallbacks(); } } finally { super.close(timer); } } // visible for testing void invokeCompletedOffsetCommitCallbacks() { if (asyncCommitFenced.get()) { throw new FencedInstanceIdException("Get fenced exception for group.instance.id " + rebalanceConfig.groupInstanceId.orElse("unset_instance_id") + ", current member.id is " + memberId()); } while (true) { OffsetCommitCompletion completion = completedOffsetCommits.poll(); if (completion == null) { break; } completion.invoke(); } } public RequestFuture<Void> commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) { invokeCompletedOffsetCommitCallbacks(); RequestFuture<Void> future = null; if (offsets.isEmpty()) { // No need to check coordinator if offsets is empty since commit of empty offsets is completed locally. future = doCommitOffsetsAsync(offsets, callback); } else if (!coordinatorUnknownAndUnreadyAsync()) { // we need to make sure coordinator is ready before committing, since // this is for async committing we do not try to block, but just try once to // clear the previous discover-coordinator future, resend, or get responses; // if the coordinator is not ready yet then we would just proceed and put that into the // pending requests, and future poll calls would still try to complete them. 
// // the key here though is that we have to try sending the discover-coordinator if // it's not known or ready, since this is the only place we can send such request // under manual assignment (there we would not have heartbeat thread trying to auto-rediscover // the coordinator). future = doCommitOffsetsAsync(offsets, callback); } else { // we don't know the current coordinator, so try to find it and then send the commit // or fail (we don't want recursive retries which can cause offset commits to arrive // out of order). Note that there may be multiple offset commits chained to the same // coordinator lookup request. This is fine because the listeners will be invoked in // the same order that they were added. Note also that AbstractCoordinator prevents // multiple concurrent coordinator lookup requests. pendingAsyncCommits.incrementAndGet(); lookupCoordinator().addListener(new RequestFutureListener<Void>() { @Override public void onSuccess(Void value) { pendingAsyncCommits.decrementAndGet(); doCommitOffsetsAsync(offsets, callback); client.pollNoWakeup(); } @Override public void onFailure(RuntimeException e) { pendingAsyncCommits.decrementAndGet(); completedOffsetCommits.add(new OffsetCommitCompletion(callback, offsets, new RetriableCommitFailedException(e))); } }); } // ensure the commit has a chance to be transmitted (without blocking on its completion). // Note that commits are treated as heartbeats by the coordinator, so there is no need to // explicitly allow heartbeats through delayed task execution. client.pollNoWakeup(); return future; } private RequestFuture<Void> doCommitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) { RequestFuture<Void> future = sendOffsetCommitRequest(offsets); final OffsetCommitCallback cb = callback == null ? 
defaultOffsetCommitCallback : callback; future.addListener(new RequestFutureListener<Void>() { @Override public void onSuccess(Void value) { if (interceptors != null) interceptors.onCommit(offsets); completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, null)); } @Override public void onFailure(RuntimeException e) { Exception commitException = e; if (e instanceof RetriableException) { commitException = new RetriableCommitFailedException(e); } completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, commitException)); if (commitException instanceof FencedInstanceIdException) { asyncCommitFenced.set(true); } } }); return future; } /** * Commit offsets synchronously. This method will retry until the commit completes successfully * or an unrecoverable error is encountered. * @param offsets The offsets to be committed * @throws org.apache.kafka.common.errors.AuthorizationException if the consumer is not authorized to the group * or to any of the specified partitions. See the exception for more details * @throws CommitFailedException if an unrecoverable error occurs before the commit can be completed * @throws FencedInstanceIdException if a static member gets fenced * @return If the offset commit was successfully sent and a successful response was received from * the coordinator */ public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; do { if (coordinatorUnknownAndUnreadySync(timer)) { return false; } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, timer); // We may have had in-flight offset commits when the synchronous commit began. If so, ensure that // the corresponding callbacks are invoked prior to returning in order to preserve the order that // the offset commits were applied. 
invokeCompletedOffsetCommitCallbacks(); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (future.failed() && !future.isRetriable()) throw future.exception(); timer.sleep(rebalanceConfig.retryBackoffMs); } while (timer.notExpired()); return false; } private void maybeAutoCommitOffsetsSync(Timer timer) { if (autoCommitEnabled) { Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed(); try { log.debug("Sending synchronous auto-commit of offsets {}", allConsumedOffsets); if (!commitOffsetsSync(allConsumedOffsets, timer)) log.debug("Auto-commit of offsets {} timed out before completion", allConsumedOffsets); } catch (WakeupException | InterruptException e) { log.debug("Auto-commit of offsets {} was interrupted before completion", allConsumedOffsets); // rethrow wakeups since they are triggered by the user throw e; } catch (Exception e) { // consistent with async auto-commit failures, we do not propagate the exception log.warn("Synchronous auto-commit of offsets {} failed: {}", allConsumedOffsets, e.getMessage()); } } } public void maybeAutoCommitOffsetsAsync(long now) { if (autoCommitEnabled) { nextAutoCommitTimer.update(now); if (nextAutoCommitTimer.isExpired()) { nextAutoCommitTimer.reset(autoCommitIntervalMs); autoCommitOffsetsAsync(); } } } private RequestFuture<Void> autoCommitOffsetsAsync() { Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed(); log.debug("Sending asynchronous auto-commit of offsets {}", allConsumedOffsets); return commitOffsetsAsync(allConsumedOffsets, (offsets, exception) -> { if (exception != null) { if (exception instanceof RetriableCommitFailedException) { log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", offsets, exception); nextAutoCommitTimer.updateAndReset(rebalanceConfig.retryBackoffMs); } else { log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, 
exception.getMessage()); } } else { log.debug("Completed asynchronous auto-commit of offsets {}", offsets); } }); } private RequestFuture<Void> maybeAutoCommitOffsetsAsync() { if (autoCommitEnabled) return autoCommitOffsetsAsync(); return null; } private class DefaultOffsetCommitCallback implements OffsetCommitCallback { @Override public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) { if (exception != null) log.error("Offset commit with offsets {} failed", offsets, exception); } } /** * Commit offsets for the specified list of topics and partitions. This is a non-blocking call * which returns a request future that can be polled in the case of a synchronous commit or ignored in the * asynchronous case. * * NOTE: This is visible only for testing * * @param offsets The list of offsets per partition that should be committed. * @return A request future whose value indicates whether the commit was successful or not */ RequestFuture<Void> sendOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets) { if (offsets.isEmpty()) return RequestFuture.voidSuccess(); Node coordinator = checkAndGetCoordinator(); if (coordinator == null) return RequestFuture.coordinatorNotAvailable(); // create the offset commit request Map<String, OffsetCommitRequestData.OffsetCommitRequestTopic> requestTopicDataMap = new HashMap<>(); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); OffsetAndMetadata offsetAndMetadata = entry.getValue(); if (offsetAndMetadata.offset() < 0) { return RequestFuture.failure(new IllegalArgumentException("Invalid offset: " + offsetAndMetadata.offset())); } OffsetCommitRequestData.OffsetCommitRequestTopic topic = requestTopicDataMap .getOrDefault(topicPartition.topic(), new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName(topicPartition.topic()) ); topic.partitions().add(new OffsetCommitRequestData.OffsetCommitRequestPartition() 
.setPartitionIndex(topicPartition.partition()) .setCommittedOffset(offsetAndMetadata.offset()) .setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)) .setCommittedMetadata(offsetAndMetadata.metadata()) ); requestTopicDataMap.put(topicPartition.topic(), topic); } final Generation generation; final String groupInstanceId; if (subscriptions.hasAutoAssignedPartitions()) { generation = generationIfStable(); groupInstanceId = rebalanceConfig.groupInstanceId.orElse(null); // if the generation is null, we are not part of an active group (and we expect to be). // the only thing we can do is fail the commit and let the user rejoin the group in poll(). if (generation == null) { log.info("Failing OffsetCommit request since the consumer is not part of an active group"); if (rebalanceInProgress()) { // if the client knows it is already rebalancing, we can use RebalanceInProgressException instead of // CommitFailedException to indicate this is not a fatal error return RequestFuture.failure(new RebalanceInProgressException("Offset commit cannot be completed since the " + "consumer is undergoing a rebalance for auto partition assignment. 
You can try completing the rebalance " + "by calling poll() and then retry the operation.")); } else { return RequestFuture.failure(new CommitFailedException("Offset commit cannot be completed since the " + "consumer is not part of an active group for auto partition assignment; it is likely that the consumer " + "was kicked out of the group.")); } } } else { generation = Generation.NO_GENERATION; groupInstanceId = null; } OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder( new OffsetCommitRequestData() .setGroupId(this.rebalanceConfig.groupId) .setGenerationId(generation.generationId) .setMemberId(generation.memberId) .setGroupInstanceId(groupInstanceId) .setTopics(new ArrayList<>(requestTopicDataMap.values())) ); log.trace("Sending OffsetCommit request with {} to coordinator {}", offsets, coordinator); return client.send(coordinator, builder) .compose(new OffsetCommitResponseHandler(offsets, generation)); } private class OffsetCommitResponseHandler extends CoordinatorResponseHandler<OffsetCommitResponse, Void> { private final Map<TopicPartition, OffsetAndMetadata> offsets; private OffsetCommitResponseHandler(Map<TopicPartition, OffsetAndMetadata> offsets, Generation generation) { super(generation); this.offsets = offsets; } @Override public void handle(OffsetCommitResponse commitResponse, RequestFuture<Void> future) { sensors.commitSensor.record(response.requestLatencyMs()); Set<String> unauthorizedTopics = new HashSet<>(); for (OffsetCommitResponseData.OffsetCommitResponseTopic topic : commitResponse.data().topics()) { for (OffsetCommitResponseData.OffsetCommitResponsePartition partition : topic.partitions()) { TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex()); OffsetAndMetadata offsetAndMetadata = this.offsets.get(tp); long offset = offsetAndMetadata.offset(); Errors error = Errors.forCode(partition.errorCode()); if (error == Errors.NONE) { log.debug("Committed offset {} for partition {}", offset, tp); } else { 
if (error.exception() instanceof RetriableException) { log.warn("Offset commit failed on partition {} at offset {}: {}", tp, offset, error.message()); } else { log.error("Offset commit failed on partition {} at offset {}: {}", tp, offset, error.message()); } if (error == Errors.GROUP_AUTHORIZATION_FAILED) { future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId)); return; } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { unauthorizedTopics.add(tp.topic()); } else if (error == Errors.OFFSET_METADATA_TOO_LARGE || error == Errors.INVALID_COMMIT_OFFSET_SIZE) { // raise the error to the user future.raise(error); return; } else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { // just retry future.raise(error); return; } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR || error == Errors.REQUEST_TIMED_OUT) { markCoordinatorUnknown(error); future.raise(error); return; } else if (error == Errors.FENCED_INSTANCE_ID) { log.info("OffsetCommit failed with {} due to group instance id {} fenced", sentGeneration, rebalanceConfig.groupInstanceId); // if the generation has changed or we are not in rebalancing, do not raise the fatal error but rebalance-in-progress if (generationUnchanged()) { future.raise(error); } else { KafkaException exception; synchronized (ConsumerCoordinator.this) { if (ConsumerCoordinator.this.state == MemberState.PREPARING_REBALANCE) { exception = new RebalanceInProgressException("Offset commit cannot be completed since the " + "consumer member's old generation is fenced by its group instance id, it is possible that " + "this consumer has already participated another rebalance and got a new generation"); } else { exception = new CommitFailedException(); } } future.raise(exception); } return; } else if (error == Errors.REBALANCE_IN_PROGRESS) { /* Consumer should not try to commit offset in between join-group and sync-group, * and hence on 
broker-side it is not expected to see a commit offset request * during CompletingRebalance phase; if it ever happens then broker would return * this error to indicate that we are still in the middle of a rebalance. * In this case we would throw a RebalanceInProgressException, * request re-join but do not reset generations. If the callers decide to retry they * can go ahead and call poll to finish up the rebalance first, and then try commit again. */ requestRejoin("offset commit failed since group is already rebalancing"); future.raise(new RebalanceInProgressException("Offset commit cannot be completed since the " + "consumer group is executing a rebalance at the moment. You can try completing the rebalance " + "by calling poll() and then retry commit again")); return; } else if (error == Errors.UNKNOWN_MEMBER_ID || error == Errors.ILLEGAL_GENERATION) { log.info("OffsetCommit failed with {}: {}", sentGeneration, error.message()); // only need to reset generation and re-join group if generation has not changed or we are not in rebalancing; // otherwise only raise rebalance-in-progress error KafkaException exception; synchronized (ConsumerCoordinator.this) { if (!generationUnchanged() && ConsumerCoordinator.this.state == MemberState.PREPARING_REBALANCE) { exception = new RebalanceInProgressException("Offset commit cannot be completed since the " + "consumer member's generation is already stale, meaning it has already participated another rebalance and " + "got a new generation. 
You can try completing the rebalance by calling poll() and then retry commit again"); } else { // don't reset generation member ID when ILLEGAL_GENERATION, since the member might be still valid resetStateOnResponseError(ApiKeys.OFFSET_COMMIT, error, error != Errors.ILLEGAL_GENERATION); exception = new CommitFailedException(); } } future.raise(exception); return; } else { future.raise(new KafkaException("Unexpected error in commit: " + error.message())); return; } } } } if (!unauthorizedTopics.isEmpty()) { log.error("Not authorized to commit to topics {}", unauthorizedTopics); future.raise(new TopicAuthorizationException(unauthorizedTopics)); } else { future.complete(null); } } } /** * Fetch the committed offsets for a set of partitions. This is a non-blocking call. The * returned future can be polled to get the actual offsets returned from the broker. * * @param partitions The set of partitions to get offsets for. * @return A request future containing the committed offsets. */ private RequestFuture<Map<TopicPartition, OffsetAndMetadata>> sendOffsetFetchRequest(Set<TopicPartition> partitions) { Node coordinator = checkAndGetCoordinator(); if (coordinator == null) return RequestFuture.coordinatorNotAvailable(); log.debug("Fetching committed offsets for partitions: {}", partitions); // construct the request OffsetFetchRequest.Builder requestBuilder = new OffsetFetchRequest.Builder(this.rebalanceConfig.groupId, true, new ArrayList<>(partitions), throwOnFetchStableOffsetsUnsupported); // send the request with a callback return client.send(coordinator, requestBuilder) .compose(new OffsetFetchResponseHandler()); } private class OffsetFetchResponseHandler extends CoordinatorResponseHandler<OffsetFetchResponse, Map<TopicPartition, OffsetAndMetadata>> { private OffsetFetchResponseHandler() { super(Generation.NO_GENERATION); } @Override public void handle(OffsetFetchResponse response, RequestFuture<Map<TopicPartition, OffsetAndMetadata>> future) { Errors responseError = 
response.groupLevelError(rebalanceConfig.groupId); if (responseError != Errors.NONE) { log.debug("Offset fetch failed: {}", responseError.message()); if (responseError == Errors.COORDINATOR_LOAD_IN_PROGRESS) { // just retry future.raise(responseError); } else if (responseError == Errors.NOT_COORDINATOR) { // re-discover the coordinator and retry markCoordinatorUnknown(responseError); future.raise(responseError); } else if (responseError == Errors.GROUP_AUTHORIZATION_FAILED) { future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId)); } else { future.raise(new KafkaException("Unexpected error in fetch offset response: " + responseError.message())); } return; } Set<String> unauthorizedTopics = null; Map<TopicPartition, OffsetFetchResponse.PartitionData> responseData = response.partitionDataMap(rebalanceConfig.groupId); Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(responseData.size()); Set<TopicPartition> unstableTxnOffsetTopicPartitions = new HashSet<>(); for (Map.Entry<TopicPartition, OffsetFetchResponse.PartitionData> entry : responseData.entrySet()) { TopicPartition tp = entry.getKey(); OffsetFetchResponse.PartitionData partitionData = entry.getValue(); if (partitionData.hasError()) { Errors error = partitionData.error; log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { future.raise(new KafkaException("Topic or Partition " + tp + " does not exist")); return; } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { if (unauthorizedTopics == null) { unauthorizedTopics = new HashSet<>(); } unauthorizedTopics.add(tp.topic()); } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { unstableTxnOffsetTopicPartitions.add(tp); } else { future.raise(new KafkaException("Unexpected error in fetch offset response for partition " + tp + ": " + error.message())); return; } } else if (partitionData.offset >= 0) { // record the position with the offset (-1 indicates no 
committed offset to fetch); // if there's no committed offset, record as null offsets.put(tp, new OffsetAndMetadata(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata)); } else { log.info("Found no committed offset for partition {}", tp); offsets.put(tp, null); } } if (unauthorizedTopics != null) { future.raise(new TopicAuthorizationException(unauthorizedTopics)); } else if (!unstableTxnOffsetTopicPartitions.isEmpty()) { // just retry log.info("The following partitions still have unstable offsets " + "which are not cleared on the broker side: {}" + ", this could be either " + "transactional offsets waiting for completion, or " + "normal offsets waiting for replication after appending to local log", unstableTxnOffsetTopicPartitions); future.raise(new UnstableOffsetCommitException("There are unstable offsets for the requested topic partitions")); } else { future.complete(offsets); } } } private class ConsumerCoordinatorMetrics { private final String metricGrpName; private final Sensor commitSensor; private final Sensor revokeCallbackSensor; private final Sensor assignCallbackSensor; private final Sensor loseCallbackSensor; private ConsumerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) { this.metricGrpName = metricGrpPrefix + "-coordinator-metrics"; this.commitSensor = metrics.sensor("commit-latency"); this.commitSensor.add(metrics.metricName("commit-latency-avg", this.metricGrpName, "The average time taken for a commit request"), new Avg()); this.commitSensor.add(metrics.metricName("commit-latency-max", this.metricGrpName, "The max time taken for a commit request"), new Max()); this.commitSensor.add(createMeter(metrics, metricGrpName, "commit", "commit calls")); this.revokeCallbackSensor = metrics.sensor("partition-revoked-latency"); this.revokeCallbackSensor.add(metrics.metricName("partition-revoked-latency-avg", this.metricGrpName, "The average time taken for a partition-revoked rebalance listener callback"), new Avg()); 
this.revokeCallbackSensor.add(metrics.metricName("partition-revoked-latency-max", this.metricGrpName, "The max time taken for a partition-revoked rebalance listener callback"), new Max()); this.assignCallbackSensor = metrics.sensor("partition-assigned-latency"); this.assignCallbackSensor.add(metrics.metricName("partition-assigned-latency-avg", this.metricGrpName, "The average time taken for a partition-assigned rebalance listener callback"), new Avg()); this.assignCallbackSensor.add(metrics.metricName("partition-assigned-latency-max", this.metricGrpName, "The max time taken for a partition-assigned rebalance listener callback"), new Max()); this.loseCallbackSensor = metrics.sensor("partition-lost-latency"); this.loseCallbackSensor.add(metrics.metricName("partition-lost-latency-avg", this.metricGrpName, "The average time taken for a partition-lost rebalance listener callback"), new Avg()); this.loseCallbackSensor.add(metrics.metricName("partition-lost-latency-max", this.metricGrpName, "The max time taken for a partition-lost rebalance listener callback"), new Max()); Measurable numParts = (config, now) -> subscriptions.numAssignedPartitions(); metrics.addMetric(metrics.metricName("assigned-partitions", this.metricGrpName, "The number of partitions currently assigned to this consumer"), numParts); } } private static class MetadataSnapshot { private final int version; private final Map<String, List<PartitionRackInfo>> partitionsPerTopic; private MetadataSnapshot(Optional<String> clientRack, SubscriptionState subscription, Cluster cluster, int version) { Map<String, List<PartitionRackInfo>> partitionsPerTopic = new HashMap<>(); for (String topic : subscription.metadataTopics()) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); if (partitions != null) { List<PartitionRackInfo> partitionRacks = partitions.stream() .map(p -> new PartitionRackInfo(clientRack, p)) .collect(Collectors.toList()); partitionsPerTopic.put(topic, partitionRacks); } } 
this.partitionsPerTopic = partitionsPerTopic; this.version = version; } boolean matches(MetadataSnapshot other) { return version == other.version || partitionsPerTopic.equals(other.partitionsPerTopic); } @Override public String toString() { return "(version" + version + ": " + partitionsPerTopic + ")"; } } private static class PartitionRackInfo { private final Set<String> racks; PartitionRackInfo(Optional<String> clientRack, PartitionInfo partition) { if (clientRack.isPresent() && partition.replicas() != null) { racks = Arrays.stream(partition.replicas()).map(Node::rack).collect(Collectors.toSet()); } else { racks = Collections.emptySet(); } } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof PartitionRackInfo)) { return false; } PartitionRackInfo rackInfo = (PartitionRackInfo) o; return Objects.equals(racks, rackInfo.racks); } @Override public int hashCode() { return Objects.hash(racks); } @Override public String toString() { return racks.isEmpty() ? "NO_RACKS" : "racks=" + racks; } } private static class OffsetCommitCompletion { private final OffsetCommitCallback callback; private final Map<TopicPartition, OffsetAndMetadata> offsets; private final Exception exception; private OffsetCommitCompletion(OffsetCommitCallback callback, Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) { this.callback = callback; this.offsets = offsets; this.exception = exception; } public void invoke() { if (callback != null) callback.onComplete(offsets, exception); } } /* test-only classes below */ RebalanceProtocol getProtocol() { return protocol; } boolean poll(Timer timer) { return poll(timer, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.ConsumerInterceptor; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Closeable; import java.util.List; import java.util.Map; /** * A container that holds the list {@link org.apache.kafka.clients.consumer.ConsumerInterceptor} * and wraps calls to the chain of custom interceptors. */ public class ConsumerInterceptors<K, V> implements Closeable { private static final Logger log = LoggerFactory.getLogger(ConsumerInterceptors.class); private final List<ConsumerInterceptor<K, V>> interceptors; public ConsumerInterceptors(List<ConsumerInterceptor<K, V>> interceptors) { this.interceptors = interceptors; } /** * This is called when the records are about to be returned to the user. * <p> * This method calls {@link ConsumerInterceptor#onConsume(ConsumerRecords)} for each * interceptor. Records returned from each interceptor get passed to onConsume() of the next interceptor * in the chain of interceptors. 
* <p> * This method does not throw exceptions. If any of the interceptors in the chain throws an exception, * it gets caught and logged, and next interceptor in the chain is called with 'records' returned by the * previous successful interceptor onConsume call. * * @param records records to be consumed by the client. * @return records that are either modified by interceptors or same as records passed to this method. */ public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) { ConsumerRecords<K, V> interceptRecords = records; for (ConsumerInterceptor<K, V> interceptor : this.interceptors) { try { interceptRecords = interceptor.onConsume(interceptRecords); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors log.warn("Error executing interceptor onConsume callback", e); } } return interceptRecords; } /** * This is called when commit request returns successfully from the broker. * <p> * This method calls {@link ConsumerInterceptor#onCommit(Map)} method for each interceptor. * <p> * This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged, but not propagated. * * @param offsets A map of offsets by partition with associated metadata */ public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) { for (ConsumerInterceptor<K, V> interceptor : this.interceptors) { try { interceptor.onCommit(offsets); } catch (Exception e) { // do not propagate interceptor exception, just log log.warn("Error executing interceptor onCommit callback", e); } } } /** * Closes every interceptor in a container. */ @Override public void close() { for (ConsumerInterceptor<K, V> interceptor : this.interceptors) { try { interceptor.close(); } catch (Exception e) { log.error("Failed to close consumer interceptor ", e); } } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.Metadata; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.utils.LogContext; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; public class ConsumerMetadata extends Metadata { private final boolean includeInternalTopics; private final boolean allowAutoTopicCreation; private final SubscriptionState subscription; private final Set<String> transientTopics; public ConsumerMetadata(long refreshBackoffMs, long metadataExpireMs, boolean includeInternalTopics, boolean allowAutoTopicCreation, SubscriptionState subscription, LogContext logContext, ClusterResourceListeners clusterResourceListeners) { super(refreshBackoffMs, metadataExpireMs, logContext, clusterResourceListeners); this.includeInternalTopics = includeInternalTopics; this.allowAutoTopicCreation = allowAutoTopicCreation; this.subscription = subscription; this.transientTopics = new HashSet<>(); } public boolean allowAutoTopicCreation() { return allowAutoTopicCreation; } @Override public synchronized 
MetadataRequest.Builder newMetadataRequestBuilder() { if (subscription.hasPatternSubscription()) return MetadataRequest.Builder.allTopics(); List<String> topics = new ArrayList<>(); topics.addAll(subscription.metadataTopics()); topics.addAll(transientTopics); return new MetadataRequest.Builder(topics, allowAutoTopicCreation); } synchronized void addTransientTopics(Set<String> topics) { this.transientTopics.addAll(topics); if (!fetch().topics().containsAll(topics)) requestUpdateForNewTopics(); } synchronized void clearTransientTopics() { this.transientTopics.clear(); } @Override protected synchronized boolean retainTopic(String topic, boolean isInternal, long nowMs) { if (transientTopics.contains(topic) || subscription.needsMetadata(topic)) return true; if (isInternal && !includeInternalTopics) return false; return subscription.matchesSubscribedPattern(topic); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.metrics.Metrics; public class ConsumerMetrics { public FetchMetricsRegistry fetcherMetrics; public ConsumerMetrics(Set<String> metricsTags, String metricGrpPrefix) { this.fetcherMetrics = new FetchMetricsRegistry(metricsTags, metricGrpPrefix); } public ConsumerMetrics(String metricGroupPrefix) { this(new HashSet<String>(), metricGroupPrefix); } private List<MetricNameTemplate> getAllTemplates() { List<MetricNameTemplate> l = new ArrayList<>(this.fetcherMetrics.getAllTemplates()); return l; } public static void main(String[] args) { Set<String> tags = new HashSet<>(); tags.add("client-id"); ConsumerMetrics metrics = new ConsumerMetrics(tags, "consumer"); System.out.println(Metrics.toHtmlTable("kafka.consumer", metrics.getAllTemplates())); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.clients.Metadata;
import org.apache.kafka.clients.RequestCompletionHandler;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Higher level consumer access to the network layer with basic support for request futures. This class
 * is thread-safe, but provides no synchronization for response callbacks. This guarantees that no locks
 * are held when they are invoked.
 */
public class ConsumerNetworkClient implements Closeable {
    private static final int MAX_POLL_TIMEOUT_MS = 5000;

    // the mutable state of this class is protected by the object's monitor (excluding the wakeup
    // flag and the request completion queue below).
    private final Logger log;
    private final KafkaClient client;
    private final UnsentRequests unsent = new UnsentRequests();
    private final Metadata metadata;
    private final Time time;
    private final long retryBackoffMs;
    private final int maxPollTimeoutMs;
    private final int requestTimeoutMs;
    private final AtomicBoolean wakeupDisabled = new AtomicBoolean();

    // We do not need high throughput, so use a fair lock to try to avoid starvation
    private final ReentrantLock lock = new ReentrantLock(true);

    // when requests complete, they are transferred to this queue prior to invocation. The purpose
    // is to avoid invoking them while holding this object's monitor which can open the door for deadlocks.
    private final ConcurrentLinkedQueue<RequestFutureCompletionHandler> pendingCompletion = new ConcurrentLinkedQueue<>();

    private final ConcurrentLinkedQueue<Node> pendingDisconnects = new ConcurrentLinkedQueue<>();

    // this flag allows the client to be safely woken up without waiting on the lock above. It is
    // atomic to avoid the need to acquire the lock above in order to enable it concurrently.
    private final AtomicBoolean wakeup = new AtomicBoolean(false);

    public ConsumerNetworkClient(LogContext logContext,
                                 KafkaClient client,
                                 Metadata metadata,
                                 Time time,
                                 long retryBackoffMs,
                                 int requestTimeoutMs,
                                 int maxPollTimeoutMs) {
        this.log = logContext.logger(ConsumerNetworkClient.class);
        this.client = client;
        this.metadata = metadata;
        this.time = time;
        this.retryBackoffMs = retryBackoffMs;
        // cap the poll timeout at MAX_POLL_TIMEOUT_MS so blocked polls remain responsive
        this.maxPollTimeoutMs = Math.min(maxPollTimeoutMs, MAX_POLL_TIMEOUT_MS);
        this.requestTimeoutMs = requestTimeoutMs;
    }

    /** The default request timeout (ms) used by {@link #send(Node, AbstractRequest.Builder)}. */
    public int defaultRequestTimeoutMs() {
        return requestTimeoutMs;
    }

    /**
     * Send a request with the default timeout. See {@link #send(Node, AbstractRequest.Builder, int)}.
     */
    public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) {
        return send(node, requestBuilder, requestTimeoutMs);
    }

    /**
     * Send a new request. Note that the request is not actually transmitted on the
     * network until one of the {@link #poll(Timer)} variants is invoked. At this
     * point the request will either be transmitted successfully or will fail.
     * Use the returned future to obtain the result of the send. Note that there is no
     * need to check for disconnects explicitly on the {@link ClientResponse} object;
     * instead, the future will be failed with a {@link DisconnectException}.
     *
     * @param node The destination of the request
     * @param requestBuilder A builder for the request payload
     * @param requestTimeoutMs Maximum time in milliseconds to await a response before disconnecting the socket and
     *                         cancelling the request. The request may be cancelled sooner if the socket disconnects
     *                         for any reason.
     * @return A future which indicates the result of the send.
     */
    public RequestFuture<ClientResponse> send(Node node,
                                              AbstractRequest.Builder<?> requestBuilder,
                                              int requestTimeoutMs) {
        long now = time.milliseconds();
        RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler();
        ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true,
            requestTimeoutMs, completionHandler);
        unsent.put(node, clientRequest);

        // wakeup the client in case it is blocking in poll so that we can send the queued request
        client.wakeup();
        return completionHandler.future;
    }

    /** Delegates to the underlying client; see KafkaClient#leastLoadedNode. */
    public Node leastLoadedNode() {
        lock.lock();
        try {
            return client.leastLoadedNode(time.milliseconds());
        } finally {
            lock.unlock();
        }
    }

    /** Whether the underlying client has at least one node ready to receive requests. */
    public boolean hasReadyNodes(long now) {
        lock.lock();
        try {
            return client.hasReadyNodes(now);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Block waiting on the metadata refresh with a timeout.
     *
     * @return true if update succeeded, false otherwise.
     */
    public boolean awaitMetadataUpdate(Timer timer) {
        int version = this.metadata.requestUpdate();
        do {
            poll(timer);
        } while (this.metadata.updateVersion() == version && timer.notExpired());
        return this.metadata.updateVersion() > version;
    }

    /**
     * Ensure our metadata is fresh (if an update is expected, this will block
     * until it has completed).
     */
    boolean ensureFreshMetadata(Timer timer) {
        if (this.metadata.updateRequested() || this.metadata.timeToNextUpdate(timer.currentTimeMs()) == 0) {
            return awaitMetadataUpdate(timer);
        } else {
            // the metadata is already fresh
            return true;
        }
    }

    /**
     * Wakeup an active poll. This will cause the polling thread to throw an exception either
     * on the current poll if one is active, or the next poll.
     */
    public void wakeup() {
        // wakeup should be safe without holding the client lock since it simply delegates to
        // Selector's wakeup, which is thread-safe
        log.debug("Received user wakeup");
        this.wakeup.set(true);
        this.client.wakeup();
    }

    /**
     * Block indefinitely until the given request future has finished.
     * @param future The request future to await.
     * @throws WakeupException if {@link #wakeup()} is called from another thread
     * @throws InterruptException if the calling thread is interrupted
     */
    public void poll(RequestFuture<?> future) {
        while (!future.isDone())
            poll(time.timer(Long.MAX_VALUE), future);
    }

    /**
     * Block until the provided request future request has finished or the timeout has expired.
     * @param future The request future to wait for
     * @param timer Timer bounding how long this method can block
     * @return true if the future is done, false otherwise
     * @throws WakeupException if {@link #wakeup()} is called from another thread
     * @throws InterruptException if the calling thread is interrupted
     */
    public boolean poll(RequestFuture<?> future, Timer timer) {
        return poll(future, timer, false);
    }

    /**
     * Block until the provided request future request has finished or the timeout has expired.
     *
     * @param future The request future to wait for
     * @param timer Timer bounding how long this method can block
     * @param disableWakeup true if we should not check for wakeups, false otherwise
     *
     * @return true if the future is done, false otherwise
     * @throws WakeupException if {@link #wakeup()} is called from another thread and `disableWakeup` is false
     * @throws InterruptException if the calling thread is interrupted
     */
    public boolean poll(RequestFuture<?> future, Timer timer, boolean disableWakeup) {
        do {
            poll(timer, future, disableWakeup);
        } while (!future.isDone() && timer.notExpired());
        return future.isDone();
    }

    /**
     * Poll for any network IO.
     * @param timer Timer bounding how long this method can block
     * @throws WakeupException if {@link #wakeup()} is called from another thread
     * @throws InterruptException if the calling thread is interrupted
     */
    public void poll(Timer timer) {
        poll(timer, null);
    }

    /**
     * Poll for any network IO.
     * @param timer Timer bounding how long this method can block
     * @param pollCondition Nullable blocking condition
     */
    public void poll(Timer timer, PollCondition pollCondition) {
        poll(timer, pollCondition, false);
    }

    /**
     * Poll for any network IO.
     * @param timer Timer bounding how long this method can block
     * @param pollCondition Nullable blocking condition
     * @param disableWakeup If TRUE disable triggering wake-ups
     */
    public void poll(Timer timer, PollCondition pollCondition, boolean disableWakeup) {
        // there may be handlers which need to be invoked if we woke up the previous call to poll
        firePendingCompletedRequests();

        lock.lock();
        try {
            // Handle async disconnects prior to attempting any sends
            handlePendingDisconnects();

            // send all the requests we can send now
            long pollDelayMs = trySend(timer.currentTimeMs());

            // check whether the poll is still needed by the caller. Note that if the expected completion
            // condition becomes satisfied after the call to shouldBlock() (because of a fired completion
            // handler), the client will be woken up.
            if (pendingCompletion.isEmpty() && (pollCondition == null || pollCondition.shouldBlock())) {
                // if there are no requests in flight, do not block longer than the retry backoff
                long pollTimeout = Math.min(timer.remainingMs(), pollDelayMs);
                if (client.inFlightRequestCount() == 0)
                    pollTimeout = Math.min(pollTimeout, retryBackoffMs);
                client.poll(pollTimeout, timer.currentTimeMs());
            } else {
                client.poll(0, timer.currentTimeMs());
            }
            timer.update();

            // handle any disconnects by failing the active requests. note that disconnects must
            // be checked immediately following poll since any subsequent call to client.ready()
            // will reset the disconnect status
            checkDisconnects(timer.currentTimeMs());
            if (!disableWakeup) {
                // trigger wakeups after checking for disconnects so that the callbacks will be ready
                // to be fired on the next call to poll()
                maybeTriggerWakeup();
            }
            // throw InterruptException if this thread is interrupted
            maybeThrowInterruptException();

            // try again to send requests since buffer space may have been
            // cleared or a connect finished in the poll
            trySend(timer.currentTimeMs());

            // fail requests that couldn't be sent if they have expired
            failExpiredRequests(timer.currentTimeMs());

            // clean unsent requests collection to keep the map from growing indefinitely
            unsent.clean();
        } finally {
            lock.unlock();
        }

        // called without the lock to avoid deadlock potential if handlers need to acquire locks
        firePendingCompletedRequests();

        metadata.maybeThrowAnyException();
    }

    /**
     * Poll for network IO and return immediately. This will not trigger wakeups.
     */
    public void pollNoWakeup() {
        poll(time.timer(0), null, true);
    }

    /**
     * Poll for network IO in best-effort only trying to transmit the ready-to-send request
     * Do not check any pending requests or metadata errors so that no exception should ever
     * be thrown, also no wakeups be triggered and no interrupted exception either.
     */
    public void transmitSends() {
        Timer timer = time.timer(0);

        // do not try to handle any disconnects, prev request failures, metadata exception etc;
        // just try once and return immediately
        lock.lock();
        try {
            // send all the requests we can send now
            trySend(timer.currentTimeMs());

            client.poll(0, timer.currentTimeMs());
        } finally {
            lock.unlock();
        }
    }

    /**
     * Block until all pending requests from the given node have finished.
     * @param node The node to await requests from
     * @param timer Timer bounding how long this method can block
     * @return true If all requests finished, false if the timeout expired first
     */
    public boolean awaitPendingRequests(Node node, Timer timer) {
        while (hasPendingRequests(node) && timer.notExpired()) {
            poll(timer);
        }
        return !hasPendingRequests(node);
    }

    /**
     * Get the count of pending requests to the given node. This includes both request that
     * have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
     * @param node The node in question
     * @return The number of pending requests
     */
    public int pendingRequestCount(Node node) {
        lock.lock();
        try {
            return unsent.requestCount(node) + client.inFlightRequestCount(node.idString());
        } finally {
            lock.unlock();
        }
    }

    /**
     * Check whether there is pending request to the given node. This includes both request that
     * have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
     * @param node The node in question
     * @return A boolean indicating whether there is pending request
     */
    public boolean hasPendingRequests(Node node) {
        // the unsent map is checked first without the lock (UnsentRequests is thread-safe)
        if (unsent.hasRequests(node))
            return true;
        lock.lock();
        try {
            return client.hasInFlightRequests(node.idString());
        } finally {
            lock.unlock();
        }
    }

    /**
     * Get the total count of pending requests from all nodes. This includes both requests that
     * have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
     * @return The total count of pending requests
     */
    public int pendingRequestCount() {
        lock.lock();
        try {
            return unsent.requestCount() + client.inFlightRequestCount();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Check whether there is pending request. This includes both requests that
     * have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
     * @return A boolean indicating whether there is pending request
     */
    public boolean hasPendingRequests() {
        if (unsent.hasRequests())
            return true;
        lock.lock();
        try {
            return client.hasInFlightRequests();
        } finally {
            lock.unlock();
        }
    }

    // Drain the pending-completion queue and invoke each handler; called without holding the
    // lock so that handler callbacks may safely acquire their own locks.
    private void firePendingCompletedRequests() {
        boolean completedRequestsFired = false;
        for (;;) {
            RequestFutureCompletionHandler completionHandler = pendingCompletion.poll();
            if (completionHandler == null)
                break;

            completionHandler.fireCompletion();
            completedRequestsFired = true;
        }

        // wakeup the client in case it is blocking in poll for this future's completion
        if (completedRequestsFired)
            client.wakeup();
    }

    private void checkDisconnects(long now) {
        // any disconnects affecting requests that have already been transmitted will be handled
        // by NetworkClient, so we just need to check whether connections for any of the unsent
        // requests have been disconnected; if they have, then we complete the corresponding future
        // and set the disconnect flag in the ClientResponse
        for (Node node : unsent.nodes()) {
            if (client.connectionFailed(node)) {
                // Remove entry before invoking request callback to avoid callbacks handling
                // coordinator failures traversing the unsent list again.
                Collection<ClientRequest> requests = unsent.remove(node);
                for (ClientRequest request : requests) {
                    RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) request.callback();
                    AuthenticationException authenticationException = client.authenticationException(node);
                    handler.onComplete(new ClientResponse(request.makeHeader(request.requestBuilder().latestAllowedVersion()),
                        request.callback(), request.destination(), request.createdTimeMs(), now, true,
                        null, authenticationException, null));
                }
            }
        }
    }

    // Process disconnects queued by disconnectAsync(): fail their unsent requests and
    // disconnect the underlying connection. Must hold the lock.
    private void handlePendingDisconnects() {
        lock.lock();
        try {
            while (true) {
                Node node = pendingDisconnects.poll();
                if (node == null)
                    break;

                failUnsentRequests(node, DisconnectException.INSTANCE);
                client.disconnect(node.idString());
            }
        } finally {
            lock.unlock();
        }
    }

    /** Queue a disconnect of the given node; processed on the next poll. */
    public void disconnectAsync(Node node) {
        pendingDisconnects.offer(node);
        client.wakeup();
    }

    private void failExpiredRequests(long now) {
        // clear all expired unsent requests and fail their corresponding futures
        Collection<ClientRequest> expiredRequests = unsent.removeExpiredRequests(now);
        for (ClientRequest request : expiredRequests) {
            RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) request.callback();
            handler.onFailure(new TimeoutException("Failed to send request after " + request.requestTimeoutMs() + " ms."));
        }
    }

    private void failUnsentRequests(Node node, RuntimeException e) {
        // clear unsent requests to node and fail their corresponding futures
        lock.lock();
        try {
            Collection<ClientRequest> unsentRequests = unsent.remove(node);
            for (ClientRequest unsentRequest : unsentRequests) {
                RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) unsentRequest.callback();
                handler.onFailure(e);
            }
        } finally {
            lock.unlock();
        }
    }

    // Visible for testing
    long trySend(long now) {
        long pollDelayMs = maxPollTimeoutMs;

        // send any requests that can be sent now
        for (Node node : unsent.nodes()) {
            Iterator<ClientRequest> iterator = unsent.requestIterator(node);
            if (iterator.hasNext())
                pollDelayMs = Math.min(pollDelayMs, client.pollDelayMs(node, now));

            while (iterator.hasNext()) {
                ClientRequest request = iterator.next();
                if (client.ready(node, now)) {
                    client.send(request, now);
                    iterator.remove();
                } else {
                    // try next node when current node is not ready
                    break;
                }
            }
        }
        return pollDelayMs;
    }

    // Throw WakeupException if a user wakeup is pending (and wakeups are not disabled),
    // clearing the wakeup flag in the process.
    public void maybeTriggerWakeup() {
        if (!wakeupDisabled.get() && wakeup.get()) {
            log.debug("Raising WakeupException in response to user wakeup");
            wakeup.set(false);
            throw new WakeupException();
        }
    }

    // Convert a pending thread interrupt into Kafka's unchecked InterruptException.
    // Note Thread.interrupted() also clears the interrupt status.
    private void maybeThrowInterruptException() {
        if (Thread.interrupted()) {
            throw new InterruptException(new InterruptedException());
        }
    }

    /** Permanently disable user wakeups for this client. */
    public void disableWakeups() {
        wakeupDisabled.set(true);
    }

    @Override
    public void close() throws IOException {
        lock.lock();
        try {
            client.close();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Check if the code is disconnected and unavailable for immediate reconnection (i.e. if it is in
     * reconnect backoff window following the disconnect).
     */
    public boolean isUnavailable(Node node) {
        lock.lock();
        try {
            return client.connectionFailed(node) && client.connectionDelay(node, time.milliseconds()) > 0;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Check for an authentication error on a given node and raise the exception if there is one.
     */
    public void maybeThrowAuthFailure(Node node) {
        lock.lock();
        try {
            AuthenticationException exception = client.authenticationException(node);
            if (exception != null)
                throw exception;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Initiate a connection if currently possible. This is only really useful for resetting the failed
     * status of a socket. If there is an actual request to send, then {@link #send(Node, AbstractRequest.Builder)}
     * should be used.
     * @param node The node to connect to
     */
    public void tryConnect(Node node) {
        lock.lock();
        try {
            client.ready(node, time.milliseconds());
        } finally {
            lock.unlock();
        }
    }

    // Bridges the network client's completion callback to a RequestFuture. The callback only
    // enqueues itself on pendingCompletion; the future is actually completed/failed later in
    // fireCompletion(), which runs outside the lock.
    private class RequestFutureCompletionHandler implements RequestCompletionHandler {
        private final RequestFuture<ClientResponse> future;
        private ClientResponse response;
        private RuntimeException e;

        private RequestFutureCompletionHandler() {
            this.future = new RequestFuture<>();
        }

        // Complete the future from the recorded outcome, mapping auth errors, disconnects
        // and version mismatches to the corresponding exceptions.
        public void fireCompletion() {
            if (e != null) {
                future.raise(e);
            } else if (response.authenticationException() != null) {
                future.raise(response.authenticationException());
            } else if (response.wasDisconnected()) {
                log.debug("Cancelled request with header {} due to node {} being disconnected",
                    response.requestHeader(), response.destination());
                future.raise(DisconnectException.INSTANCE);
            } else if (response.versionMismatch() != null) {
                future.raise(response.versionMismatch());
            } else {
                future.complete(response);
            }
        }

        public void onFailure(RuntimeException e) {
            this.e = e;
            pendingCompletion.add(this);
        }

        @Override
        public void onComplete(ClientResponse response) {
            this.response = response;
            pendingCompletion.add(this);
        }
    }

    /**
     * When invoking poll from a multi-threaded environment, it is possible that the condition that
     * the caller is awaiting has already been satisfied prior to the invocation of poll. We therefore
     * introduce this interface to push the condition checking as close as possible to the invocation
     * of poll. In particular, the check will be done while holding the lock used to protect concurrent
     * access to {@link org.apache.kafka.clients.NetworkClient}, which means implementations must be
     * very careful about locking order if the callback must acquire additional locks.
     */
    public interface PollCondition {
        /**
         * Return whether the caller is still awaiting an IO event.
         * @return true if so, false otherwise.
         */
        boolean shouldBlock();
    }

    /*
     * A thread-safe helper class to hold requests per node that have not been sent yet
     */
    private static final class UnsentRequests {
        private final ConcurrentMap<Node, ConcurrentLinkedQueue<ClientRequest>> unsent;

        private UnsentRequests() {
            unsent = new ConcurrentHashMap<>();
        }

        public void put(Node node, ClientRequest request) {
            // the lock protects the put from a concurrent removal of the queue for the node
            synchronized (unsent) {
                ConcurrentLinkedQueue<ClientRequest> requests = unsent.computeIfAbsent(node, key -> new ConcurrentLinkedQueue<>());
                requests.add(request);
            }
        }

        public int requestCount(Node node) {
            ConcurrentLinkedQueue<ClientRequest> requests = unsent.get(node);
            return requests == null ? 0 : requests.size();
        }

        public int requestCount() {
            int total = 0;
            for (ConcurrentLinkedQueue<ClientRequest> requests : unsent.values())
                total += requests.size();
            return total;
        }

        public boolean hasRequests(Node node) {
            ConcurrentLinkedQueue<ClientRequest> requests = unsent.get(node);
            return requests != null && !requests.isEmpty();
        }

        public boolean hasRequests() {
            for (ConcurrentLinkedQueue<ClientRequest> requests : unsent.values())
                if (!requests.isEmpty())
                    return true;
            return false;
        }

        // Remove and return all requests whose timeout has elapsed. Queues are ordered by
        // creation time, so iteration per queue stops at the first non-expired request.
        private Collection<ClientRequest> removeExpiredRequests(long now) {
            List<ClientRequest> expiredRequests = new ArrayList<>();
            for (ConcurrentLinkedQueue<ClientRequest> requests : unsent.values()) {
                Iterator<ClientRequest> requestIterator = requests.iterator();
                while (requestIterator.hasNext()) {
                    ClientRequest request = requestIterator.next();
                    long elapsedMs = Math.max(0, now - request.createdTimeMs());
                    if (elapsedMs > request.requestTimeoutMs()) {
                        expiredRequests.add(request);
                        requestIterator.remove();
                    } else
                        break;
                }
            }
            return expiredRequests;
        }

        public void clean() {
            // the lock protects removal from a concurrent put which could otherwise mutate the
            // queue after it has been removed from the map
            synchronized (unsent) {
                Iterator<ConcurrentLinkedQueue<ClientRequest>> iterator = unsent.values().iterator();
                while (iterator.hasNext()) {
                    ConcurrentLinkedQueue<ClientRequest> requests = iterator.next();
                    if (requests.isEmpty())
                        iterator.remove();
                }
            }
        }

        public Collection<ClientRequest> remove(Node node) {
            // the lock protects removal from a concurrent put which could otherwise mutate the
            // queue after it has been removed from the map
            synchronized (unsent) {
                ConcurrentLinkedQueue<ClientRequest> requests = unsent.remove(node);
                return requests == null ? Collections.<ClientRequest>emptyList() : requests;
            }
        }

        public Iterator<ClientRequest> requestIterator(Node node) {
            ConcurrentLinkedQueue<ClientRequest> requests = unsent.get(node);
            return requests == null ? Collections.<ClientRequest>emptyIterator() : requests.iterator();
        }

        public Collection<Node> nodes() {
            return unsent.keySet();
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.ConsumerProtocolAssignment; import org.apache.kafka.common.message.ConsumerProtocolSubscription; import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.types.SchemaException; import java.nio.ByteBuffer; import java.nio.BufferUnderflowException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Optional; /** * ConsumerProtocol contains the schemas for consumer subscriptions and assignments for use with * Kafka's generalized group management protocol. * * The current implementation assumes that future versions will not break compatibility. When * it encounters a newer version, it parses it using the current format. This basically means * that new versions cannot remove or reorder any of the existing fields. 
*/
public class ConsumerProtocol {
    public static final String PROTOCOL_TYPE = "consumer";

    static {
        // Safety check to ensure that both parts of the consumer protocol remain in sync:
        // Subscription and Assignment must be versioned together.
        if (ConsumerProtocolSubscription.LOWEST_SUPPORTED_VERSION != ConsumerProtocolAssignment.LOWEST_SUPPORTED_VERSION)
            throw new IllegalStateException("Subscription and Assignment schemas must have the " +
                "same lowest version");

        if (ConsumerProtocolSubscription.HIGHEST_SUPPORTED_VERSION != ConsumerProtocolAssignment.HIGHEST_SUPPORTED_VERSION)
            throw new IllegalStateException("Subscription and Assignment schemas must have the " +
                "same highest version");
    }

    /**
     * Reads the two-byte version header from the front of a serialized subscription or assignment.
     *
     * @param buffer buffer positioned at the start of the serialized payload
     * @return the schema version
     * @throws SchemaException if fewer than two bytes remain in the buffer
     */
    public static short deserializeVersion(final ByteBuffer buffer) {
        try {
            return buffer.getShort();
        } catch (BufferUnderflowException e) {
            throw new SchemaException("Buffer underflow while parsing consumer protocol's header", e);
        }
    }

    /** Serializes a subscription at the highest version supported by this client. */
    public static ByteBuffer serializeSubscription(final Subscription subscription) {
        return serializeSubscription(subscription, ConsumerProtocolSubscription.HIGHEST_SUPPORTED_VERSION);
    }

    /**
     * Serializes a subscription at the given version (clamped to the supported range),
     * prefixed with the two-byte version header.
     *
     * @throws SchemaException if {@code version} is below the lowest supported version
     */
    public static ByteBuffer serializeSubscription(final Subscription subscription, short version) {
        version = checkSubscriptionVersion(version);

        ConsumerProtocolSubscription data = new ConsumerProtocolSubscription();

        // Topics are sorted so that serialization is deterministic regardless of input order.
        List<String> topics = new ArrayList<>(subscription.topics());
        Collections.sort(topics);
        data.setTopics(topics);

        data.setUserData(subscription.userData() != null ? subscription.userData().duplicate() : null);

        // Owned partitions are sorted (topic, then partition) and then grouped by topic:
        // consecutive entries with the same topic share one TopicPartition record.
        List<TopicPartition> ownedPartitions = new ArrayList<>(subscription.ownedPartitions());
        ownedPartitions.sort(Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition));
        ConsumerProtocolSubscription.TopicPartition partition = null;
        for (TopicPartition tp : ownedPartitions) {
            if (partition == null || !partition.topic().equals(tp.topic())) {
                partition = new ConsumerProtocolSubscription.TopicPartition().setTopic(tp.topic());
                data.ownedPartitions().add(partition);
            }
            partition.partitions().add(tp.partition());
        }

        subscription.rackId().ifPresent(data::setRackId);
        // -1 is the sentinel for "no generation".
        data.setGenerationId(subscription.generationId().orElse(-1));

        return MessageUtil.toVersionPrefixedByteBuffer(version, data);
    }

    /**
     * Deserializes a subscription at the given version (clamped to the supported range).
     * The version header must already have been consumed from {@code buffer}.
     *
     * @throws SchemaException if the buffer is truncated or the version is below the lowest supported
     */
    public static Subscription deserializeSubscription(final ByteBuffer buffer, short version) {
        version = checkSubscriptionVersion(version);

        try {
            ConsumerProtocolSubscription data =
                new ConsumerProtocolSubscription(new ByteBufferAccessor(buffer), version);

            // Flatten the per-topic partition lists back into TopicPartition objects.
            List<TopicPartition> ownedPartitions = new ArrayList<>();
            for (ConsumerProtocolSubscription.TopicPartition tp : data.ownedPartitions()) {
                for (Integer partition : tp.partitions()) {
                    ownedPartitions.add(new TopicPartition(tp.topic(), partition));
                }
            }

            return new Subscription(
                data.topics(),
                data.userData() != null ? data.userData().duplicate() : null,
                ownedPartitions,
                data.generationId(),
                // An absent rack id is serialized as null/empty; map both back to Optional.empty().
                data.rackId() == null || data.rackId().isEmpty() ? Optional.empty() : Optional.of(data.rackId()));
        } catch (BufferUnderflowException e) {
            throw new SchemaException("Buffer underflow while parsing consumer protocol's subscription", e);
        }
    }

    /** Deserializes a subscription, reading the version header from the buffer first. */
    public static Subscription deserializeSubscription(final ByteBuffer buffer) {
        return deserializeSubscription(buffer, deserializeVersion(buffer));
    }

    /** Serializes an assignment at the highest version supported by this client. */
    public static ByteBuffer serializeAssignment(final Assignment assignment) {
        return serializeAssignment(assignment, ConsumerProtocolAssignment.HIGHEST_SUPPORTED_VERSION);
    }

    /**
     * Serializes an assignment at the given version (clamped to the supported range),
     * prefixed with the two-byte version header.
     *
     * @throws SchemaException if {@code version} is below the lowest supported version
     */
    public static ByteBuffer serializeAssignment(final Assignment assignment, short version) {
        version = checkAssignmentVersion(version);

        ConsumerProtocolAssignment data = new ConsumerProtocolAssignment();
        data.setUserData(assignment.userData() != null ? assignment.userData().duplicate() : null);
        // Group assigned partitions by topic; find() looks up an existing per-topic entry.
        assignment.partitions().forEach(tp -> {
            ConsumerProtocolAssignment.TopicPartition partition = data.assignedPartitions().find(tp.topic());
            if (partition == null) {
                partition = new ConsumerProtocolAssignment.TopicPartition().setTopic(tp.topic());
                data.assignedPartitions().add(partition);
            }
            partition.partitions().add(tp.partition());
        });
        return MessageUtil.toVersionPrefixedByteBuffer(version, data);
    }

    /**
     * Deserializes an assignment at the given version (clamped to the supported range).
     * The version header must already have been consumed from {@code buffer}.
     *
     * @throws SchemaException if the buffer is truncated or the version is below the lowest supported
     */
    public static Assignment deserializeAssignment(final ByteBuffer buffer, short version) {
        version = checkAssignmentVersion(version);

        try {
            ConsumerProtocolAssignment data =
                new ConsumerProtocolAssignment(new ByteBufferAccessor(buffer), version);

            // Flatten the per-topic partition lists back into TopicPartition objects.
            List<TopicPartition> assignedPartitions = new ArrayList<>();
            for (ConsumerProtocolAssignment.TopicPartition tp : data.assignedPartitions()) {
                for (Integer partition : tp.partitions()) {
                    assignedPartitions.add(new TopicPartition(tp.topic(), partition));
                }
            }

            return new Assignment(
                assignedPartitions,
                data.userData() != null ? data.userData().duplicate() : null);
        } catch (BufferUnderflowException e) {
            throw new SchemaException("Buffer underflow while parsing consumer protocol's assignment", e);
        }
    }

    /** Deserializes an assignment, reading the version header from the buffer first. */
    public static Assignment deserializeAssignment(final ByteBuffer buffer) {
        return deserializeAssignment(buffer, deserializeVersion(buffer));
    }

    // Versions below the lowest supported are rejected; versions above the highest are
    // clamped down, per the forward-compatibility policy in the class javadoc.
    private static short checkSubscriptionVersion(final short version) {
        if (version < ConsumerProtocolSubscription.LOWEST_SUPPORTED_VERSION)
            throw new SchemaException("Unsupported subscription version: " + version);
        else if (version > ConsumerProtocolSubscription.HIGHEST_SUPPORTED_VERSION)
            return ConsumerProtocolSubscription.HIGHEST_SUPPORTED_VERSION;
        else
            return version;
    }

    // Same clamping policy as checkSubscriptionVersion, for the assignment schema.
    private static short checkAssignmentVersion(final short version) {
        if (version < ConsumerProtocolAssignment.LOWEST_SUPPORTED_VERSION)
            throw new SchemaException("Unsupported assignment version: " + version);
        else if (version > ConsumerProtocolAssignment.HIGHEST_SUPPORTED_VERSION)
            return ConsumerProtocolAssignment.HIGHEST_SUPPORTED_VERSION;
        else
            return version;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.message.FindCoordinatorResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import java.util.Collections; import java.util.Objects; import java.util.Optional; /** * This is responsible for timing to send the next {@link FindCoordinatorRequest} based on the following criteria: * * Whether there is an existing coordinator. * Whether there is an inflight request. * Whether the backoff timer has expired. * The {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult} contains either a wait timer * or a singleton list of {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.UnsentRequest}. 
*
 * The {@link FindCoordinatorRequest} will be handled by the {@link #onResponse(long, FindCoordinatorResponse)} callback, which
 * subsequently invokes {@code onResponse} to handle the exception and response. Note that the coordinator node will be
 * marked {@code null} upon receiving a failure.
 */
public class CoordinatorRequestManager implements RequestManager {
    // Only log a disconnect warning once per elapsed interval, to avoid log spam while
    // the coordinator stays unreachable.
    private static final long COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS = 60 * 1000;
    private final Time time;
    private final Logger log;
    // Surfaces fatal (non-retriable) errors to the application thread.
    private final ErrorEventHandler nonRetriableErrorHandler;
    private final String groupId;
    // Tracks backoff/inflight state for the FindCoordinator request.
    private final RequestState coordinatorRequestState;
    private long timeMarkedUnknownMs = -1L; // starting logging a warning only after unable to connect for a while
    private long totalDisconnectedMin = 0;
    // The currently known coordinator node; null while rediscovery is needed.
    private Node coordinator;

    public CoordinatorRequestManager(
        final Time time,
        final LogContext logContext,
        final long retryBackoffMs,
        final ErrorEventHandler errorHandler,
        final String groupId
    ) {
        Objects.requireNonNull(groupId);
        this.time = time;
        this.log = logContext.logger(this.getClass());
        this.nonRetriableErrorHandler = errorHandler;
        this.groupId = groupId;
        this.coordinatorRequestState = new RequestState(retryBackoffMs);
    }

    /**
     * Poll for the FindCoordinator request.
     * If we don't need to discover a coordinator, this method will return a PollResult with Long.MAX_VALUE backoff time and an empty list.
     * If we are still backing off from a previous attempt, this method will return a PollResult with the remaining backoff time and an empty list.
     * Otherwise, this returns will return a PollResult with a singleton list of UnsentRequest and Long.MAX_VALUE backoff time.
     * Note that this method does not involve any actual network IO, and it only determines if we need to send a new request or not.
     *
     * @param currentTimeMs current time in ms.
     * @return {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult}. This will not be {@code null}.
     */
    @Override
    public NetworkClientDelegate.PollResult poll(final long currentTimeMs) {
        if (this.coordinator != null) {
            // Coordinator already known: nothing to send, wait indefinitely.
            return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.emptyList());
        }

        if (coordinatorRequestState.canSendRequest(currentTimeMs)) {
            NetworkClientDelegate.UnsentRequest request = makeFindCoordinatorRequest(currentTimeMs);
            return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.singletonList(request));
        }

        // Still backing off from the previous failed attempt.
        return new NetworkClientDelegate.PollResult(
            coordinatorRequestState.remainingBackoffMs(currentTimeMs),
            Collections.emptyList());
    }

    // Builds an UnsentRequest for FindCoordinator and registers a completion callback that
    // routes the result through onResponse/onFailedResponse.
    private NetworkClientDelegate.UnsentRequest makeFindCoordinatorRequest(final long currentTimeMs) {
        coordinatorRequestState.onSendAttempt(currentTimeMs);
        FindCoordinatorRequestData data = new FindCoordinatorRequestData()
            .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id())
            .setKey(this.groupId);
        NetworkClientDelegate.UnsentRequest unsentRequest = new NetworkClientDelegate.UnsentRequest(
            new FindCoordinatorRequest.Builder(data),
            Optional.empty()
        );
        unsentRequest.future().whenComplete((clientResponse, throwable) -> {
            long responseTimeMs = time.milliseconds();
            if (clientResponse != null) {
                FindCoordinatorResponse response = (FindCoordinatorResponse) clientResponse.responseBody();
                onResponse(responseTimeMs, response);
            } else {
                onFailedResponse(responseTimeMs, throwable);
            }
        });
        return unsentRequest;
    }

    /**
     * Mark the current coordinator null.
     *
     * @param cause why the coordinator is marked unknown.
     * @param currentTimeMs the current time in ms.
     */
    public void markCoordinatorUnknown(final String cause, final long currentTimeMs) {
        if (this.coordinator != null) {
            log.info("Group coordinator {} is unavailable or invalid due to cause: {}. " +
                "Rediscovery will be attempted.", this.coordinator, cause);
            this.coordinator = null;
            timeMarkedUnknownMs = currentTimeMs;
            totalDisconnectedMin = 0;
        } else {
            // Already disconnected: log at most once per elapsed logging interval.
            long durationOfOngoingDisconnectMs = Math.max(0, currentTimeMs - timeMarkedUnknownMs);
            long currDisconnectMin = durationOfOngoingDisconnectMs / COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS;
            if (currDisconnectMin > this.totalDisconnectedMin) {
                log.debug("Consumer has been disconnected from the group coordinator for {}ms",
                    durationOfOngoingDisconnectMs);
                totalDisconnectedMin = currDisconnectMin;
            }
        }
    }

    private void onSuccessfulResponse(
        final long currentTimeMs,
        final FindCoordinatorResponseData.Coordinator coordinator
    ) {
        // use MAX_VALUE - node.id as the coordinator id to allow separate connections
        // for the coordinator in the underlying network client layer
        int coordinatorConnectionId = Integer.MAX_VALUE - coordinator.nodeId();

        this.coordinator = new Node(
            coordinatorConnectionId,
            coordinator.host(),
            coordinator.port());
        log.info("Discovered group coordinator {}", coordinator);
        coordinatorRequestState.onSuccessfulAttempt(currentTimeMs);
    }

    // Classifies a failure: retriable exceptions are just logged (the backoff timer will
    // schedule a retry); authorization failures and all other exceptions are propagated to
    // the non-retriable error handler.
    private void onFailedResponse(
        final long currentTimeMs,
        final Throwable exception
    ) {
        coordinatorRequestState.onFailedAttempt(currentTimeMs);
        markCoordinatorUnknown("FindCoordinator failed with exception", currentTimeMs);

        if (exception instanceof RetriableException) {
            log.debug("FindCoordinator request failed due to retriable exception", exception);
            return;
        }

        // NOTE(review): this is a reference comparison against the exception instance cached
        // by Errors; it only matches exceptions produced via Errors.exception() (as in
        // onResponse below), not independently-constructed GroupAuthorizationExceptions —
        // confirm this is intentional.
        if (exception == Errors.GROUP_AUTHORIZATION_FAILED.exception()) {
            log.debug("FindCoordinator request failed due to authorization error {}", exception.getMessage());
            nonRetriableErrorHandler.handle(GroupAuthorizationException.forGroupId(this.groupId));
            return;
        }

        log.warn("FindCoordinator request failed due to fatal exception", exception);
        nonRetriableErrorHandler.handle(exception);
    }

    /**
     * Handles the response upon completing the {@link FindCoordinatorRequest} if the
     * future returned successfully. This method must still unwrap the response object
     * to check for protocol errors.
     *
     * @param currentTimeMs current time in ms.
     * @param response the response for finding the coordinator. null if an exception is thrown.
     */
    private void onResponse(
        final long currentTimeMs,
        final FindCoordinatorResponse response
    ) {
        // handles Runtime exception
        Optional<FindCoordinatorResponseData.Coordinator> coordinator = response.coordinatorByKey(this.groupId);
        if (!coordinator.isPresent()) {
            String msg = String.format("Response did not contain expected coordinator section for groupId: %s",
                this.groupId);
            onFailedResponse(currentTimeMs, new IllegalStateException(msg));
            return;
        }

        FindCoordinatorResponseData.Coordinator node = coordinator.get();
        if (node.errorCode() != Errors.NONE.code()) {
            onFailedResponse(currentTimeMs, Errors.forCode(node.errorCode()).exception());
            return;
        }
        onSuccessfulResponse(currentTimeMs, node);
    }

    /**
     * Returns the current coordinator node.
     *
     * @return the current coordinator node.
     */
    public Optional<Node> coordinator() {
        return Optional.ofNullable(this.coordinator);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/DefaultBackgroundThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Queue; import java.util.concurrent.BlockingQueue; /** * Background thread runnable that consumes {@code ApplicationEvent} and * produces {@code BackgroundEvent}. It uses an event loop to consume and * produce events, and poll the network client to handle network IO. 
* <p>
 * It holds a reference to the {@link SubscriptionState}, which is
 * initialized by the polling thread.
 */
public class DefaultBackgroundThread extends KafkaThread {
    private static final long MAX_POLL_TIMEOUT_MS = 5000;
    private static final String BACKGROUND_THREAD_NAME = "consumer_background_thread";
    private final Time time;
    private final Logger log;
    private final BlockingQueue<ApplicationEvent> applicationEventQueue;
    private final BlockingQueue<BackgroundEvent> backgroundEventQueue;
    private final ConsumerMetadata metadata;
    private final ConsumerConfig config;
    // empty if groupId is null
    private final ApplicationEventProcessor applicationEventProcessor;
    private final NetworkClientDelegate networkClientDelegate;
    private final ErrorEventHandler errorEventHandler;
    private final GroupState groupState;
    private boolean running;

    // Registry of request managers keyed by type; Optional.empty() for managers that are
    // not applicable (e.g. no coordinator manager when there is no group id).
    private final Map<RequestManager.Type, Optional<RequestManager>> requestManagerRegistry;

    // Visible for testing
    DefaultBackgroundThread(final Time time,
                            final ConsumerConfig config,
                            final LogContext logContext,
                            final BlockingQueue<ApplicationEvent> applicationEventQueue,
                            final BlockingQueue<BackgroundEvent> backgroundEventQueue,
                            final ErrorEventHandler errorEventHandler,
                            final ApplicationEventProcessor processor,
                            final ConsumerMetadata metadata,
                            final NetworkClientDelegate networkClient,
                            final GroupState groupState,
                            final CoordinatorRequestManager coordinatorManager,
                            final CommitRequestManager commitRequestManager) {
        super(BACKGROUND_THREAD_NAME, true);
        this.time = time;
        this.running = true;
        this.log = logContext.logger(getClass());
        this.applicationEventQueue = applicationEventQueue;
        this.backgroundEventQueue = backgroundEventQueue;
        this.applicationEventProcessor = processor;
        this.config = config;
        this.metadata = metadata;
        this.networkClientDelegate = networkClient;
        this.errorEventHandler = errorEventHandler;
        this.groupState = groupState;
        this.requestManagerRegistry = new HashMap<>();
        this.requestManagerRegistry.put(RequestManager.Type.COORDINATOR, Optional.ofNullable(coordinatorManager));
        this.requestManagerRegistry.put(RequestManager.Type.COMMIT, Optional.ofNullable(commitRequestManager));
    }

    public DefaultBackgroundThread(final Time time,
                                   final ConsumerConfig config,
                                   final GroupRebalanceConfig rebalanceConfig,
                                   final LogContext logContext,
                                   final BlockingQueue<ApplicationEvent> applicationEventQueue,
                                   final BlockingQueue<BackgroundEvent> backgroundEventQueue,
                                   final ConsumerMetadata metadata,
                                   final KafkaClient networkClient) {
        super(BACKGROUND_THREAD_NAME, true);
        try {
            this.time = time;
            this.log = logContext.logger(getClass());
            this.applicationEventQueue = applicationEventQueue;
            this.backgroundEventQueue = backgroundEventQueue;
            this.config = config;
            // subscriptionState is initialized by the polling thread
            this.metadata = metadata;
            this.networkClientDelegate = new NetworkClientDelegate(
                this.time,
                this.config,
                logContext,
                networkClient);
            this.running = true;
            this.errorEventHandler = new ErrorEventHandler(this.backgroundEventQueue);
            this.groupState = new GroupState(rebalanceConfig);
            this.requestManagerRegistry = Collections.unmodifiableMap(buildRequestManagerRegistry(logContext));
            this.applicationEventProcessor = new ApplicationEventProcessor(backgroundEventQueue, requestManagerRegistry);
        } catch (final Exception e) {
            close();
            // Chain the caught exception itself: e.getCause() may be null (losing the
            // failure entirely) and always drops the immediate stack trace.
            throw new KafkaException("Failed to construct background processor", e);
        }
    }

    // Builds the registry of request managers. The coordinator manager (and therefore the
    // commit manager, which depends on it) exists only when a group id is configured.
    private Map<RequestManager.Type, Optional<RequestManager>> buildRequestManagerRegistry(final LogContext logContext) {
        Map<RequestManager.Type, Optional<RequestManager>> registry = new HashMap<>();
        CoordinatorRequestManager coordinatorManager = groupState.groupId == null ?
            null :
            new CoordinatorRequestManager(
                time,
                logContext,
                config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG),
                errorEventHandler,
                groupState.groupId);
        // Add subscriptionState
        CommitRequestManager commitRequestManager = coordinatorManager == null ?
            null :
            new CommitRequestManager(time, logContext, null, config, coordinatorManager, groupState);
        registry.put(RequestManager.Type.COORDINATOR, Optional.ofNullable(coordinatorManager));
        registry.put(RequestManager.Type.COMMIT, Optional.ofNullable(commitRequestManager));
        return registry;
    }

    @Override
    public void run() {
        try {
            log.debug("Background thread started");
            while (running) {
                try {
                    runOnce();
                } catch (final WakeupException e) {
                    log.debug("WakeupException caught, background thread won't be interrupted");
                    // swallow the wakeup exception to prevent killing the background thread.
                }
            }
        } catch (final Throwable t) {
            log.error("The background thread failed due to unexpected error", t);
            throw new RuntimeException(t);
        } finally {
            close();
            log.debug("{} closed", getClass());
        }
    }

    /**
     * Poll and process an {@link ApplicationEvent}. It performs the following tasks:
     * 1. Drains and try to process all the requests in the queue.
     * 2. Iterate through the registry, poll, and get the next poll time for the network poll
     * 3. Poll the networkClient to send and retrieve the response.
     */
    void runOnce() {
        drain();
        final long currentTimeMs = time.milliseconds();
        // The network poll timeout is the minimum of all managers' requested poll times,
        // capped at MAX_POLL_TIMEOUT_MS.
        final long pollWaitTimeMs = requestManagerRegistry.values().stream()
            .filter(Optional::isPresent)
            .map(m -> m.get().poll(currentTimeMs))
            .map(this::handlePollResult)
            .reduce(MAX_POLL_TIMEOUT_MS, Math::min);
        networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs);
    }

    // Drains the application event queue and processes each event in order.
    private void drain() {
        Queue<ApplicationEvent> events = pollApplicationEvent();
        for (ApplicationEvent event : events) {
            log.debug("Consuming application event: {}", event);
            consumeApplicationEvent(event);
        }
    }

    // Enqueues any unsent requests produced by a manager's poll and returns its
    // requested time until the next poll.
    long handlePollResult(NetworkClientDelegate.PollResult res) {
        if (!res.unsentRequests.isEmpty()) {
            networkClientDelegate.addAll(res.unsentRequests);
        }
        return res.timeUntilNextPollMs;
    }

    private Queue<ApplicationEvent> pollApplicationEvent() {
        if (this.applicationEventQueue.isEmpty()) {
            return new LinkedList<>();
        }

        LinkedList<ApplicationEvent> res = new LinkedList<>();
        this.applicationEventQueue.drainTo(res);
        return res;
    }

    private void consumeApplicationEvent(final ApplicationEvent event) {
        Objects.requireNonNull(event);
        applicationEventProcessor.process(event);
    }

    public boolean isRunning() {
        return this.running;
    }

    public void wakeup() {
        networkClientDelegate.wakeup();
    }

    public void close() {
        this.running = false;
        this.wakeup();
        Utils.closeQuietly(networkClientDelegate, "network client utils");
        Utils.closeQuietly(metadata, "consumer metadata client");
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/DefaultEventHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.NetworkClient; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.EventHandler; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.network.Selector; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import java.net.InetSocketAddress; import java.util.List; import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; /** * An {@code EventHandler} that uses a single background thread to consume {@code ApplicationEvent} and produce 
* {@code BackgroundEvent} from the {@code ConsumerBackgroundThread}.
 */
public class DefaultEventHandler implements EventHandler {
    private static final String METRIC_GRP_PREFIX = "consumer";
    // Events flowing from the application (polling) thread to the background thread.
    private final BlockingQueue<ApplicationEvent> applicationEventQueue;
    // Events flowing from the background thread back to the application thread.
    private final BlockingQueue<BackgroundEvent> backgroundEventQueue;
    private final DefaultBackgroundThread backgroundThread;

    public DefaultEventHandler(final ConsumerConfig config,
                               final GroupRebalanceConfig groupRebalanceConfig,
                               final LogContext logContext,
                               final SubscriptionState subscriptionState,
                               final ApiVersions apiVersions,
                               final Metrics metrics,
                               final ClusterResourceListeners clusterResourceListeners,
                               final Sensor fetcherThrottleTimeSensor) {
        this(Time.SYSTEM,
            config,
            groupRebalanceConfig,
            logContext,
            new LinkedBlockingQueue<>(),
            new LinkedBlockingQueue<>(),
            subscriptionState,
            apiVersions,
            metrics,
            clusterResourceListeners,
            fetcherThrottleTimeSensor);
    }

    // Wires up the full network stack (metadata, selector, network client), then constructs
    // and starts the background thread.
    public DefaultEventHandler(final Time time,
                               final ConsumerConfig config,
                               final GroupRebalanceConfig groupRebalanceConfig,
                               final LogContext logContext,
                               final BlockingQueue<ApplicationEvent> applicationEventQueue,
                               final BlockingQueue<BackgroundEvent> backgroundEventQueue,
                               final SubscriptionState subscriptionState,
                               final ApiVersions apiVersions,
                               final Metrics metrics,
                               final ClusterResourceListeners clusterResourceListeners,
                               final Sensor fetcherThrottleTimeSensor) {
        this.applicationEventQueue = applicationEventQueue;
        this.backgroundEventQueue = backgroundEventQueue;
        final ConsumerMetadata metadata = bootstrapMetadata(
            logContext,
            clusterResourceListeners,
            config,
            subscriptionState
        );
        final ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext);
        final Selector selector = new Selector(
            config.getLong(
                ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
            metrics,
            time,
            METRIC_GRP_PREFIX,
            channelBuilder,
            logContext
        );
        final NetworkClient networkClient = new NetworkClient(
            selector,
            metadata,
            config.getString(ConsumerConfig.CLIENT_ID_CONFIG),
            100, // a fixed large enough value will suffice for max
            // in-flight requests
            config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG),
            config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            config.getInt(ConsumerConfig.SEND_BUFFER_CONFIG),
            config.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG),
            config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG),
            config.getLong(ConsumerConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG),
            config.getLong(ConsumerConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG),
            time,
            true,
            apiVersions,
            fetcherThrottleTimeSensor,
            logContext
        );
        this.backgroundThread = new DefaultBackgroundThread(
            time,
            config,
            groupRebalanceConfig,
            logContext,
            this.applicationEventQueue,
            this.backgroundEventQueue,
            metadata,
            networkClient);
        this.backgroundThread.start();
    }

    // VisibleForTesting
    DefaultEventHandler(final Time time,
                        final ConsumerConfig config,
                        final GroupRebalanceConfig groupRebalanceConfig,
                        final LogContext logContext,
                        final BlockingQueue<ApplicationEvent> applicationEventQueue,
                        final BlockingQueue<BackgroundEvent> backgroundEventQueue,
                        final ConsumerMetadata metadata,
                        final KafkaClient networkClient) {
        this.applicationEventQueue = applicationEventQueue;
        this.backgroundEventQueue = backgroundEventQueue;
        this.backgroundThread = new DefaultBackgroundThread(
            time,
            config,
            groupRebalanceConfig,
            logContext,
            this.applicationEventQueue,
            this.backgroundEventQueue,
            metadata,
            networkClient);
        backgroundThread.start();
    }

    // VisibleForTesting
    DefaultEventHandler(final DefaultBackgroundThread backgroundThread,
                        final BlockingQueue<ApplicationEvent> applicationEventQueue,
                        final BlockingQueue<BackgroundEvent> backgroundEventQueue) {
        this.backgroundThread = backgroundThread;
        this.applicationEventQueue = applicationEventQueue;
        this.backgroundEventQueue = backgroundEventQueue;
        backgroundThread.start();
    }

    // Non-blocking: returns the next background event if one is available.
    @Override
    public Optional<BackgroundEvent> poll() {
        return Optional.ofNullable(backgroundEventQueue.poll());
    }

    @Override
    public boolean isEmpty() {
        return backgroundEventQueue.isEmpty();
    }

    // Wakes the background thread so it notices the newly enqueued event promptly.
    @Override
    public boolean add(final ApplicationEvent event) {
        backgroundThread.wakeup();
        return applicationEventQueue.add(event);
    }

    // bootstrap a metadata object with the bootstrap server IP address,
    // which will be used once for the subsequent metadata refresh once the
    // background thread has started up.
    private ConsumerMetadata bootstrapMetadata(
        final LogContext logContext,
        final ClusterResourceListeners clusterResourceListeners,
        final ConsumerConfig config,
        final SubscriptionState subscriptions) {
        final ConsumerMetadata metadata = new ConsumerMetadata(
            config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG),
            config.getLong(ConsumerConfig.METADATA_MAX_AGE_CONFIG),
            !config.getBoolean(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG),
            config.getBoolean(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG),
            subscriptions,
            logContext,
            clusterResourceListeners);
        final List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
            config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG),
            config.getString(ConsumerConfig.CLIENT_DNS_LOOKUP_CONFIG));
        metadata.bootstrap(addresses);
        return metadata;
    }

    public void close() {
        try {
            backgroundThread.close();
        } catch (final Exception e) {
            throw new RuntimeException(e);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/ErrorEventHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent;
import org.apache.kafka.clients.consumer.internals.events.ErrorBackgroundEvent;

import java.util.Queue;

/**
 * Forwards errors raised on the background thread to the application thread
 * by wrapping each one in an {@link ErrorBackgroundEvent} and placing it on
 * the shared background event queue.
 */
public class ErrorEventHandler {
    // Shared queue through which background events reach the application thread.
    private final Queue<BackgroundEvent> queue;

    public ErrorEventHandler(Queue<BackgroundEvent> backgroundEventQueue) {
        this.queue = backgroundEventQueue;
    }

    /**
     * Wraps the given throwable in an {@link ErrorBackgroundEvent} and enqueues it.
     */
    public void handle(Throwable e) {
        queue.add(new ErrorBackgroundEvent(e));
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/Fetch.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;

/**
 * The set of user-visible records returned by one round of fetching, grouped by partition,
 * together with a flag recording whether the fetch advanced the consume position of any
 * partition. Instances are mutable: fetches for further partitions can be folded in via
 * {@link #add(Fetch)}.
 */
public class Fetch<K, V> {

    private final Map<TopicPartition, List<ConsumerRecord<K, V>>> records;
    private boolean positionAdvanced;
    private int numRecords;

    /**
     * @return a fetch holding no records and reporting no position advancement
     */
    public static <K, V> Fetch<K, V> empty() {
        return new Fetch<>(new HashMap<>(), false, 0);
    }

    /**
     * Creates a fetch for a single partition.
     *
     * @param partition        the partition the records came from
     * @param records          the records fetched for that partition (may be empty)
     * @param positionAdvanced whether the fetch advanced the consume position of the partition
     */
    public static <K, V> Fetch<K, V> forPartition(
            TopicPartition partition,
            List<ConsumerRecord<K, V>> records,
            boolean positionAdvanced
    ) {
        Map<TopicPartition, List<ConsumerRecord<K, V>>> byPartition = new HashMap<>();
        if (!records.isEmpty())
            byPartition.put(partition, records);
        return new Fetch<>(byPartition, positionAdvanced, records.size());
    }

    private Fetch(
            Map<TopicPartition, List<ConsumerRecord<K, V>>> records,
            boolean positionAdvanced,
            int numRecords
    ) {
        this.records = records;
        this.positionAdvanced = positionAdvanced;
        this.numRecords = numRecords;
    }

    /**
     * Folds another {@link Fetch} into this one: all of its records are merged into this fetch's
     * {@link #records()}, and if the other fetch
     * {@link #positionAdvanced() advanced the consume position for any topic partition},
     * this fetch is marked as having advanced the consume position as well.
     *
     * @param fetch the other fetch to fold in; may not be null
     */
    public void add(Fetch<K, V> fetch) {
        Objects.requireNonNull(fetch);
        addRecords(fetch.records);
        this.positionAdvanced = this.positionAdvanced || fetch.positionAdvanced;
    }

    /**
     * @return all of the non-control messages for this fetch, grouped by partition
     */
    public Map<TopicPartition, List<ConsumerRecord<K, V>>> records() {
        return Collections.unmodifiableMap(records);
    }

    /**
     * @return whether the fetch caused the consumer's
     * {@link org.apache.kafka.clients.consumer.KafkaConsumer#position(TopicPartition) position} to advance for at
     * least one of the topic partitions in this fetch
     */
    public boolean positionAdvanced() {
        return positionAdvanced;
    }

    /**
     * @return the total number of non-control messages for this fetch, across all partitions
     */
    public int numRecords() {
        return numRecords;
    }

    /**
     * @return {@code true} if and only if this fetch did not return any user-visible (i.e., non-control) records, and
     * did not cause the consumer position to advance for any topic partitions
     */
    public boolean isEmpty() {
        return numRecords == 0 && !positionAdvanced;
    }

    private void addRecords(Map<TopicPartition, List<ConsumerRecord<K, V>>> records) {
        records.forEach((partition, partRecords) -> {
            this.numRecords += partRecords.size();
            // Two batches for the same partition in one fetch shouldn't normally happen (one
            // in-flight fetch per partition), but it can in rare cases such as a partition
            // leader change. The existing list may be immutable, so merging copies into a
            // freshly sized list.
            this.records.merge(partition, partRecords, (existing, incoming) -> {
                List<ConsumerRecord<K, V>> combined = new ArrayList<>(existing.size() + incoming.size());
                combined.addAll(existing);
                combined.addAll(incoming);
                return combined;
            });
        });
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/FetchConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.serialization.Deserializer;

import java.util.Objects;

/**
 * {@link FetchConfig} represents the static configuration for fetching records from Kafka. It is simply a way
 * to bundle the immutable settings that were presented at the time the {@link Consumer} was created for later use by
 * classes like {@link Fetcher}, {@link CompletedFetch}, etc.
 *
 * <p/>
 *
 * In most cases, the values stored and returned by {@link FetchConfig} will be those stored in the following
 * {@link ConsumerConfig consumer configuration} settings:
 *
 * <ul>
 *     <li>{@link #minBytes}: {@link ConsumerConfig#FETCH_MIN_BYTES_CONFIG}</li>
 *     <li>{@link #maxBytes}: {@link ConsumerConfig#FETCH_MAX_BYTES_CONFIG}</li>
 *     <li>{@link #maxWaitMs}: {@link ConsumerConfig#FETCH_MAX_WAIT_MS_CONFIG}</li>
 *     <li>{@link #fetchSize}: {@link ConsumerConfig#MAX_PARTITION_FETCH_BYTES_CONFIG}</li>
 *     <li>{@link #maxPollRecords}: {@link ConsumerConfig#MAX_POLL_RECORDS_CONFIG}</li>
 *     <li>{@link #checkCrcs}: {@link ConsumerConfig#CHECK_CRCS_CONFIG}</li>
 *     <li>{@link #clientRackId}: {@link ConsumerConfig#CLIENT_RACK_CONFIG}</li>
 *     <li>{@link #keyDeserializer}: {@link ConsumerConfig#KEY_DESERIALIZER_CLASS_CONFIG}</li>
 *     <li>{@link #valueDeserializer}: {@link ConsumerConfig#VALUE_DESERIALIZER_CLASS_CONFIG}</li>
 *     <li>{@link #isolationLevel}: {@link ConsumerConfig#ISOLATION_LEVEL_CONFIG}</li>
 * </ul>
 *
 * However, there are places in the code where additional logic is used to determine these fetch-related configuration
 * values. In those cases, the values are calculated outside of this class and simply passed in when constructed.
 *
 * <p/>
 *
 * Note: the {@link Deserializer deserializers} used for the key and value are not closed by this class. They should be
 * closed by the creator of the {@link FetchConfig}.
 *
 * @param <K> Type used to {@link Deserializer deserialize} the message/record key
 * @param <V> Type used to {@link Deserializer deserialize} the message/record value
 */
public class FetchConfig<K, V> {

    final int minBytes;
    final int maxBytes;
    final int maxWaitMs;
    final int fetchSize;
    final int maxPollRecords;
    final boolean checkCrcs;
    final String clientRackId;
    final Deserializer<K> keyDeserializer;
    final Deserializer<V> valueDeserializer;
    final IsolationLevel isolationLevel;

    /**
     * Primary constructor taking pre-computed values; used directly when the caller has already
     * derived the fetch settings outside of a {@link ConsumerConfig}.
     *
     * @throws NullPointerException if either deserializer is null
     */
    public FetchConfig(int minBytes,
                       int maxBytes,
                       int maxWaitMs,
                       int fetchSize,
                       int maxPollRecords,
                       boolean checkCrcs,
                       String clientRackId,
                       Deserializer<K> keyDeserializer,
                       Deserializer<V> valueDeserializer,
                       IsolationLevel isolationLevel) {
        this.minBytes = minBytes;
        this.maxBytes = maxBytes;
        this.maxWaitMs = maxWaitMs;
        this.fetchSize = fetchSize;
        this.maxPollRecords = maxPollRecords;
        this.checkCrcs = checkCrcs;
        this.clientRackId = clientRackId;
        this.keyDeserializer = Objects.requireNonNull(keyDeserializer, "Message key deserializer provided to FetchConfig should not be null");
        this.valueDeserializer = Objects.requireNonNull(valueDeserializer, "Message value deserializer provided to FetchConfig should not be null");
        this.isolationLevel = isolationLevel;
    }

    /**
     * Convenience constructor that reads the fetch settings out of the given
     * {@link ConsumerConfig}. Delegates to the primary constructor so the null checks and
     * assignments live in exactly one place.
     */
    public FetchConfig(ConsumerConfig config,
                       Deserializer<K> keyDeserializer,
                       Deserializer<V> valueDeserializer,
                       IsolationLevel isolationLevel) {
        this(config.getInt(ConsumerConfig.FETCH_MIN_BYTES_CONFIG),
                config.getInt(ConsumerConfig.FETCH_MAX_BYTES_CONFIG),
                config.getInt(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG),
                config.getInt(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG),
                config.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG),
                config.getBoolean(ConsumerConfig.CHECK_CRCS_CONFIG),
                config.getString(ConsumerConfig.CLIENT_RACK_CONFIG),
                keyDeserializer,
                valueDeserializer,
                isolationLevel);
    }

    @Override
    public String toString() {
        return "FetchConfig{" +
                "minBytes=" + minBytes +
                ", maxBytes=" + maxBytes +
                ", maxWaitMs=" + maxWaitMs +
                ", fetchSize=" + fetchSize +
                ", maxPollRecords=" + maxPollRecords +
                ", checkCrcs=" + checkCrcs +
                ", clientRackId='" + clientRackId + '\'' +
                ", keyDeserializer=" + keyDeserializer +
                ", valueDeserializer=" + valueDeserializer +
                ", isolationLevel=" + isolationLevel +
                '}';
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/FetchMetricsAggregator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.TopicPartition;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Message data for each partition of a fetch response is parsed lazily, so fetch-level metrics
 * must be accumulated incrementally as partitions are processed. This class collects those
 * per-partition contributions and emits the aggregated metrics once every partition in the
 * fetch has reported.
 */
class FetchMetricsAggregator {

    private final FetchMetricsManager metricsManager;

    /** Partitions from the fetch that have not yet reported via {@link #record}. */
    private final Set<TopicPartition> pendingPartitions;

    /** Totals across the whole fetch. */
    private final FetchMetrics fetchTotals = new FetchMetrics();

    /** Totals keyed by topic name. */
    private final Map<String, FetchMetrics> topicTotals = new HashMap<>();

    FetchMetricsAggregator(FetchMetricsManager metricsManager, Set<TopicPartition> partitions) {
        this.metricsManager = metricsManager;
        this.pendingPartitions = new HashSet<>(partitions);
    }

    /**
     * Records the bytes and record count parsed for one partition. Once the last outstanding
     * partition has reported, the aggregated totals are written to the metrics manager.
     */
    void record(TopicPartition partition, int bytes, int records) {
        // Fold this partition's contribution into the fetch-level totals...
        fetchTotals.increment(bytes, records);

        // ...and into the per-topic totals.
        topicTotals.computeIfAbsent(partition.topic(), t -> new FetchMetrics())
                   .increment(bytes, records);

        maybeRecordMetrics(partition);
    }

    /**
     * Marks the given {@link TopicPartition} as handled and, if it was the last one outstanding,
     * records the aggregated values at the fetch level and on a per-topic basis.
     */
    private void maybeRecordMetrics(TopicPartition partition) {
        pendingPartitions.remove(partition);

        if (!pendingPartitions.isEmpty())
            return;

        // All partitions have reported: flush the fetch-level aggregates...
        metricsManager.recordBytesFetched(fetchTotals.bytes);
        metricsManager.recordRecordsFetched(fetchTotals.records);

        // ...followed by the per-topic aggregates.
        topicTotals.forEach((topic, totals) -> {
            metricsManager.recordBytesFetched(topic, totals.bytes);
            metricsManager.recordRecordsFetched(topic, totals.records);
        });
    }

    /** Simple mutable pair of byte and record counters. */
    private static class FetchMetrics {

        private int bytes;
        private int records;

        private void increment(int bytes, int records) {
            this.bytes += bytes;
            this.records += records;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.WindowedCount;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/**
 * The {@link FetchMetricsManager} class provides wrapper methods to record lag, lead, latency, and fetch metrics.
 * It keeps an internal ID of the assigned set of partitions which is updated to ensure the set of metrics it
 * records matches up with the topic-partitions in use.
 */
public class FetchMetricsManager {

    private final Metrics metrics;
    private final FetchMetricsRegistry metricsRegistry;

    // Client-level sensors, built once in the constructor from the registry's templates.
    private final Sensor throttleTime;
    private final Sensor bytesFetched;
    private final Sensor recordsFetched;
    private final Sensor fetchLatency;
    private final Sensor recordsLag;
    private final Sensor recordsLead;

    // Snapshot of the last assignment seen by maybeUpdateAssignment(); used to detect changes
    // and to tear down sensors for partitions that are no longer assigned.
    private int assignmentId = 0;
    private Set<TopicPartition> assignedPartitions = Collections.emptySet();

    public FetchMetricsManager(Metrics metrics, FetchMetricsRegistry metricsRegistry) {
        this.metrics = metrics;
        this.metricsRegistry = metricsRegistry;

        this.throttleTime = new SensorBuilder(metrics, "fetch-throttle-time")
                .withAvg(metricsRegistry.fetchThrottleTimeAvg)
                .withMax(metricsRegistry.fetchThrottleTimeMax)
                .build();
        this.bytesFetched = new SensorBuilder(metrics, "bytes-fetched")
                .withAvg(metricsRegistry.fetchSizeAvg)
                .withMax(metricsRegistry.fetchSizeMax)
                .withMeter(metricsRegistry.bytesConsumedRate, metricsRegistry.bytesConsumedTotal)
                .build();
        this.recordsFetched = new SensorBuilder(metrics, "records-fetched")
                .withAvg(metricsRegistry.recordsPerRequestAvg)
                .withMeter(metricsRegistry.recordsConsumedRate, metricsRegistry.recordsConsumedTotal)
                .build();
        this.fetchLatency = new SensorBuilder(metrics, "fetch-latency")
                .withAvg(metricsRegistry.fetchLatencyAvg)
                .withMax(metricsRegistry.fetchLatencyMax)
                // WindowedCount drives the request rate/total stats alongside avg/max latency.
                .withMeter(new WindowedCount(), metricsRegistry.fetchRequestRate, metricsRegistry.fetchRequestTotal)
                .build();
        this.recordsLag = new SensorBuilder(metrics, "records-lag")
                .withMax(metricsRegistry.recordsLagMax)
                .build();
        this.recordsLead = new SensorBuilder(metrics, "records-lead")
                .withMin(metricsRegistry.recordsLeadMin)
                .build();
    }

    /** @return the sensor used to record broker throttle times for fetch responses. */
    public Sensor throttleTimeSensor() {
        return throttleTime;
    }

    /** Records the latency, in milliseconds, of one fetch request. */
    void recordLatency(long requestLatencyMs) {
        fetchLatency.record(requestLatencyMs);
    }

    /** Records the total bytes returned by one fetch, at the client level. */
    void recordBytesFetched(int bytes) {
        bytesFetched.record(bytes);
    }

    /** Records the total record count returned by one fetch, at the client level. */
    void recordRecordsFetched(int records) {
        recordsFetched.record(records);
    }

    /**
     * Records bytes fetched for a single topic. A topic-scoped sensor is obtained via
     * SensorBuilder on every call; presumably the builder returns the existing sensor when one
     * with this name is already registered — confirm against SensorBuilder.
     */
    void recordBytesFetched(String topic, int bytes) {
        String name = topicBytesFetchedMetricName(topic);
        Sensor bytesFetched = new SensorBuilder(metrics, name, () -> topicTags(topic))
                .withAvg(metricsRegistry.topicFetchSizeAvg)
                .withMax(metricsRegistry.topicFetchSizeMax)
                .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal)
                .build();
        bytesFetched.record(bytes);
    }

    /** Records the record count fetched for a single topic (see note on the bytes overload). */
    void recordRecordsFetched(String topic, int records) {
        String name = topicRecordsFetchedMetricName(topic);
        Sensor recordsFetched = new SensorBuilder(metrics, name, () -> topicTags(topic))
                .withAvg(metricsRegistry.topicRecordsPerRequestAvg)
                .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal)
                .build();
        recordsFetched.record(records);
    }

    /**
     * Records the lag of one partition, both on the client-level max sensor and on a
     * partition-scoped sensor (latest value, max, avg).
     */
    void recordPartitionLag(TopicPartition tp, long lag) {
        this.recordsLag.record(lag);

        String name = partitionRecordsLagMetricName(tp);
        Sensor recordsLag = new SensorBuilder(metrics, name, () -> topicPartitionTags(tp))
                .withValue(metricsRegistry.partitionRecordsLag)
                .withMax(metricsRegistry.partitionRecordsLagMax)
                .withAvg(metricsRegistry.partitionRecordsLagAvg)
                .build();
        recordsLag.record(lag);
    }

    /**
     * Records the lead of one partition, both on the client-level min sensor and on a
     * partition-scoped sensor (latest value, min, avg).
     */
    void recordPartitionLead(TopicPartition tp, long lead) {
        this.recordsLead.record(lead);

        String name = partitionRecordsLeadMetricName(tp);
        Sensor recordsLead = new SensorBuilder(metrics, name, () -> topicPartitionTags(tp))
                .withValue(metricsRegistry.partitionRecordsLead)
                .withMin(metricsRegistry.partitionRecordsLeadMin)
                .withAvg(metricsRegistry.partitionRecordsLeadAvg)
                .build();
        recordsLead.record(lead);
    }

    /**
     * This method is called by the {@link Fetch fetch} logic before it requests fetches in order to update the
     * internal set of metrics that are tracked. When the subscription's assignment ID has changed,
     * per-partition sensors and gauges for partitions that dropped out of the assignment are removed,
     * and a preferred-read-replica gauge is registered for each newly-added partition.
     *
     * @param subscription {@link SubscriptionState} that contains the set of assigned partitions
     * @see SubscriptionState#assignmentId()
     */
    void maybeUpdateAssignment(SubscriptionState subscription) {
        int newAssignmentId = subscription.assignmentId();

        // Cheap change-detection: only diff the partition sets when the assignment ID moved.
        if (this.assignmentId != newAssignmentId) {
            Set<TopicPartition> newAssignedPartitions = subscription.assignedPartitions();

            // Tear down metrics for partitions that are no longer assigned.
            for (TopicPartition tp : this.assignedPartitions) {
                if (!newAssignedPartitions.contains(tp)) {
                    metrics.removeSensor(partitionRecordsLagMetricName(tp));
                    metrics.removeSensor(partitionRecordsLeadMetricName(tp));
                    metrics.removeMetric(partitionPreferredReadReplicaMetricName(tp));
                }
            }

            // Register a preferred-read-replica gauge for each newly-assigned partition.
            for (TopicPartition tp : newAssignedPartitions) {
                if (!this.assignedPartitions.contains(tp)) {
                    MetricName metricName = partitionPreferredReadReplicaMetricName(tp);
                    metrics.addMetricIfAbsent(
                        metricName,
                        null,
                        // Gauge value is -1 when no preferred read replica is set.
                        (Gauge<Integer>) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1)
                    );
                }
            }

            this.assignedPartitions = newAssignedPartitions;
            this.assignmentId = newAssignmentId;
        }
    }

    // Internal sensor names below embed the topic/partition so each gets its own sensor.

    private static String topicBytesFetchedMetricName(String topic) {
        return "topic." + topic + ".bytes-fetched";
    }

    private static String topicRecordsFetchedMetricName(String topic) {
        return "topic." + topic + ".records-fetched";
    }

    private static String partitionRecordsLeadMetricName(TopicPartition tp) {
        return tp + ".records-lead";
    }

    private static String partitionRecordsLagMetricName(TopicPartition tp) {
        return tp + ".records-lag";
    }

    private MetricName partitionPreferredReadReplicaMetricName(TopicPartition tp) {
        Map<String, String> metricTags = topicPartitionTags(tp);
        return this.metrics.metricInstance(metricsRegistry.partitionPreferredReadReplica, metricTags);
    }

    // Dots in topic names are replaced with underscores in metric tags.
    static Map<String, String> topicTags(String topic) {
        Map<String, String> metricTags = new HashMap<>(1);
        metricTags.put("topic", topic.replace('.', '_'));
        return metricTags;
    }

    static Map<String, String> topicPartitionTags(TopicPartition tp) {
        Map<String, String> metricTags = new HashMap<>(2);
        metricTags.put("topic", tp.topic().replace('.', '_'));
        metricTags.put("partition", String.valueOf(tp.partition()));
        return metricTags;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.apache.kafka.common.MetricNameTemplate;

/**
 * Declares every {@link MetricNameTemplate} used by the consumer's fetch path, at three scopes:
 * client level, topic level (extra "topic" tag), and partition level (extra "partition" tag).
 * The templates are instantiated elsewhere (see FetchMetricsManager) when values are recorded.
 */
public class FetchMetricsRegistry {

    // Client-level templates.
    public MetricNameTemplate fetchSizeAvg;
    public MetricNameTemplate fetchSizeMax;
    public MetricNameTemplate bytesConsumedRate;
    public MetricNameTemplate bytesConsumedTotal;
    public MetricNameTemplate recordsPerRequestAvg;
    public MetricNameTemplate recordsConsumedRate;
    public MetricNameTemplate recordsConsumedTotal;
    public MetricNameTemplate fetchLatencyAvg;
    public MetricNameTemplate fetchLatencyMax;
    public MetricNameTemplate fetchRequestRate;
    public MetricNameTemplate fetchRequestTotal;
    public MetricNameTemplate recordsLagMax;
    public MetricNameTemplate recordsLeadMin;
    public MetricNameTemplate fetchThrottleTimeAvg;
    public MetricNameTemplate fetchThrottleTimeMax;
    // Topic-level templates.
    public MetricNameTemplate topicFetchSizeAvg;
    public MetricNameTemplate topicFetchSizeMax;
    public MetricNameTemplate topicBytesConsumedRate;
    public MetricNameTemplate topicBytesConsumedTotal;
    public MetricNameTemplate topicRecordsPerRequestAvg;
    public MetricNameTemplate topicRecordsConsumedRate;
    public MetricNameTemplate topicRecordsConsumedTotal;
    // Partition-level templates.
    public MetricNameTemplate partitionRecordsLag;
    public MetricNameTemplate partitionRecordsLagMax;
    public MetricNameTemplate partitionRecordsLagAvg;
    public MetricNameTemplate partitionRecordsLead;
    public MetricNameTemplate partitionRecordsLeadMin;
    public MetricNameTemplate partitionRecordsLeadAvg;
    public MetricNameTemplate partitionPreferredReadReplica;

    /** No extra tags, empty group prefix. */
    public FetchMetricsRegistry() {
        this(new HashSet<String>(), "");
    }

    /** No extra tags; {@code metricGrpPrefix} prefixes the metric group name. */
    public FetchMetricsRegistry(String metricGrpPrefix) {
        this(new HashSet<String>(), metricGrpPrefix);
    }

    /**
     * @param tags            tag names common to every template
     * @param metricGrpPrefix prefix for the metric group name, e.g. "consumer" yields
     *                        "consumer-fetch-manager-metrics"
     */
    public FetchMetricsRegistry(Set<String> tags, String metricGrpPrefix) {

        /***** Client level *****/
        String groupName = metricGrpPrefix + "-fetch-manager-metrics";

        this.fetchSizeAvg = new MetricNameTemplate("fetch-size-avg", groupName,
                "The average number of bytes fetched per request", tags);

        this.fetchSizeMax = new MetricNameTemplate("fetch-size-max", groupName,
                "The maximum number of bytes fetched per request", tags);
        this.bytesConsumedRate = new MetricNameTemplate("bytes-consumed-rate", groupName,
                "The average number of bytes consumed per second", tags);
        this.bytesConsumedTotal = new MetricNameTemplate("bytes-consumed-total", groupName,
                "The total number of bytes consumed", tags);

        this.recordsPerRequestAvg = new MetricNameTemplate("records-per-request-avg", groupName,
                "The average number of records in each request", tags);
        this.recordsConsumedRate = new MetricNameTemplate("records-consumed-rate", groupName,
                "The average number of records consumed per second", tags);
        this.recordsConsumedTotal = new MetricNameTemplate("records-consumed-total", groupName,
                "The total number of records consumed", tags);

        this.fetchLatencyAvg = new MetricNameTemplate("fetch-latency-avg", groupName,
                "The average time taken for a fetch request.", tags);
        this.fetchLatencyMax = new MetricNameTemplate("fetch-latency-max", groupName,
                "The max time taken for any fetch request.", tags);
        this.fetchRequestRate = new MetricNameTemplate("fetch-rate", groupName,
                "The number of fetch requests per second.", tags);
        this.fetchRequestTotal = new MetricNameTemplate("fetch-total", groupName,
                "The total number of fetch requests.", tags);

        this.recordsLagMax = new MetricNameTemplate("records-lag-max", groupName,
                "The maximum lag in terms of number of records for any partition in this window. NOTE: This is based on current offset and not committed offset", tags);
        this.recordsLeadMin = new MetricNameTemplate("records-lead-min", groupName,
                "The minimum lead in terms of number of records for any partition in this window", tags);

        this.fetchThrottleTimeAvg = new MetricNameTemplate("fetch-throttle-time-avg", groupName,
                "The average throttle time in ms", tags);
        this.fetchThrottleTimeMax = new MetricNameTemplate("fetch-throttle-time-max", groupName,
                "The maximum throttle time in ms", tags);

        /***** Topic level *****/
        // LinkedHashSet keeps the caller's tag order, with "topic" appended last.
        Set<String> topicTags = new LinkedHashSet<>(tags);
        topicTags.add("topic");

        this.topicFetchSizeAvg = new MetricNameTemplate("fetch-size-avg", groupName,
                "The average number of bytes fetched per request for a topic", topicTags);
        this.topicFetchSizeMax = new MetricNameTemplate("fetch-size-max", groupName,
                "The maximum number of bytes fetched per request for a topic", topicTags);
        this.topicBytesConsumedRate = new MetricNameTemplate("bytes-consumed-rate", groupName,
                "The average number of bytes consumed per second for a topic", topicTags);
        this.topicBytesConsumedTotal = new MetricNameTemplate("bytes-consumed-total", groupName,
                "The total number of bytes consumed for a topic", topicTags);

        this.topicRecordsPerRequestAvg = new MetricNameTemplate("records-per-request-avg", groupName,
                "The average number of records in each request for a topic", topicTags);
        this.topicRecordsConsumedRate = new MetricNameTemplate("records-consumed-rate", groupName,
                "The average number of records consumed per second for a topic", topicTags);
        this.topicRecordsConsumedTotal = new MetricNameTemplate("records-consumed-total", groupName,
                "The total number of records consumed for a topic", topicTags);

        /***** Partition level *****/
        Set<String> partitionTags = new HashSet<>(topicTags);
        partitionTags.add("partition");
        this.partitionRecordsLag = new MetricNameTemplate("records-lag", groupName,
                "The latest lag of the partition", partitionTags);
        this.partitionRecordsLagMax = new MetricNameTemplate("records-lag-max", groupName,
                "The max lag of the partition", partitionTags);
        this.partitionRecordsLagAvg = new MetricNameTemplate("records-lag-avg", groupName,
                "The average lag of the partition", partitionTags);
        this.partitionRecordsLead = new MetricNameTemplate("records-lead", groupName,
                "The latest lead of the partition", partitionTags);
        this.partitionRecordsLeadMin = new MetricNameTemplate("records-lead-min", groupName,
                "The min lead of the partition", partitionTags);
        this.partitionRecordsLeadAvg = new MetricNameTemplate("records-lead-avg", groupName,
                "The average lead of the partition", partitionTags);
        // NOTE(review): this template hard-codes "consumer-fetch-manager-metrics" instead of using
        // groupName like every other template, so it ignores metricGrpPrefix. Likely a long-standing
        // inconsistency, but changing it would rename a published metric — confirm before fixing.
        this.partitionPreferredReadReplica = new MetricNameTemplate(
                "preferred-read-replica", "consumer-fetch-manager-metrics",
                "The current read replica for the partition, or -1 if reading from leader", partitionTags);

    }

    /** @return every template declared by this registry, for documentation/registration purposes. */
    public List<MetricNameTemplate> getAllTemplates() {
        return Arrays.asList(
            fetchSizeAvg,
            fetchSizeMax,
            bytesConsumedRate,
            bytesConsumedTotal,
            recordsPerRequestAvg,
            recordsConsumedRate,
            recordsConsumedTotal,
            fetchLatencyAvg,
            fetchLatencyMax,
            fetchRequestRate,
            fetchRequestTotal,
            recordsLagMax,
            recordsLeadMin,
            fetchThrottleTimeAvg,
            fetchThrottleTimeMax,
            topicFetchSizeAvg,
            topicFetchSizeMax,
            topicBytesConsumedRate,
            topicBytesConsumedTotal,
            topicRecordsPerRequestAvg,
            topicRecordsConsumedRate,
            topicRecordsConsumedTotal,
            partitionRecordsLag,
            partitionRecordsLagAvg,
            partitionRecordsLagMax,
            partitionRecordsLead,
            partitionRecordsLeadMin,
            partitionRecordsLeadAvg,
            partitionPreferredReadReplica
        );
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/Fetcher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.FetchSessionHandler;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;

import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * This class manages the fetching process with the brokers.
 * <p>
 * Thread-safety:
 * Requests and responses of Fetcher may be processed by different threads since heartbeat
 * thread may process responses. Other operations are single-threaded and invoked only from
 * the thread polling the consumer.
 * <ul>
 *     <li>If a response handler accesses any shared state of the Fetcher (e.g. FetchSessionHandler),
 *     all access to that state must be synchronized on the Fetcher instance.</li>
 *     <li>If a response handler accesses any shared state of the coordinator (e.g. SubscriptionState),
 *     it is assumed that all access to that state is synchronized on the coordinator instance by
 *     the caller.</li>
 *     <li>At most one request is pending for each node at any time. Nodes with pending requests are
 *     tracked and updated after processing the response. This ensures that any state (e.g. epoch)
 *     updated while processing responses on one thread are visible while creating the subsequent request
 *     on a different thread.</li>
 * </ul>
 */
public class Fetcher<K, V> extends AbstractFetch<K, V> {

    private final Logger log;
    private final AtomicBoolean isClosed = new AtomicBoolean(false);

    public Fetcher(LogContext logContext,
                   ConsumerNetworkClient client,
                   ConsumerMetadata metadata,
                   SubscriptionState subscriptions,
                   FetchConfig<K, V> fetchConfig,
                   FetchMetricsManager metricsManager,
                   Time time) {
        super(logContext, client, metadata, subscriptions, fetchConfig, metricsManager, time);
        this.log = logContext.logger(Fetcher.class);
    }

    /**
     * Sets up a fetch request for every node we have assigned partitions for that doesn't already
     * have an in-flight fetch or pending fetch data.
     *
     * @return number of fetches sent
     */
    public synchronized int sendFetches() {
        Map<Node, FetchSessionHandler.FetchRequestData> requests = prepareFetchRequests();

        for (Map.Entry<Node, FetchSessionHandler.FetchRequestData> entry : requests.entrySet()) {
            final Node target = entry.getKey();
            final FetchSessionHandler.FetchRequestData requestData = entry.getValue();
            final FetchRequest.Builder requestBuilder = createFetchRequest(target, requestData);

            // Responses may be delivered on a different thread (e.g. the heartbeat thread), so
            // both callbacks synchronize on this Fetcher before touching shared session state.
            client.send(target, requestBuilder).addListener(new RequestFutureListener<ClientResponse>() {
                @Override
                public void onSuccess(ClientResponse response) {
                    synchronized (Fetcher.this) {
                        handleFetchResponse(target, requestData, response);
                    }
                }

                @Override
                public void onFailure(RuntimeException failure) {
                    synchronized (Fetcher.this) {
                        handleFetchResponse(target, failure);
                    }
                }
            });
        }

        return requests.size();
    }

    /**
     * Closes the fetcher at most once; repeated calls are logged and ignored.
     *
     * @param timer bounds how long the close may block
     */
    public void close(final Timer timer) {
        // compareAndSet guarantees exactly one caller performs the close.
        if (!isClosed.compareAndSet(false, true)) {
            log.info("Fetcher {} is already closed.", this);
            return;
        }

        // Shared state (e.g. session handlers) may be touched by other threads such as the
        // heartbeat thread, so the actual teardown runs under the instance lock.
        synchronized (this) {
            super.close(timer);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/GroupState.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.GroupRebalanceConfig;
import org.apache.kafka.common.requests.JoinGroupRequest;
import org.apache.kafka.common.requests.OffsetCommitRequest;

import java.util.Objects;
import java.util.Optional;

/**
 * Holds the consumer-group identity of this member: the group id, the optional static
 * group instance id, and the current {@link Generation}.
 */
public class GroupState {

    public final String groupId;
    public final Optional<String> groupInstanceId;
    // Starts out as NO_GENERATION until the member joins a group generation.
    public Generation generation = Generation.NO_GENERATION;

    public GroupState(String groupId, Optional<String> groupInstanceId) {
        this.groupId = groupId;
        this.groupInstanceId = groupInstanceId;
    }

    public GroupState(final GroupRebalanceConfig config) {
        this(config.groupId, config.groupInstanceId);
    }

    /**
     * Immutable triple of (generation id, member id, protocol name) identifying a
     * member's membership within one group generation.
     */
    protected static class Generation {
        // Sentinel for "not part of any generation yet".
        public static final Generation NO_GENERATION = new Generation(
                OffsetCommitRequest.DEFAULT_GENERATION_ID,
                JoinGroupRequest.UNKNOWN_MEMBER_ID,
                null);

        public final int generationId;
        public final String memberId;
        public final String protocolName;

        public Generation(int generationId, String memberId, String protocolName) {
            this.generationId = generationId;
            this.memberId = memberId;
            this.protocolName = protocolName;
        }

        /**
         * @return true if this generation has a valid member id, false otherwise. A member might have an id before
         * it becomes part of a group generation.
         */
        public boolean hasMemberId() {
            return memberId.length() > 0;
        }

        @Override
        public boolean equals(final Object o) {
            if (o == this)
                return true;
            // Strict class check (not instanceof) so subclasses are never equal to a Generation.
            if (o == null || o.getClass() != getClass())
                return false;
            final Generation other = (Generation) o;
            return other.generationId == generationId
                    && Objects.equals(other.memberId, memberId)
                    && Objects.equals(other.protocolName, protocolName);
        }

        @Override
        public int hashCode() {
            return Objects.hash(generationId, memberId, protocolName);
        }

        @Override
        public String toString() {
            final StringBuilder sb = new StringBuilder("Generation{");
            sb.append("generationId=").append(generationId)
                    .append(", memberId='").append(memberId).append('\'')
                    .append(", protocol='").append(protocolName).append('\'')
                    .append('}');
            return sb.toString();
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/Heartbeat.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.GroupRebalanceConfig;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;

/**
 * A helper class for managing the heartbeat to the coordinator.
 * <p>
 * Tracks three independent timers: the heartbeat interval (when the next heartbeat is due),
 * the session timeout (how long the coordinator may go without hearing from us), and the
 * poll timeout (how long the application may go without calling poll()).
 */
public final class Heartbeat {
    private final int maxPollIntervalMs;
    private final GroupRebalanceConfig rebalanceConfig;
    private final Time time;
    private final Timer heartbeatTimer;
    private final Timer sessionTimer;
    private final Timer pollTimer;
    private final Logger log;

    // volatile: read/written from both the application thread and the heartbeat thread.
    private volatile long lastHeartbeatSend = 0L;
    private volatile boolean heartbeatInFlight = false;

    public Heartbeat(GroupRebalanceConfig config,
                     Time time) {
        // A heartbeat interval >= session timeout could never keep the session alive.
        if (config.heartbeatIntervalMs >= config.sessionTimeoutMs)
            throw new IllegalArgumentException("Heartbeat must be set lower than the session timeout");
        this.rebalanceConfig = config;
        this.time = time;
        this.heartbeatTimer = time.timer(config.heartbeatIntervalMs);
        this.sessionTimer = time.timer(config.sessionTimeoutMs);
        this.maxPollIntervalMs = config.rebalanceTimeoutMs;
        this.pollTimer = time.timer(maxPollIntervalMs);

        final LogContext logContext = new LogContext("[Heartbeat groupID=" + config.groupId + "] ");
        this.log = logContext.logger(getClass());
    }

    // Advance all three timers to the supplied wall-clock time.
    private void update(long now) {
        heartbeatTimer.update(now);
        sessionTimer.update(now);
        pollTimer.update(now);
    }

    /** Records an application poll(), restarting the poll-interval countdown. */
    public void poll(long now) {
        update(now);
        pollTimer.reset(maxPollIntervalMs);
    }

    /** @return true if a heartbeat request has been sent but not yet answered. */
    boolean hasInflight() {
        return heartbeatInFlight;
    }

    /** Records that a heartbeat request was sent at {@code now} and restarts the interval timer. */
    void sentHeartbeat(long now) {
        lastHeartbeatSend = now;
        heartbeatInFlight = true;
        update(now);
        heartbeatTimer.reset(rebalanceConfig.heartbeatIntervalMs);

        if (log.isTraceEnabled()) {
            log.trace("Sending heartbeat request with {}ms remaining on timer", heartbeatTimer.remainingMs());
        }
    }

    /** Records a failed heartbeat; the next attempt is scheduled after the retry backoff. */
    void failHeartbeat() {
        update(time.milliseconds());
        heartbeatInFlight = false;
        heartbeatTimer.reset(rebalanceConfig.retryBackoffMs);

        log.trace("Heartbeat failed, reset the timer to {}ms remaining", heartbeatTimer.remainingMs());
    }

    /** Records a successful heartbeat response; the session timeout countdown restarts. */
    void receiveHeartbeat() {
        update(time.milliseconds());
        heartbeatInFlight = false;
        sessionTimer.reset(rebalanceConfig.sessionTimeoutMs);
    }

    /** @return true if the heartbeat interval has elapsed and a heartbeat should be sent. */
    boolean shouldHeartbeat(long now) {
        update(now);
        return heartbeatTimer.isExpired();
    }

    long lastHeartbeatSend() {
        return this.lastHeartbeatSend;
    }

    /** @return milliseconds until the next heartbeat is due (0 if already due). */
    long timeToNextHeartbeat(long now) {
        update(now);
        return heartbeatTimer.remainingMs();
    }

    /** @return true if no successful heartbeat has been received within the session timeout. */
    boolean sessionTimeoutExpired(long now) {
        update(now);
        return sessionTimer.isExpired();
    }

    /** Restarts the session, poll, and heartbeat timers (e.g. after rejoining the group). */
    void resetTimeouts() {
        update(time.milliseconds());
        sessionTimer.reset(rebalanceConfig.sessionTimeoutMs);
        pollTimer.reset(maxPollIntervalMs);
        heartbeatTimer.reset(rebalanceConfig.heartbeatIntervalMs);
    }

    /** Restarts only the session timer. */
    void resetSessionTimeout() {
        update(time.milliseconds());
        sessionTimer.reset(rebalanceConfig.sessionTimeoutMs);
    }

    /** @return true if the application has not called poll() within the max poll interval. */
    boolean pollTimeoutExpired(long now) {
        update(now);
        return pollTimer.isExpired();
    }

    long lastPollTime() {
        return pollTimer.currentTimeMs();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.CumulativeSum;
import org.apache.kafka.common.metrics.stats.Max;

import java.util.concurrent.TimeUnit;

/**
 * Registers and records consumer-level metrics: time between poll() calls, poll idle ratio,
 * and cumulative time spent in commitSync/committed. Metrics are registered in the
 * constructor and removed again in {@link #close()}.
 */
public class KafkaConsumerMetrics implements AutoCloseable {
    private final MetricName lastPollMetricName;
    private final Sensor timeBetweenPollSensor;
    private final Sensor pollIdleSensor;
    private final Sensor committedSensor;
    private final Sensor commitSyncSensor;
    private final Metrics metrics;

    // Timestamp (ms) of the most recent poll(); 0 means poll() has never been called.
    private long lastPollMs;
    // Timestamp (ms) at which the current poll() started.
    private long pollStartMs;
    // Gap (ms) between the previous poll() and the current one.
    private long timeSinceLastPollMs;

    /**
     * @param metrics         registry to add the metrics/sensors to
     * @param metricGrpPrefix prefix for the metric group name ("&lt;prefix&gt;-metrics")
     */
    public KafkaConsumerMetrics(Metrics metrics, String metricGrpPrefix) {
        this.metrics = metrics;
        String metricGroupName = metricGrpPrefix + "-metrics";
        // Gauge computed on read: seconds since the last poll(), or -1 before the first poll.
        Measurable lastPoll = (mConfig, now) -> {
            if (lastPollMs == 0L)
                // if no poll is ever triggered, just return -1.
                return -1d;
            else
                return TimeUnit.SECONDS.convert(now - lastPollMs, TimeUnit.MILLISECONDS);
        };
        this.lastPollMetricName = metrics.metricName("last-poll-seconds-ago",
                metricGroupName, "The number of seconds since the last poll() invocation.");
        metrics.addMetric(lastPollMetricName, lastPoll);

        this.timeBetweenPollSensor = metrics.sensor("time-between-poll");
        this.timeBetweenPollSensor.add(metrics.metricName("time-between-poll-avg",
                metricGroupName,
                "The average delay between invocations of poll() in milliseconds."),
                new Avg());
        this.timeBetweenPollSensor.add(metrics.metricName("time-between-poll-max",
                metricGroupName,
                "The max delay between invocations of poll() in milliseconds."),
                new Max());

        this.pollIdleSensor = metrics.sensor("poll-idle-ratio-avg");
        this.pollIdleSensor.add(metrics.metricName("poll-idle-ratio-avg",
                metricGroupName,
                "The average fraction of time the consumer's poll() is idle as opposed to waiting for the user code to process records."),
                new Avg());

        this.commitSyncSensor = metrics.sensor("commit-sync-time-ns-total");
        this.commitSyncSensor.add(
                metrics.metricName(
                        "commit-sync-time-ns-total",
                        metricGroupName,
                        "The total time the consumer has spent in commitSync in nanoseconds"
                ),
                new CumulativeSum()
        );

        this.committedSensor = metrics.sensor("committed-time-ns-total");
        this.committedSensor.add(
                metrics.metricName(
                        "committed-time-ns-total",
                        metricGroupName,
                        "The total time the consumer has spent in committed in nanoseconds"
                ),
                new CumulativeSum()
        );
    }

    /** Records the start of a poll() call and the gap since the previous one. */
    public void recordPollStart(long pollStartMs) {
        this.pollStartMs = pollStartMs;
        this.timeSinceLastPollMs = lastPollMs != 0L ? pollStartMs - lastPollMs : 0;
        this.timeBetweenPollSensor.record(timeSinceLastPollMs);
        this.lastPollMs = pollStartMs;
    }

    /** Records the end of a poll() call and updates the poll-idle ratio. */
    public void recordPollEnd(long pollEndMs) {
        long pollTimeMs = pollEndMs - pollStartMs;
        // NOTE(review): if pollTimeMs and timeSinceLastPollMs are both 0 this is 0/0 = NaN;
        // presumably Avg tolerates that — confirm against the metrics implementation.
        double pollIdleRatio = pollTimeMs * 1.0 / (pollTimeMs + timeSinceLastPollMs);
        this.pollIdleSensor.record(pollIdleRatio);
    }

    /** @param duration time spent in commitSync, in nanoseconds */
    public void recordCommitSync(long duration) {
        this.commitSyncSensor.record(duration);
    }

    /** @param duration time spent in committed, in nanoseconds */
    public void recordCommitted(long duration) {
        this.committedSensor.record(duration);
    }

    /** Unregisters every metric/sensor added by the constructor. */
    @Override
    public void close() {
        metrics.removeMetric(lastPollMetricName);
        metrics.removeSensor(timeBetweenPollSensor.name());
        metrics.removeSensor(pollIdleSensor.name());
        metrics.removeSensor(commitSyncSensor.name());
        metrics.removeSensor(committedSensor.name());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.clients.RequestCompletionHandler;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;

/**
 * A wrapper around the {@link org.apache.kafka.clients.NetworkClient} to handle network poll and send operations.
 */
public class NetworkClientDelegate implements AutoCloseable {
    private final KafkaClient client;
    private final Time time;
    private final Logger log;
    private final int requestTimeoutMs;
    // Requests accepted via send()/addAll() that have not yet been handed to the client.
    private final Queue<UnsentRequest> unsentRequests;
    private final long retryBackoffMs;

    public NetworkClientDelegate(
            final Time time,
            final ConsumerConfig config,
            final LogContext logContext,
            final KafkaClient client) {
        this.time = time;
        this.client = client;
        this.log = logContext.logger(getClass());
        this.unsentRequests = new ArrayDeque<>();
        this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG);
        this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);
    }

    /**
     * Tries to send the unsent requests, polls the underlying client for responses (which are delivered
     * through each request's completion handler), and fails requests targeting disconnected nodes.
     *
     * @param timeoutMs     maximum time to block in the underlying poll
     * @param currentTimeMs current time
     */
    public void poll(final long timeoutMs, final long currentTimeMs) {
        trySend(currentTimeMs);

        long pollTimeoutMs = timeoutMs;
        if (!unsentRequests.isEmpty()) {
            // Cap the poll with the retry backoff so queued-but-unsendable requests are retried promptly.
            pollTimeoutMs = Math.min(retryBackoffMs, pollTimeoutMs);
        }
        this.client.poll(pollTimeoutMs, currentTimeMs);
        checkDisconnects();
    }

    /**
     * Tries to send the requests in the unsentRequest queue. If the request doesn't have an assigned node, it will
     * find the leastLoadedOne, and will be retried in the next {@code poll()}. If the request has expired, its
     * handler is failed with a {@link TimeoutException}.
     */
    private void trySend(final long currentTimeMs) {
        Iterator<UnsentRequest> iterator = unsentRequests.iterator();
        while (iterator.hasNext()) {
            UnsentRequest unsent = iterator.next();
            unsent.timer.update(currentTimeMs);
            if (unsent.timer.isExpired()) {
                iterator.remove();
                unsent.handler.onFailure(new TimeoutException(
                    "Failed to send request after " + unsent.timer.timeoutMs() + " ms."));
                continue;
            }

            if (!doSend(unsent, currentTimeMs)) {
                // continue to retry until timeout.
                continue;
            }
            iterator.remove();
        }
    }

    private boolean doSend(final UnsentRequest r, final long currentTimeMs) {
        // orElseGet avoids querying the least-loaded node when a target node is already assigned.
        Node node = r.node.orElseGet(() -> client.leastLoadedNode(currentTimeMs));
        if (node == null || nodeUnavailable(node)) {
            log.debug("No broker available to send the request: {}. Retrying.", r);
            return false;
        }
        ClientRequest request = makeClientRequest(r, node, currentTimeMs);
        if (!client.ready(node, currentTimeMs)) {
            // enqueue the request again if the node isn't ready yet. The request will be handled in the next iteration
            // of the event loop
            log.debug("Node is not ready, handle the request in the next event loop: node={}, request={}", node, r);
            return false;
        }
        client.send(request, currentTimeMs);
        return true;
    }

    private void checkDisconnects() {
        // Fail any unsent request whose assigned node is disconnected and cannot be reconnected.
        Iterator<UnsentRequest> iter = unsentRequests.iterator();
        while (iter.hasNext()) {
            UnsentRequest u = iter.next();
            if (u.node.isPresent() && client.connectionFailed(u.node.get())) {
                iter.remove();
                AuthenticationException authenticationException = client.authenticationException(u.node.get());
                // authenticationException is null when the connection failed for a non-authentication
                // reason. Completing the future exceptionally with null would throw NullPointerException
                // on this thread, so fall back to a generic DisconnectException in that case.
                u.handler.onFailure(authenticationException != null
                    ? authenticationException
                    : DisconnectException.INSTANCE);
            }
        }
    }

    // Converts an UnsentRequest into a ClientRequest bound to the given node; the remaining
    // time on the request's timer becomes the request timeout.
    private ClientRequest makeClientRequest(
        final UnsentRequest unsent,
        final Node node,
        final long currentTimeMs
    ) {
        return client.newClientRequest(
            node.idString(),
            unsent.requestBuilder,
            currentTimeMs,
            true,
            (int) unsent.timer.remainingMs(),
            unsent.handler
        );
    }

    public Node leastLoadedNode() {
        return this.client.leastLoadedNode(time.milliseconds());
    }

    /** Enqueues a request; it is actually transmitted on a subsequent {@link #poll(long, long)}. */
    public void send(final UnsentRequest r) {
        r.setTimer(this.time, this.requestTimeoutMs);
        unsentRequests.add(r);
    }

    public void wakeup() {
        client.wakeup();
    }

    /**
     * Check if the node is disconnected and unavailable for immediate reconnection (i.e. if it is in
     * reconnect backoff window following the disconnect).
     */
    public boolean nodeUnavailable(final Node node) {
        return client.connectionFailed(node) && client.connectionDelay(node, time.milliseconds()) > 0;
    }

    public void close() throws IOException {
        this.client.close();
    }

    /** Enqueues all given requests, arming each with the configured request timeout. */
    public void addAll(final List<UnsentRequest> requests) {
        requests.forEach(u -> u.setTimer(this.time, this.requestTimeoutMs));
        this.unsentRequests.addAll(requests);
    }

    /** Result of a poll: when to poll next and which requests still need sending. */
    public static class PollResult {
        public final long timeUntilNextPollMs;
        public final List<UnsentRequest> unsentRequests;

        public PollResult(final long timeMsTillNextPoll, final List<UnsentRequest> unsentRequests) {
            this.timeUntilNextPollMs = timeMsTillNextPoll;
            this.unsentRequests = Collections.unmodifiableList(unsentRequests);
        }
    }

    /** A request that has been accepted but not yet handed to the network client. */
    public static class UnsentRequest {
        private final AbstractRequest.Builder<?> requestBuilder;
        private final FutureCompletionHandler handler;
        private Optional<Node> node; // empty if random node can be chosen
        private Timer timer;

        public UnsentRequest(
            final AbstractRequest.Builder<?> requestBuilder,
            final Optional<Node> node) {
            this(requestBuilder, node, new FutureCompletionHandler());
        }

        public UnsentRequest(
            final AbstractRequest.Builder<?> requestBuilder,
            final Optional<Node> node,
            final FutureCompletionHandler handler) {
            Objects.requireNonNull(requestBuilder);
            this.requestBuilder = requestBuilder;
            this.node = node;
            this.handler = handler;
        }

        public void setTimer(final Time time, final long requestTimeoutMs) {
            this.timer = time.timer(requestTimeoutMs);
        }

        CompletableFuture<ClientResponse> future() {
            return handler.future;
        }

        RequestCompletionHandler callback() {
            return handler;
        }

        AbstractRequest.Builder<?> requestBuilder() {
            return requestBuilder;
        }

        @Override
        public String toString() {
            return "UnsentRequest(builder=" + requestBuilder + ")";
        }
    }

    /**
     * Bridges the callback-style {@link RequestCompletionHandler} to a {@link CompletableFuture},
     * mapping auth errors, disconnects, and version mismatches to exceptional completion.
     */
    public static class FutureCompletionHandler implements RequestCompletionHandler {

        private final CompletableFuture<ClientResponse> future;

        FutureCompletionHandler() {
            this.future = new CompletableFuture<>();
        }

        public void onFailure(final RuntimeException e) {
            future.completeExceptionally(e);
        }

        public CompletableFuture<ClientResponse> future() {
            return future;
        }

        @Override
        public void onComplete(final ClientResponse response) {
            if (response.authenticationException() != null) {
                onFailure(response.authenticationException());
            } else if (response.wasDisconnected()) {
                onFailure(DisconnectException.INSTANCE);
            } else if (response.versionMismatch() != null) {
                onFailure(response.versionMismatch());
            } else {
                future.complete(response);
            }
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/NoAvailableBrokersException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.errors.InvalidMetadataException;

/**
 * No brokers were available to complete a request.
 * <p>
 * Extends {@link InvalidMetadataException}, so callers treating metadata errors as
 * retriable handle this the same way.
 */
public class NoAvailableBrokersException extends InvalidMetadataException {
    // Explicit serialVersionUID keeps the serialized form stable across builds.
    private static final long serialVersionUID = 1L;
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/NoOpConsumerRebalanceListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;

/**
 * A {@link ConsumerRebalanceListener} that ignores every rebalance callback. Used as the
 * default when the caller does not supply its own listener.
 */
public class NoOpConsumerRebalanceListener implements ConsumerRebalanceListener {

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // intentionally a no-op
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // intentionally a no-op
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/NoopBackgroundEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent;

/**
 * Noop event. Intentionally left it here for demonstration purpose.
 */
public class NoopBackgroundEvent extends BackgroundEvent {

    // Free-form payload carried by the event; only used in toString().
    public final String message;

    public NoopBackgroundEvent(final String message) {
        super(EventType.NOOP);
        this.message = message;
    }

    @Override
    public String toString() {
        // Same rendering as getClass() + "_" + message.
        return String.format("%s_%s", getClass(), message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.StaleMetadataException; import org.apache.kafka.clients.consumer.LogTruncationException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.OffsetsForLeaderEpochClient.OffsetForEpochResult; import org.apache.kafka.clients.consumer.internals.SubscriptionState.FetchPosition; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; import 
org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.slf4j.Logger; /** * {@link OffsetFetcher} is responsible for fetching the {@link OffsetAndTimestamp offsets} for * a given set of {@link TopicPartition topic and partition pairs} and for validation and resetting of positions, * as needed. 
*/ public class OffsetFetcher { private final Logger log; private final ConsumerMetadata metadata; private final SubscriptionState subscriptions; private final ConsumerNetworkClient client; private final Time time; private final long retryBackoffMs; private final long requestTimeoutMs; private final IsolationLevel isolationLevel; private final AtomicReference<RuntimeException> cachedListOffsetsException = new AtomicReference<>(); private final AtomicReference<RuntimeException> cachedOffsetForLeaderException = new AtomicReference<>(); private final OffsetsForLeaderEpochClient offsetsForLeaderEpochClient; private final ApiVersions apiVersions; private final AtomicInteger metadataUpdateVersion = new AtomicInteger(-1); public OffsetFetcher(LogContext logContext, ConsumerNetworkClient client, ConsumerMetadata metadata, SubscriptionState subscriptions, Time time, long retryBackoffMs, long requestTimeoutMs, IsolationLevel isolationLevel, ApiVersions apiVersions) { this.log = logContext.logger(getClass()); this.time = time; this.client = client; this.metadata = metadata; this.subscriptions = subscriptions; this.retryBackoffMs = retryBackoffMs; this.requestTimeoutMs = requestTimeoutMs; this.isolationLevel = isolationLevel; this.apiVersions = apiVersions; this.offsetsForLeaderEpochClient = new OffsetsForLeaderEpochClient(client, logContext); } /** * Represents data about an offset returned by a broker. 
*/ static class ListOffsetData { final long offset; final Long timestamp; // null if the broker does not support returning timestamps final Optional<Integer> leaderEpoch; // empty if the leader epoch is not known ListOffsetData(long offset, Long timestamp, Optional<Integer> leaderEpoch) { this.offset = offset; this.timestamp = timestamp; this.leaderEpoch = leaderEpoch; } } private Long offsetResetStrategyTimestamp(final TopicPartition partition) { OffsetResetStrategy strategy = subscriptions.resetStrategy(partition); if (strategy == OffsetResetStrategy.EARLIEST) return ListOffsetsRequest.EARLIEST_TIMESTAMP; else if (strategy == OffsetResetStrategy.LATEST) return ListOffsetsRequest.LATEST_TIMESTAMP; else return null; } private OffsetResetStrategy timestampToOffsetResetStrategy(long timestamp) { if (timestamp == ListOffsetsRequest.EARLIEST_TIMESTAMP) return OffsetResetStrategy.EARLIEST; else if (timestamp == ListOffsetsRequest.LATEST_TIMESTAMP) return OffsetResetStrategy.LATEST; else return null; } /** * Reset offsets for all assigned partitions that require it. * * @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined * and one or more partitions aren't awaiting a seekToBeginning() or seekToEnd(). 
*/ public void resetPositionsIfNeeded() { // Raise exception from previous offset fetch if there is one RuntimeException exception = cachedListOffsetsException.getAndSet(null); if (exception != null) throw exception; Set<TopicPartition> partitions = subscriptions.partitionsNeedingReset(time.milliseconds()); if (partitions.isEmpty()) return; final Map<TopicPartition, Long> offsetResetTimestamps = new HashMap<>(); for (final TopicPartition partition : partitions) { Long timestamp = offsetResetStrategyTimestamp(partition); if (timestamp != null) offsetResetTimestamps.put(partition, timestamp); } resetPositionsAsync(offsetResetTimestamps); } /** * Validate offsets for all assigned partitions for which a leader change has been detected. */ public void validatePositionsIfNeeded() { RuntimeException exception = cachedOffsetForLeaderException.getAndSet(null); if (exception != null) throw exception; // Validate each partition against the current leader and epoch // If we see a new metadata version, check all partitions validatePositionsOnMetadataChange(); // Collect positions needing validation, with backoff Map<TopicPartition, FetchPosition> partitionsToValidate = subscriptions .partitionsNeedingValidation(time.milliseconds()) .stream() .filter(tp -> subscriptions.position(tp) != null) .collect(Collectors.toMap(Function.identity(), subscriptions::position)); validatePositionsAsync(partitionsToValidate); } public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Timer timer) { metadata.addTransientTopics(topicsForPartitions(timestampsToSearch.keySet())); try { Map<TopicPartition, ListOffsetData> fetchedOffsets = fetchOffsetsByTimes(timestampsToSearch, timer, true).fetchedOffsets; HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(timestampsToSearch.size()); for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) offsetsByTimes.put(entry.getKey(), null); for 
(Map.Entry<TopicPartition, ListOffsetData> entry : fetchedOffsets.entrySet()) { // 'entry.getValue().timestamp' will not be null since we are guaranteed // to work with a v1 (or later) ListOffset request ListOffsetData offsetData = entry.getValue(); offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(offsetData.offset, offsetData.timestamp, offsetData.leaderEpoch)); } return offsetsByTimes; } finally { metadata.clearTransientTopics(); } } private ListOffsetResult fetchOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, Timer timer, boolean requireTimestamps) { ListOffsetResult result = new ListOffsetResult(); if (timestampsToSearch.isEmpty()) return result; Map<TopicPartition, Long> remainingToSearch = new HashMap<>(timestampsToSearch); do { RequestFuture<ListOffsetResult> future = sendListOffsetsRequests(remainingToSearch, requireTimestamps); future.addListener(new RequestFutureListener<ListOffsetResult>() { @Override public void onSuccess(ListOffsetResult value) { synchronized (future) { result.fetchedOffsets.putAll(value.fetchedOffsets); remainingToSearch.keySet().retainAll(value.partitionsToRetry); for (final Map.Entry<TopicPartition, ListOffsetData> entry: value.fetchedOffsets.entrySet()) { final TopicPartition partition = entry.getKey(); // if the interested partitions are part of the subscriptions, use the returned offset to update // the subscription state as well: // * with read-committed, the returned offset would be LSO; // * with read-uncommitted, the returned offset would be HW; if (subscriptions.isAssigned(partition)) { final long offset = entry.getValue().offset; if (isolationLevel == IsolationLevel.READ_COMMITTED) { log.trace("Updating last stable offset for partition {} to {}", partition, offset); subscriptions.updateLastStableOffset(partition, offset); } else { log.trace("Updating high watermark for partition {} to {}", partition, offset); subscriptions.updateHighWatermark(partition, offset); } } } } } @Override public void 
onFailure(RuntimeException e) { if (!(e instanceof RetriableException)) { throw future.exception(); } } }); // if timeout is set to zero, do not try to poll the network client at all // and return empty immediately; otherwise try to get the results synchronously // and throw timeout exception if cannot complete in time if (timer.timeoutMs() == 0L) return result; client.poll(future, timer); if (!future.isDone()) { break; } else if (remainingToSearch.isEmpty()) { return result; } else { client.awaitMetadataUpdate(timer); } } while (timer.notExpired()); throw new TimeoutException("Failed to get offsets by times in " + timer.elapsedMs() + "ms"); } public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Timer timer) { return beginningOrEndOffset(partitions, ListOffsetsRequest.EARLIEST_TIMESTAMP, timer); } public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Timer timer) { return beginningOrEndOffset(partitions, ListOffsetsRequest.LATEST_TIMESTAMP, timer); } private Map<TopicPartition, Long> beginningOrEndOffset(Collection<TopicPartition> partitions, long timestamp, Timer timer) { metadata.addTransientTopics(topicsForPartitions(partitions)); try { Map<TopicPartition, Long> timestampsToSearch = partitions.stream() .distinct() .collect(Collectors.toMap(Function.identity(), tp -> timestamp)); ListOffsetResult result = fetchOffsetsByTimes(timestampsToSearch, timer, false); return result.fetchedOffsets.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().offset)); } finally { metadata.clearTransientTopics(); } } // Visible for testing void resetPositionIfNeeded(TopicPartition partition, OffsetResetStrategy requestedResetStrategy, ListOffsetData offsetData) { FetchPosition position = new FetchPosition( offsetData.offset, Optional.empty(), // This will ensure we skip validation metadata.currentLeader(partition)); offsetData.leaderEpoch.ifPresent(epoch -> 
metadata.updateLastSeenEpochIfNewer(partition, epoch)); subscriptions.maybeSeekUnvalidated(partition, position, requestedResetStrategy); } private void resetPositionsAsync(Map<TopicPartition, Long> partitionResetTimestamps) { Map<Node, Map<TopicPartition, ListOffsetsPartition>> timestampsToSearchByNode = groupListOffsetRequests(partitionResetTimestamps, new HashSet<>()); for (Map.Entry<Node, Map<TopicPartition, ListOffsetsPartition>> entry : timestampsToSearchByNode.entrySet()) { Node node = entry.getKey(); final Map<TopicPartition, ListOffsetsPartition> resetTimestamps = entry.getValue(); subscriptions.setNextAllowedRetry(resetTimestamps.keySet(), time.milliseconds() + requestTimeoutMs); RequestFuture<ListOffsetResult> future = sendListOffsetRequest(node, resetTimestamps, false); future.addListener(new RequestFutureListener<ListOffsetResult>() { @Override public void onSuccess(ListOffsetResult result) { if (!result.partitionsToRetry.isEmpty()) { subscriptions.requestFailed(result.partitionsToRetry, time.milliseconds() + retryBackoffMs); metadata.requestUpdate(); } for (Map.Entry<TopicPartition, ListOffsetData> fetchedOffset : result.fetchedOffsets.entrySet()) { TopicPartition partition = fetchedOffset.getKey(); ListOffsetData offsetData = fetchedOffset.getValue(); ListOffsetsPartition requestedReset = resetTimestamps.get(partition); resetPositionIfNeeded(partition, timestampToOffsetResetStrategy(requestedReset.timestamp()), offsetData); } } @Override public void onFailure(RuntimeException e) { subscriptions.requestFailed(resetTimestamps.keySet(), time.milliseconds() + retryBackoffMs); metadata.requestUpdate(); if (!(e instanceof RetriableException) && !cachedListOffsetsException.compareAndSet(null, e)) log.error("Discarding error in ListOffsetResponse because another error is pending", e); } }); } } static boolean hasUsableOffsetForLeaderEpochVersion(NodeApiVersions nodeApiVersions) { ApiVersion apiVersion = 
nodeApiVersions.apiVersion(ApiKeys.OFFSET_FOR_LEADER_EPOCH); if (apiVersion == null) return false; return OffsetsForLeaderEpochRequest.supportsTopicPermission(apiVersion.maxVersion()); } /** * For each partition which needs validation, make an asynchronous request to get the end-offsets for the partition * with the epoch less than or equal to the epoch the partition last saw. * * <p/> * * Requests are grouped by Node for efficiency. */ private void validatePositionsAsync(Map<TopicPartition, FetchPosition> partitionsToValidate) { final Map<Node, Map<TopicPartition, FetchPosition>> regrouped = regroupFetchPositionsByLeader(partitionsToValidate); long nextResetTimeMs = time.milliseconds() + requestTimeoutMs; regrouped.forEach((node, fetchPositions) -> { if (node.isEmpty()) { metadata.requestUpdate(); return; } NodeApiVersions nodeApiVersions = apiVersions.get(node.idString()); if (nodeApiVersions == null) { client.tryConnect(node); return; } if (!hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) { log.debug("Skipping validation of fetch offsets for partitions {} since the broker does not " + "support the required protocol version (introduced in Kafka 2.3)", fetchPositions.keySet()); for (TopicPartition partition : fetchPositions.keySet()) { subscriptions.completeValidation(partition); } return; } subscriptions.setNextAllowedRetry(fetchPositions.keySet(), nextResetTimeMs); RequestFuture<OffsetForEpochResult> future = offsetsForLeaderEpochClient.sendAsyncRequest(node, fetchPositions); future.addListener(new RequestFutureListener<OffsetForEpochResult>() { @Override public void onSuccess(OffsetForEpochResult offsetsResult) { List<SubscriptionState.LogTruncation> truncations = new ArrayList<>(); if (!offsetsResult.partitionsToRetry().isEmpty()) { subscriptions.setNextAllowedRetry(offsetsResult.partitionsToRetry(), time.milliseconds() + retryBackoffMs); metadata.requestUpdate(); } // For each OffsetsForLeader response, check if the end-offset is lower than our current 
offset // for the partition. If so, it means we have experienced log truncation and need to reposition // that partition's offset. // // In addition, check whether the returned offset and epoch are valid. If not, then we should reset // its offset if reset policy is configured, or throw out of range exception. offsetsResult.endOffsets().forEach((topicPartition, respEndOffset) -> { FetchPosition requestPosition = fetchPositions.get(topicPartition); Optional<SubscriptionState.LogTruncation> truncationOpt = subscriptions.maybeCompleteValidation(topicPartition, requestPosition, respEndOffset); truncationOpt.ifPresent(truncations::add); }); if (!truncations.isEmpty()) { maybeSetOffsetForLeaderException(buildLogTruncationException(truncations)); } } @Override public void onFailure(RuntimeException e) { subscriptions.requestFailed(fetchPositions.keySet(), time.milliseconds() + retryBackoffMs); metadata.requestUpdate(); if (!(e instanceof RetriableException)) { maybeSetOffsetForLeaderException(e); } } }); }); } private LogTruncationException buildLogTruncationException(List<SubscriptionState.LogTruncation> truncations) { Map<TopicPartition, OffsetAndMetadata> divergentOffsets = new HashMap<>(); Map<TopicPartition, Long> truncatedFetchOffsets = new HashMap<>(); for (SubscriptionState.LogTruncation truncation : truncations) { truncation.divergentOffsetOpt.ifPresent(divergentOffset -> divergentOffsets.put(truncation.topicPartition, divergentOffset)); truncatedFetchOffsets.put(truncation.topicPartition, truncation.fetchPosition.offset); } return new LogTruncationException("Detected truncated partitions: " + truncations, truncatedFetchOffsets, divergentOffsets); } private void maybeSetOffsetForLeaderException(RuntimeException e) { if (!cachedOffsetForLeaderException.compareAndSet(null, e)) { log.error("Discarding error in OffsetsForLeaderEpoch because another error is pending", e); } } /** * Search the offsets by target times for the specified partitions. 
* * @param timestampsToSearch the mapping between partitions and target time * @param requireTimestamps true if we should fail with an UnsupportedVersionException if the broker does * not support fetching precise timestamps for offsets * @return A response which can be polled to obtain the corresponding timestamps and offsets. */ private RequestFuture<ListOffsetResult> sendListOffsetsRequests(final Map<TopicPartition, Long> timestampsToSearch, final boolean requireTimestamps) { final Set<TopicPartition> partitionsToRetry = new HashSet<>(); Map<Node, Map<TopicPartition, ListOffsetsPartition>> timestampsToSearchByNode = groupListOffsetRequests(timestampsToSearch, partitionsToRetry); if (timestampsToSearchByNode.isEmpty()) return RequestFuture.failure(new StaleMetadataException()); final RequestFuture<ListOffsetResult> listOffsetRequestsFuture = new RequestFuture<>(); final Map<TopicPartition, ListOffsetData> fetchedTimestampOffsets = new HashMap<>(); final AtomicInteger remainingResponses = new AtomicInteger(timestampsToSearchByNode.size()); for (Map.Entry<Node, Map<TopicPartition, ListOffsetsPartition>> entry : timestampsToSearchByNode.entrySet()) { RequestFuture<ListOffsetResult> future = sendListOffsetRequest(entry.getKey(), entry.getValue(), requireTimestamps); future.addListener(new RequestFutureListener<ListOffsetResult>() { @Override public void onSuccess(ListOffsetResult partialResult) { synchronized (listOffsetRequestsFuture) { fetchedTimestampOffsets.putAll(partialResult.fetchedOffsets); partitionsToRetry.addAll(partialResult.partitionsToRetry); if (remainingResponses.decrementAndGet() == 0 && !listOffsetRequestsFuture.isDone()) { ListOffsetResult result = new ListOffsetResult(fetchedTimestampOffsets, partitionsToRetry); listOffsetRequestsFuture.complete(result); } } } @Override public void onFailure(RuntimeException e) { synchronized (listOffsetRequestsFuture) { if (!listOffsetRequestsFuture.isDone()) listOffsetRequestsFuture.raise(e); } } }); } return 
listOffsetRequestsFuture; } /** * Groups timestamps to search by node for topic partitions in `timestampsToSearch` that have * leaders available. Topic partitions from `timestampsToSearch` that do not have their leader * available are added to `partitionsToRetry` * @param timestampsToSearch The mapping from partitions ot the target timestamps * @param partitionsToRetry A set of topic partitions that will be extended with partitions * that need metadata update or re-connect to the leader. */ private Map<Node, Map<TopicPartition, ListOffsetsPartition>> groupListOffsetRequests( Map<TopicPartition, Long> timestampsToSearch, Set<TopicPartition> partitionsToRetry) { final Map<TopicPartition, ListOffsetsPartition> partitionDataMap = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry: timestampsToSearch.entrySet()) { TopicPartition tp = entry.getKey(); Long offset = entry.getValue(); Metadata.LeaderAndEpoch leaderAndEpoch = metadata.currentLeader(tp); if (!leaderAndEpoch.leader.isPresent()) { log.debug("Leader for partition {} is unknown for fetching offset {}", tp, offset); metadata.requestUpdate(); partitionsToRetry.add(tp); } else { Node leader = leaderAndEpoch.leader.get(); if (client.isUnavailable(leader)) { client.maybeThrowAuthFailure(leader); // The connection has failed and we need to await the backoff period before we can // try again. No need to request a metadata update since the disconnect will have // done so already. 
log.debug("Leader {} for partition {} is unavailable for fetching offset until reconnect backoff expires", leader, tp); partitionsToRetry.add(tp); } else { int currentLeaderEpoch = leaderAndEpoch.epoch.orElse(ListOffsetsResponse.UNKNOWN_EPOCH); partitionDataMap.put(tp, new ListOffsetsPartition() .setPartitionIndex(tp.partition()) .setTimestamp(offset) .setCurrentLeaderEpoch(currentLeaderEpoch)); } } } return regroupPartitionMapByNode(partitionDataMap); } /** * Send the ListOffsetRequest to a specific broker for the partitions and target timestamps. * * @param node The node to send the ListOffsetRequest to. * @param timestampsToSearch The mapping from partitions to the target timestamps. * @param requireTimestamp True if we require a timestamp in the response. * @return A response which can be polled to obtain the corresponding timestamps and offsets. */ private RequestFuture<ListOffsetResult> sendListOffsetRequest(final Node node, final Map<TopicPartition, ListOffsetsPartition> timestampsToSearch, boolean requireTimestamp) { ListOffsetsRequest.Builder builder = ListOffsetsRequest.Builder .forConsumer(requireTimestamp, isolationLevel, false) .setTargetTimes(ListOffsetsRequest.toListOffsetsTopics(timestampsToSearch)); log.debug("Sending ListOffsetRequest {} to broker {}", builder, node); return client.send(node, builder) .compose(new RequestFutureAdapter<ClientResponse, ListOffsetResult>() { @Override public void onSuccess(ClientResponse response, RequestFuture<ListOffsetResult> future) { ListOffsetsResponse lor = (ListOffsetsResponse) response.responseBody(); log.trace("Received ListOffsetResponse {} from broker {}", lor, node); handleListOffsetResponse(lor, future); } }); } /** * Callback for the response of the list offset call above. * @param listOffsetsResponse The response from the server. * @param future The future to be completed when the response returns. Note that any partition-level errors will * generally fail the entire future result. 
The one exception is UNSUPPORTED_FOR_MESSAGE_FORMAT, * which indicates that the broker does not support the v1 message format. Partitions with this * particular error are simply left out of the future map. Note that the corresponding timestamp * value of each partition may be null only for v0. In v1 and later the ListOffset API would not * return a null timestamp (-1 is returned instead when necessary). */ private void handleListOffsetResponse(ListOffsetsResponse listOffsetsResponse, RequestFuture<ListOffsetResult> future) { Map<TopicPartition, ListOffsetData> fetchedOffsets = new HashMap<>(); Set<TopicPartition> partitionsToRetry = new HashSet<>(); Set<String> unauthorizedTopics = new HashSet<>(); for (ListOffsetsTopicResponse topic : listOffsetsResponse.topics()) { for (ListOffsetsPartitionResponse partition : topic.partitions()) { TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex()); Errors error = Errors.forCode(partition.errorCode()); switch (error) { case NONE: if (!partition.oldStyleOffsets().isEmpty()) { // Handle v0 response with offsets long offset; if (partition.oldStyleOffsets().size() > 1) { future.raise(new IllegalStateException("Unexpected partitionData response of length " + partition.oldStyleOffsets().size())); return; } else { offset = partition.oldStyleOffsets().get(0); } log.debug("Handling v0 ListOffsetResponse response for {}. Fetched offset {}", topicPartition, offset); if (offset != ListOffsetsResponse.UNKNOWN_OFFSET) { ListOffsetData offsetData = new ListOffsetData(offset, null, Optional.empty()); fetchedOffsets.put(topicPartition, offsetData); } } else { // Handle v1 and later response or v0 without offsets log.debug("Handling ListOffsetResponse response for {}. 
Fetched offset {}, timestamp {}", topicPartition, partition.offset(), partition.timestamp()); if (partition.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) { Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH) ? Optional.empty() : Optional.of(partition.leaderEpoch()); ListOffsetData offsetData = new ListOffsetData(partition.offset(), partition.timestamp(), leaderEpoch); fetchedOffsets.put(topicPartition, offsetData); } } break; case UNSUPPORTED_FOR_MESSAGE_FORMAT: // The message format on the broker side is before 0.10.0, which means it does not // support timestamps. We treat this case the same as if we weren't able to find an // offset corresponding to the requested timestamp and leave it out of the result. log.debug("Cannot search by timestamp for partition {} because the message format version " + "is before 0.10.0", topicPartition); break; case NOT_LEADER_OR_FOLLOWER: case REPLICA_NOT_AVAILABLE: case KAFKA_STORAGE_ERROR: case OFFSET_NOT_AVAILABLE: case LEADER_NOT_AVAILABLE: case FENCED_LEADER_EPOCH: case UNKNOWN_LEADER_EPOCH: log.debug("Attempt to fetch offsets for partition {} failed due to {}, retrying.", topicPartition, error); partitionsToRetry.add(topicPartition); break; case UNKNOWN_TOPIC_OR_PARTITION: log.warn("Received unknown topic or partition error in ListOffset request for partition {}", topicPartition); partitionsToRetry.add(topicPartition); break; case TOPIC_AUTHORIZATION_FAILED: unauthorizedTopics.add(topicPartition.topic()); break; default: log.warn("Attempt to fetch offsets for partition {} failed due to unexpected exception: {}, retrying.", topicPartition, error.message()); partitionsToRetry.add(topicPartition); } } } if (!unauthorizedTopics.isEmpty()) future.raise(new TopicAuthorizationException(unauthorizedTopics)); else future.complete(new ListOffsetResult(fetchedOffsets, partitionsToRetry)); } static class ListOffsetResult { private final Map<TopicPartition, ListOffsetData> fetchedOffsets; 
private final Set<TopicPartition> partitionsToRetry; ListOffsetResult(Map<TopicPartition, ListOffsetData> fetchedOffsets, Set<TopicPartition> partitionsNeedingRetry) { this.fetchedOffsets = fetchedOffsets; this.partitionsToRetry = partitionsNeedingRetry; } ListOffsetResult() { this.fetchedOffsets = new HashMap<>(); this.partitionsToRetry = new HashSet<>(); } } /** * If we have seen new metadata (as tracked by {@link org.apache.kafka.clients.Metadata#updateVersion()}), then * we should check that all the assignments have a valid position. */ public void validatePositionsOnMetadataChange() { int newMetadataUpdateVersion = metadata.updateVersion(); if (metadataUpdateVersion.getAndSet(newMetadataUpdateVersion) != newMetadataUpdateVersion) { subscriptions.assignedPartitions().forEach(topicPartition -> { ConsumerMetadata.LeaderAndEpoch leaderAndEpoch = metadata.currentLeader(topicPartition); subscriptions.maybeValidatePositionForCurrentLeader(apiVersions, topicPartition, leaderAndEpoch); }); } } private Map<Node, Map<TopicPartition, FetchPosition>> regroupFetchPositionsByLeader( Map<TopicPartition, FetchPosition> partitionMap) { return partitionMap.entrySet() .stream() .filter(entry -> entry.getValue().currentLeader.leader.isPresent()) .collect(Collectors.groupingBy(entry -> entry.getValue().currentLeader.leader.get(), Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); } private <T> Map<Node, Map<TopicPartition, T>> regroupPartitionMapByNode(Map<TopicPartition, T> partitionMap) { return partitionMap.entrySet() .stream() .collect(Collectors.groupingBy(entry -> metadata.fetch().leaderFor(entry.getKey()), Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); } private Set<String> topicsForPartitions(Collection<TopicPartition> partitions) { return partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet()); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/OffsetsForLeaderEpochClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest; import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse; import org.apache.kafka.common.utils.LogContext; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; /** * Convenience class for making asynchronous requests to the OffsetsForLeaderEpoch API 
*/ public class OffsetsForLeaderEpochClient extends AsyncClient< Map<TopicPartition, SubscriptionState.FetchPosition>, OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse, OffsetsForLeaderEpochClient.OffsetForEpochResult> { OffsetsForLeaderEpochClient(ConsumerNetworkClient client, LogContext logContext) { super(client, logContext); } @Override protected AbstractRequest.Builder<OffsetsForLeaderEpochRequest> prepareRequest( Node node, Map<TopicPartition, SubscriptionState.FetchPosition> requestData) { OffsetForLeaderTopicCollection topics = new OffsetForLeaderTopicCollection(requestData.size()); requestData.forEach((topicPartition, fetchPosition) -> fetchPosition.offsetEpoch.ifPresent(fetchEpoch -> { OffsetForLeaderTopic topic = topics.find(topicPartition.topic()); if (topic == null) { topic = new OffsetForLeaderTopic().setTopic(topicPartition.topic()); topics.add(topic); } topic.partitions().add(new OffsetForLeaderPartition() .setPartition(topicPartition.partition()) .setLeaderEpoch(fetchEpoch) .setCurrentLeaderEpoch(fetchPosition.currentLeader.epoch .orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)) ); }) ); return OffsetsForLeaderEpochRequest.Builder.forConsumer(topics); } @Override protected OffsetForEpochResult handleResponse( Node node, Map<TopicPartition, SubscriptionState.FetchPosition> requestData, OffsetsForLeaderEpochResponse response) { Set<TopicPartition> partitionsToRetry = new HashSet<>(requestData.keySet()); Set<String> unauthorizedTopics = new HashSet<>(); Map<TopicPartition, EpochEndOffset> endOffsets = new HashMap<>(); for (OffsetForLeaderTopicResult topic : response.data().topics()) { for (EpochEndOffset partition : topic.partitions()) { TopicPartition topicPartition = new TopicPartition(topic.topic(), partition.partition()); if (!requestData.containsKey(topicPartition)) { logger().warn("Received unrequested topic or partition {} from response, ignoring.", topicPartition); continue; } Errors error = Errors.forCode(partition.errorCode()); 
switch (error) { case NONE: logger().debug("Handling OffsetsForLeaderEpoch response for {}. Got offset {} for epoch {}.", topicPartition, partition.endOffset(), partition.leaderEpoch()); endOffsets.put(topicPartition, partition); partitionsToRetry.remove(topicPartition); break; case NOT_LEADER_OR_FOLLOWER: case REPLICA_NOT_AVAILABLE: case KAFKA_STORAGE_ERROR: case OFFSET_NOT_AVAILABLE: case LEADER_NOT_AVAILABLE: case FENCED_LEADER_EPOCH: case UNKNOWN_LEADER_EPOCH: logger().debug("Attempt to fetch offsets for partition {} failed due to {}, retrying.", topicPartition, error); break; case UNKNOWN_TOPIC_OR_PARTITION: logger().warn("Received unknown topic or partition error in OffsetsForLeaderEpoch request for partition {}.", topicPartition); break; case TOPIC_AUTHORIZATION_FAILED: unauthorizedTopics.add(topicPartition.topic()); partitionsToRetry.remove(topicPartition); break; default: logger().warn("Attempt to fetch offsets for partition {} failed due to: {}, retrying.", topicPartition, error.message()); } } } if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); else return new OffsetForEpochResult(endOffsets, partitionsToRetry); } public static class OffsetForEpochResult { private final Map<TopicPartition, EpochEndOffset> endOffsets; private final Set<TopicPartition> partitionsToRetry; OffsetForEpochResult(Map<TopicPartition, EpochEndOffset> endOffsets, Set<TopicPartition> partitionsNeedingRetry) { this.endOffsets = endOffsets; this.partitionsToRetry = partitionsNeedingRetry; } public Map<TopicPartition, EpochEndOffset> endOffsets() { return endOffsets; } public Set<TopicPartition> partitionsToRetry() { return partitionsToRetry; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/PrototypeAsyncConsumer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerInterceptor; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.CommitApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.EventHandler; import org.apache.kafka.clients.consumer.internals.events.OffsetFetchApplicationEvent; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; 
import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsContext; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.time.Duration; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.OptionalLong; import java.util.Properties; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; /** * This prototype consumer uses the EventHandler to process application * events so that the network IO can be processed in a background thread. Visit * <a href="https://cwiki.apache.org/confluence/display/KAFKA/Proposal%3A+Consumer+Threading+Model+Refactor" >this document</a> * for detail implementation. 
*/ public class PrototypeAsyncConsumer<K, V> implements Consumer<K, V> { private static final String CLIENT_ID_METRIC_TAG = "client-id"; private static final String JMX_PREFIX = "kafka.consumer"; static final long DEFAULT_CLOSE_TIMEOUT_MS = 30 * 1000; private final LogContext logContext; private final EventHandler eventHandler; private final Time time; private final Optional<String> groupId; private final String clientId; private final Logger log; private final SubscriptionState subscriptions; private final Metrics metrics; private final long defaultApiTimeoutMs; public PrototypeAsyncConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) { this(Utils.propsToMap(properties), keyDeserializer, valueDeserializer); } public PrototypeAsyncConsumer(final Map<String, Object> configs, final Deserializer<K> keyDeser, final Deserializer<V> valDeser) { this(new ConsumerConfig(appendDeserializerToConfig(configs, keyDeser, valDeser)), keyDeser, valDeser); } @SuppressWarnings("unchecked") public PrototypeAsyncConsumer(final ConsumerConfig config, final Deserializer<K> keyDeserializer, final Deserializer<V> valueDeserializer) { this.time = Time.SYSTEM; GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig(config, GroupRebalanceConfig.ProtocolType.CONSUMER); this.groupId = Optional.ofNullable(groupRebalanceConfig.groupId); this.clientId = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); // If group.instance.id is set, we will append it to the log context. 
if (groupRebalanceConfig.groupInstanceId.isPresent()) { logContext = new LogContext("[Consumer instanceId=" + groupRebalanceConfig.groupInstanceId.get() + ", clientId=" + clientId + ", groupId=" + groupId.orElse("null") + "] "); } else { logContext = new LogContext("[Consumer clientId=" + clientId + ", groupId=" + groupId.orElse("null") + "] "); } this.log = logContext.logger(getClass()); OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.valueOf(config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT)); this.subscriptions = new SubscriptionState(logContext, offsetResetStrategy); this.metrics = buildMetrics(config, time, clientId); List<ConsumerInterceptor<K, V>> interceptorList = (List) config.getConfiguredInstances( ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptor.class, Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)); ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(keyDeserializer, valueDeserializer, metrics.reporters(), interceptorList); this.eventHandler = new DefaultEventHandler( config, groupRebalanceConfig, logContext, subscriptions, new ApiVersions(), this.metrics, clusterResourceListeners, null // this is coming from the fetcher, but we don't have one ); } // Visible for testing PrototypeAsyncConsumer( Time time, LogContext logContext, ConsumerConfig config, SubscriptionState subscriptionState, EventHandler eventHandler, Metrics metrics, ClusterResourceListeners clusterResourceListeners, Optional<String> groupId, String clientId, int defaultApiTimeoutMs) { this.time = time; this.logContext = logContext; this.log = logContext.logger(getClass()); this.subscriptions = subscriptionState; this.metrics = metrics; this.groupId = groupId; this.defaultApiTimeoutMs = defaultApiTimeoutMs; this.clientId = clientId; this.eventHandler = eventHandler; } /** * poll implementation using {@link EventHandler}. * 1. Poll for background events. 
If there's a fetch response event, process the record and return it. If it is * another type of event, process it. * 2. Send fetches if needed. * If the timeout expires, return an empty ConsumerRecord. * * @param timeout timeout of the poll loop * @return ConsumerRecord. It can be empty if time timeout expires. */ @Override public ConsumerRecords<K, V> poll(final Duration timeout) { try { do { if (!eventHandler.isEmpty()) { final Optional<BackgroundEvent> backgroundEvent = eventHandler.poll(); // processEvent() may process 3 types of event: // 1. Errors // 2. Callback Invocation // 3. Fetch responses // Errors will be handled or rethrown. // Callback invocation will trigger callback function execution, which is blocking until completion. // Successful fetch responses will be added to the completedFetches in the fetcher, which will then // be processed in the collectFetches(). backgroundEvent.ifPresent(event -> processEvent(event, timeout)); } // The idea here is to have the background thread sending fetches autonomously, and the fetcher // uses the poll loop to retrieve successful fetchResponse and process them on the polling thread. final Fetch<K, V> fetch = collectFetches(); if (!fetch.isEmpty()) { return processFetchResults(fetch); } // We will wait for retryBackoffMs } while (time.timer(timeout).notExpired()); } catch (final Exception e) { throw new RuntimeException(e); } return ConsumerRecords.empty(); } /** * Commit offsets returned on the last {@link #poll(Duration) poll()} for all the subscribed list of topics and * partitions. 
*/ @Override public void commitSync() { commitSync(Duration.ofMillis(defaultApiTimeoutMs)); } private void processEvent(final BackgroundEvent backgroundEvent, final Duration timeout) { // stubbed class } private ConsumerRecords<K, V> processFetchResults(final Fetch<K, V> fetch) { // stubbed class return ConsumerRecords.empty(); } private Fetch<K, V> collectFetches() { // stubbed class return Fetch.empty(); } /** * This method sends a commit event to the EventHandler and return. */ @Override public void commitAsync() { commitAsync(null); } @Override public void commitAsync(OffsetCommitCallback callback) { commitAsync(subscriptions.allConsumed(), callback); } @Override public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) { CompletableFuture<Void> future = commit(offsets); final OffsetCommitCallback commitCallback = callback == null ? new DefaultOffsetCommitCallback() : callback; future.whenComplete((r, t) -> { if (t != null) { commitCallback.onComplete(offsets, new KafkaException(t)); } else { commitCallback.onComplete(offsets, null); } }).exceptionally(e -> { System.out.println(e); throw new KafkaException(e); }); } // Visible for testing CompletableFuture<Void> commit(Map<TopicPartition, OffsetAndMetadata> offsets) { maybeThrowInvalidGroupIdException(); final CommitApplicationEvent commitEvent = new CommitApplicationEvent(offsets); eventHandler.add(commitEvent); return commitEvent.future(); } @Override public void seek(TopicPartition partition, long offset) { throw new KafkaException("method not implemented"); } @Override public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) { throw new KafkaException("method not implemented"); } @Override public void seekToBeginning(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } @Override public void seekToEnd(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } 
@Override public long position(TopicPartition partition) { throw new KafkaException("method not implemented"); } @Override public long position(TopicPartition partition, Duration timeout) { throw new KafkaException("method not implemented"); } @Override @Deprecated public OffsetAndMetadata committed(TopicPartition partition) { throw new KafkaException("method not implemented"); } @Override @Deprecated public OffsetAndMetadata committed(TopicPartition partition, Duration timeout) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) { return committed(partitions, Duration.ofMillis(defaultApiTimeoutMs)); } @Override public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions, final Duration timeout) { maybeThrowInvalidGroupIdException(); if (partitions.isEmpty()) { return new HashMap<>(); } final OffsetFetchApplicationEvent event = new OffsetFetchApplicationEvent(partitions); eventHandler.add(event); try { return event.complete(Duration.ofMillis(100)); } catch (InterruptedException e) { throw new InterruptException(e); } catch (TimeoutException e) { throw new org.apache.kafka.common.errors.TimeoutException(e); } catch (ExecutionException e) { // Execution exception is thrown here throw new KafkaException(e); } catch (Exception e) { throw e; } } private void maybeThrowInvalidGroupIdException() { if (!groupId.isPresent() || groupId.get().isEmpty()) { throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must " + "provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration."); } } @Override public Map<MetricName, ? 
extends Metric> metrics() { throw new KafkaException("method not implemented"); } @Override public List<PartitionInfo> partitionsFor(String topic) { throw new KafkaException("method not implemented"); } @Override public List<PartitionInfo> partitionsFor(String topic, Duration timeout) { throw new KafkaException("method not implemented"); } @Override public Map<String, List<PartitionInfo>> listTopics() { throw new KafkaException("method not implemented"); } @Override public Map<String, List<PartitionInfo>> listTopics(Duration timeout) { throw new KafkaException("method not implemented"); } @Override public Set<TopicPartition> paused() { throw new KafkaException("method not implemented"); } @Override public void pause(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } @Override public void resume(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Duration timeout) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Duration timeout) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } @Override public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Duration timeout) { throw new KafkaException("method not implemented"); } @Override public OptionalLong 
currentLag(TopicPartition topicPartition) { throw new KafkaException("method not implemented"); } @Override public ConsumerGroupMetadata groupMetadata() { throw new KafkaException("method not implemented"); } @Override public void enforceRebalance() { throw new KafkaException("method not implemented"); } @Override public void enforceRebalance(String reason) { throw new KafkaException("method not implemented"); } @Override public void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); } @Override public void close(Duration timeout) { AtomicReference<Throwable> firstException = new AtomicReference<>(); Utils.closeQuietly(this.eventHandler, "event handler", firstException); log.debug("Kafka consumer has been closed"); Throwable exception = firstException.get(); if (exception != null) { if (exception instanceof InterruptException) { throw (InterruptException) exception; } throw new KafkaException("Failed to close kafka consumer", exception); } } @Override public void wakeup() { } /** * This method sends a commit event to the EventHandler and waits for * the event to finish. * * @param timeout max wait time for the blocking operation. 
*/ @Override public void commitSync(final Duration timeout) { commitSync(subscriptions.allConsumed(), timeout); } @Override public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) { commitSync(offsets, Duration.ofMillis(defaultApiTimeoutMs)); } @Override public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, Duration timeout) { CompletableFuture<Void> commitFuture = commit(offsets); try { commitFuture.get(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (final TimeoutException e) { throw new org.apache.kafka.common.errors.TimeoutException(e); } catch (final InterruptedException e) { throw new InterruptException(e); } catch (final ExecutionException e) { throw new KafkaException(e); } catch (final Exception e) { throw e; } } @Override public Set<TopicPartition> assignment() { throw new KafkaException("method not implemented"); } /** * Get the current subscription. or an empty set if no such call has * been made. * @return The set of topics currently subscribed to */ @Override public Set<String> subscription() { return Collections.unmodifiableSet(this.subscriptions.subscription()); } @Override public void subscribe(Collection<String> topics) { throw new KafkaException("method not implemented"); } @Override public void subscribe(Collection<String> topics, ConsumerRebalanceListener callback) { throw new KafkaException("method not implemented"); } @Override public void assign(Collection<TopicPartition> partitions) { throw new KafkaException("method not implemented"); } @Override public void subscribe(Pattern pattern, ConsumerRebalanceListener callback) { throw new KafkaException("method not implemented"); } @Override public void subscribe(Pattern pattern) { throw new KafkaException("method not implemented"); } @Override public void unsubscribe() { throw new KafkaException("method not implemented"); } @Override @Deprecated public ConsumerRecords<K, V> poll(long timeout) { throw new KafkaException("method not implemented"); } private 
static <K, V> ClusterResourceListeners configureClusterResourceListeners( final Deserializer<K> keyDeserializer, final Deserializer<V> valueDeserializer, final List<?>... candidateLists) { ClusterResourceListeners clusterResourceListeners = new ClusterResourceListeners(); for (List<?> candidateList: candidateLists) clusterResourceListeners.maybeAddAll(candidateList); clusterResourceListeners.maybeAdd(keyDeserializer); clusterResourceListeners.maybeAdd(valueDeserializer); return clusterResourceListeners; } // This is here temporary as we don't have public access to the ConsumerConfig in this module. public static Map<String, Object> appendDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { // validate deserializer configuration, if the passed deserializer instance is null, the user must explicitly set a valid deserializer configuration value Map<String, Object> newConfigs = new HashMap<>(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); else if (newConfigs.get(KEY_DESERIALIZER_CLASS_CONFIG) == null) throw new ConfigException(KEY_DESERIALIZER_CLASS_CONFIG, null, "must be non-null."); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); else if (newConfigs.get(VALUE_DESERIALIZER_CLASS_CONFIG) == null) throw new ConfigException(VALUE_DESERIALIZER_CLASS_CONFIG, null, "must be non-null."); return newConfigs; } private static Metrics buildMetrics( final ConsumerConfig config, final Time time, final String clientId) { Map<String, String> metricsTags = Collections.singletonMap(CLIENT_ID_METRIC_TAG, clientId); MetricConfig metricConfig = new MetricConfig() .samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG)) .timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) 
.recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricsTags); List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config); MetricsContext metricsContext = new KafkaMetricsContext( JMX_PREFIX, config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); return new Metrics(metricConfig, reporters, time, metricsContext); } private class DefaultOffsetCommitCallback implements OffsetCommitCallback { @Override public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) { if (exception != null) log.error("Offset commit with offsets {} failed", offsets, exception); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/RequestFuture.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.Timer;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Result of an asynchronous request from {@link ConsumerNetworkClient}. Use {@link ConsumerNetworkClient#poll(Timer)}
 * (and variants) to finish a request future. Use {@link #isDone()} to check if the future is complete, and
 * {@link #succeeded()} to check if the request completed successfully. Typical usage might look like this:
 *
 * <pre>
 *     RequestFuture<ClientResponse> future = client.send(api, request);
 *     client.poll(future);
 *
 *     if (future.succeeded()) {
 *         ClientResponse response = future.value();
 *         // Handle response
 *     } else {
 *         throw future.exception();
 *     }
 * </pre>
 *
 * <p>Thread-safety note (from the visible implementation): completion is a single
 * compare-and-set on {@code result}, listeners live in a concurrent queue, and
 * {@code completedLatch} lets waiters block until either {@link #complete(Object)}
 * or {@link #raise(RuntimeException)} has finished.
 *
 * @param <T> Return type of the result (Can be Void if there is no response)
 */
public class RequestFuture<T> implements ConsumerNetworkClient.PollCondition {

    // Placeholder stored in `result` until the future completes; lets us distinguish
    // "not done" from a legitimate null success value.
    private static final Object INCOMPLETE_SENTINEL = new Object();

    // Holds INCOMPLETE_SENTINEL, the success value, or the failure RuntimeException.
    // A successful CAS from the sentinel is the single point of completion.
    private final AtomicReference<Object> result = new AtomicReference<>(INCOMPLETE_SENTINEL);

    // Listeners are drained (polled) when the future completes, so each listener
    // fires at most once even if completion and addListener race.
    private final ConcurrentLinkedQueue<RequestFutureListener<T>> listeners = new ConcurrentLinkedQueue<>();

    // Counted down in a finally block by complete()/raise() so awaitDone() always unblocks.
    private final CountDownLatch completedLatch = new CountDownLatch(1);

    /**
     * Check whether the response is ready to be handled
     * @return true if the response is ready, false otherwise
     */
    public boolean isDone() {
        return result.get() != INCOMPLETE_SENTINEL;
    }

    /**
     * Block until the future completes (successfully or not) or the timeout elapses.
     *
     * @param timeout maximum time to wait
     * @param unit time unit of {@code timeout}
     * @return true if the future completed within the timeout
     * @throws InterruptedException if the waiting thread is interrupted
     */
    public boolean awaitDone(long timeout, TimeUnit unit) throws InterruptedException {
        return completedLatch.await(timeout, unit);
    }

    /**
     * Get the value corresponding to this request (only available if the request succeeded)
     * @return the value set in {@link #complete(Object)}
     * @throws IllegalStateException if the future is not complete or failed
     */
    @SuppressWarnings("unchecked")
    public T value() {
        if (!succeeded())
            throw new IllegalStateException("Attempt to retrieve value from future which hasn't successfully completed");
        return (T) result.get();
    }

    /**
     * Check if the request succeeded;
     * @return true if the request completed and was successful
     */
    public boolean succeeded() {
        return isDone() && !failed();
    }

    /**
     * Check if the request failed.
     * @return true if the request completed with a failure
     */
    public boolean failed() {
        // A failure is represented by storing the RuntimeException itself in `result`;
        // complete() rejects RuntimeException values, so this check is unambiguous.
        return result.get() instanceof RuntimeException;
    }

    /**
     * Check if the request is retriable (convenience method for checking if
     * the exception is an instance of {@link RetriableException}.
     * @return true if it is retriable, false otherwise
     * @throws IllegalStateException if the future is not complete or completed successfully
     */
    public boolean isRetriable() {
        return exception() instanceof RetriableException;
    }

    /**
     * Get the exception from a failed result (only available if the request failed)
     * @return the exception set in {@link #raise(RuntimeException)}
     * @throws IllegalStateException if the future is not complete or completed successfully
     */
    public RuntimeException exception() {
        if (!failed())
            throw new IllegalStateException("Attempt to retrieve exception from future which hasn't failed");
        return (RuntimeException) result.get();
    }

    /**
     * Complete the request successfully. After this call, {@link #succeeded()} will return true
     * and the value can be obtained through {@link #value()}.
     * @param value corresponding value (or null if there is none)
     * @throws IllegalStateException if the future has already been completed
     * @throws IllegalArgumentException if the argument is an instance of {@link RuntimeException}
     */
    public void complete(T value) {
        try {
            if (value instanceof RuntimeException)
                throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException");

            // The CAS guarantees only one of complete()/raise() wins.
            if (!result.compareAndSet(INCOMPLETE_SENTINEL, value))
                throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
            fireSuccess();
        } finally {
            // Always release waiters, even if a listener threw.
            completedLatch.countDown();
        }
    }

    /**
     * Raise an exception. The request will be marked as failed, and the caller can either
     * handle the exception or throw it.
     * @param e corresponding exception to be passed to caller
     * @throws IllegalStateException if the future has already been completed
     */
    public void raise(RuntimeException e) {
        try {
            if (e == null)
                throw new IllegalArgumentException("The exception passed to raise must not be null");

            if (!result.compareAndSet(INCOMPLETE_SENTINEL, e))
                throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
            fireFailure();
        } finally {
            completedLatch.countDown();
        }
    }

    /**
     * Raise an error. The request will be marked as failed.
     * @param error corresponding error to be passed to caller
     */
    public void raise(Errors error) {
        raise(error.exception());
    }

    // Drain and notify all registered listeners of success. Polling the queue (rather than
    // iterating) ensures a listener added concurrently is fired exactly once, either here
    // or by addListener's post-add state check.
    private void fireSuccess() {
        T value = value();
        while (true) {
            RequestFutureListener<T> listener = listeners.poll();
            if (listener == null)
                break;
            listener.onSuccess(value);
        }
    }

    // Drain and notify all registered listeners of failure (same drain pattern as fireSuccess).
    private void fireFailure() {
        RuntimeException exception = exception();
        while (true) {
            RequestFutureListener<T> listener = listeners.poll();
            if (listener == null)
                break;
            listener.onFailure(exception);
        }
    }

    /**
     * Add a listener which will be notified when the future completes
     * @param listener non-null listener to add
     */
    public void addListener(RequestFutureListener<T> listener) {
        this.listeners.add(listener);
        // Re-check state after enqueuing: if completion raced with this add, fire now.
        // The poll-based drains make double-notification impossible.
        if (failed())
            fireFailure();
        else if (succeeded())
            fireSuccess();
    }

    /**
     * Convert from a request future of one type to another type
     * @param adapter The adapter which does the conversion
     * @param <S> The type of the future adapted to
     * @return The new future
     */
    public <S> RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter) {
        final RequestFuture<S> adapted = new RequestFuture<>();
        addListener(new RequestFutureListener<T>() {
            @Override
            public void onSuccess(T value) {
                adapter.onSuccess(value, adapted);
            }

            @Override
            public void onFailure(RuntimeException e) {
                adapter.onFailure(e, adapted);
            }
        });
        return adapted;
    }

    /**
     * Propagate this future's outcome (value or exception) into another future of the same type.
     * @param future the downstream future to complete when this one completes
     */
    public void chain(final RequestFuture<T> future) {
        addListener(new RequestFutureListener<T>() {
            @Override
            public void onSuccess(T value) {
                future.complete(value);
            }

            @Override
            public void onFailure(RuntimeException e) {
                future.raise(e);
            }
        });
    }

    /** Create a future already completed with the given failure. */
    public static <T> RequestFuture<T> failure(RuntimeException e) {
        RequestFuture<T> future = new RequestFuture<>();
        future.raise(e);
        return future;
    }

    /** Create a Void future already completed successfully. */
    public static RequestFuture<Void> voidSuccess() {
        RequestFuture<Void> future = new RequestFuture<>();
        future.complete(null);
        return future;
    }

    /** Create a future pre-failed with {@link Errors#COORDINATOR_NOT_AVAILABLE}. */
    public static <T> RequestFuture<T> coordinatorNotAvailable() {
        return failure(Errors.COORDINATOR_NOT_AVAILABLE.exception());
    }

    /** Create a future pre-failed with {@link NoAvailableBrokersException}. */
    public static <T> RequestFuture<T> noBrokersAvailable() {
        return failure(new NoAvailableBrokersException());
    }

    // PollCondition: keep polling while the future is incomplete.
    @Override
    public boolean shouldBlock() {
        return !isDone();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/RequestFutureAdapter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

/**
 * Adapt from a request future of one type to another. Used by
 * {@link RequestFuture#compose(RequestFutureAdapter)} to transform a completed
 * value of type {@code F} into a completion of a downstream future of type {@code T}.
 *
 * @param <F> Type to adapt from
 * @param <T> Type to adapt to
 */
public abstract class RequestFutureAdapter<F, T> {

    /**
     * Invoked when the upstream future succeeds; the implementation is responsible
     * for completing (or failing) the downstream {@code future}.
     *
     * @param value the successful upstream value
     * @param future the downstream future to complete
     */
    public abstract void onSuccess(F value, RequestFuture<T> future);

    /**
     * Invoked when the upstream future fails. The default simply propagates the
     * exception to the downstream future; override to translate or recover.
     *
     * @param e the upstream failure
     * @param future the downstream future to fail (by default)
     */
    public void onFailure(RuntimeException e, RequestFuture<T> future) {
        future.raise(e);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/RequestFutureListener.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; /** * Listener interface to hook into RequestFuture completion. */ public interface RequestFutureListener<T> { void onSuccess(T value); void onFailure(RuntimeException e); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/RequestManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult;

/**
 * A manager that is polled by the background thread to produce network requests.
 * {@code PollResult} consist of {@code UnsentRequest} if there are requests to send; otherwise, return the time till
 * the next poll event.
 */
public interface RequestManager {

    /**
     * Poll this manager for work.
     *
     * @param currentTimeMs current time in milliseconds
     * @return requests to send, or the delay until the next poll if there is nothing to do
     */
    PollResult poll(long currentTimeMs);

    /** Identifies the kind of request manager. */
    enum Type {
        COORDINATOR, COMMIT
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/RequestState.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.utils.ExponentialBackoff; class RequestState { final static int RETRY_BACKOFF_EXP_BASE = 2; final static double RETRY_BACKOFF_JITTER = 0.2; private final ExponentialBackoff exponentialBackoff; private long lastSentMs = -1; private long lastReceivedMs = -1; private int numAttempts = 0; private long backoffMs = 0; public RequestState(long retryBackoffMs) { this.exponentialBackoff = new ExponentialBackoff( retryBackoffMs, RETRY_BACKOFF_EXP_BASE, retryBackoffMs, RETRY_BACKOFF_JITTER ); } // Visible for testing RequestState(final long retryBackoffMs, final int retryBackoffExpBase, final long retryBackoffMaxMs, final double jitter) { this.exponentialBackoff = new ExponentialBackoff( retryBackoffMs, retryBackoffExpBase, retryBackoffMaxMs, jitter ); } /** * Reset request state so that new requests can be sent immediately * and the backoff is restored to its minimal configuration. 
*/ public void reset() { this.lastSentMs = -1; this.lastReceivedMs = -1; this.numAttempts = 0; this.backoffMs = exponentialBackoff.backoff(0); } public boolean canSendRequest(final long currentTimeMs) { if (this.lastSentMs == -1) { // no request has been sent return true; } if (this.lastReceivedMs == -1 || this.lastReceivedMs < this.lastSentMs) { // there is an inflight request return false; } return requestBackoffExpired(currentTimeMs); } public void onSendAttempt(final long currentTimeMs) { // Here we update the timer everytime we try to send a request. Also increment number of attempts. this.lastSentMs = currentTimeMs; } /** * Callback invoked after a successful send. This resets the number of attempts * to 0, but the minimal backoff will still be enforced prior to allowing a new * send. To send immediately, use {@link #reset()}. * * @param currentTimeMs Current time in milliseconds */ public void onSuccessfulAttempt(final long currentTimeMs) { this.lastReceivedMs = currentTimeMs; this.backoffMs = exponentialBackoff.backoff(0); this.numAttempts = 0; } /** * Callback invoked after a failed send. The number of attempts * will be incremented, which may increase the backoff before allowing * the next send attempt. * * @param currentTimeMs Current time in milliseconds */ public void onFailedAttempt(final long currentTimeMs) { this.lastReceivedMs = currentTimeMs; this.backoffMs = exponentialBackoff.backoff(numAttempts); this.numAttempts++; } private boolean requestBackoffExpired(final long currentTimeMs) { return remainingBackoffMs(currentTimeMs) <= 0; } long remainingBackoffMs(final long currentTimeMs) { long timeSinceLastReceiveMs = currentTimeMs - this.lastReceivedMs; return Math.max(0, backoffMs - timeSinceLastReceiveMs); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/SensorBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.Metric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Meter; import org.apache.kafka.common.metrics.stats.Min; import org.apache.kafka.common.metrics.stats.SampledStat; import org.apache.kafka.common.metrics.stats.Value; import java.util.Collections; import java.util.Map; import java.util.function.Supplier; /** * {@code SensorBuilder} takes a bit of the boilerplate out of creating {@link Sensor sensors} for recording * {@link Metric metrics}. 
*/ public class SensorBuilder { private final Metrics metrics; private final Sensor sensor; private final boolean prexisting; private final Map<String, String> tags; public SensorBuilder(Metrics metrics, String name) { this(metrics, name, Collections::emptyMap); } public SensorBuilder(Metrics metrics, String name, Supplier<Map<String, String>> tagsSupplier) { this.metrics = metrics; Sensor s = metrics.getSensor(name); if (s != null) { sensor = s; tags = Collections.emptyMap(); prexisting = true; } else { sensor = metrics.sensor(name); tags = tagsSupplier.get(); prexisting = false; } } SensorBuilder withAvg(MetricNameTemplate name) { if (!prexisting) sensor.add(metrics.metricInstance(name, tags), new Avg()); return this; } SensorBuilder withMin(MetricNameTemplate name) { if (!prexisting) sensor.add(metrics.metricInstance(name, tags), new Min()); return this; } SensorBuilder withMax(MetricNameTemplate name) { if (!prexisting) sensor.add(metrics.metricInstance(name, tags), new Max()); return this; } SensorBuilder withValue(MetricNameTemplate name) { if (!prexisting) sensor.add(metrics.metricInstance(name, tags), new Value()); return this; } SensorBuilder withMeter(MetricNameTemplate rateName, MetricNameTemplate totalName) { if (!prexisting) { sensor.add(new Meter(metrics.metricInstance(rateName, tags), metrics.metricInstance(totalName, tags))); } return this; } SensorBuilder withMeter(SampledStat sampledStat, MetricNameTemplate rateName, MetricNameTemplate totalName) { if (!prexisting) { sensor.add(new Meter(sampledStat, metrics.metricInstance(rateName, tags), metrics.metricInstance(totalName, tags))); } return this; } Sensor build() { return sensor; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.internals.PartitionStates; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.function.LongSupplier; import java.util.function.Predicate; import java.util.regex.Pattern; import 
static org.apache.kafka.clients.consumer.internals.OffsetFetcher.hasUsableOffsetForLeaderEpochVersion; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET; /** * A class for tracking the topics, partitions, and offsets for the consumer. A partition * is "assigned" either directly with {@link #assignFromUser(Set)} (manual assignment) * or with {@link #assignFromSubscribed(Collection)} (automatic assignment from subscription). * <p> * Once assigned, the partition is not considered "fetchable" until its initial position has * been set with {@link #seekValidated(TopicPartition, FetchPosition)}. Fetchable partitions * track a position which is the last offset that has been returned to the user. You can * suspend fetching from a partition through {@link #pause(TopicPartition)} without affecting the consumed * position. The partition will remain unfetchable until the {@link #resume(TopicPartition)} is * used. You can also query the pause state independently with {@link #isPaused(TopicPartition)}. * <p> * Note that pause state as well as the consumed positions are not preserved when partition * assignment is changed whether directly by the user or through a group rebalance. * <p> * Thread Safety: this class is thread-safe. */ public class SubscriptionState { private static final String SUBSCRIPTION_EXCEPTION_MESSAGE = "Subscription to topics, partitions and pattern are mutually exclusive"; private final Logger log; private enum SubscriptionType { NONE, AUTO_TOPICS, AUTO_PATTERN, USER_ASSIGNED } /* the type of subscription */ private SubscriptionType subscriptionType; /* the pattern user has requested */ private Pattern subscribedPattern; /* the list of topics the user has requested */ private Set<String> subscription; /* The list of topics the group has subscribed to. 
This may include some topics which are not part * of `subscription` for the leader of a group since it is responsible for detecting metadata changes * which require a group rebalance. */ private Set<String> groupSubscription; /* the partitions that are currently assigned, note that the order of partition matters (see FetchBuilder for more details) */ private final PartitionStates<TopicPartitionState> assignment; /* Default offset reset strategy */ private final OffsetResetStrategy defaultResetStrategy; /* User-provided listener to be invoked when assignment changes */ private ConsumerRebalanceListener rebalanceListener; private int assignmentId = 0; @Override public synchronized String toString() { return "SubscriptionState{" + "type=" + subscriptionType + ", subscribedPattern=" + subscribedPattern + ", subscription=" + String.join(",", subscription) + ", groupSubscription=" + String.join(",", groupSubscription) + ", defaultResetStrategy=" + defaultResetStrategy + ", assignment=" + assignment.partitionStateValues() + " (id=" + assignmentId + ")}"; } public synchronized String prettyString() { switch (subscriptionType) { case NONE: return "None"; case AUTO_TOPICS: return "Subscribe(" + String.join(",", subscription) + ")"; case AUTO_PATTERN: return "Subscribe(" + subscribedPattern + ")"; case USER_ASSIGNED: return "Assign(" + assignedPartitions() + " , id=" + assignmentId + ")"; default: throw new IllegalStateException("Unrecognized subscription type: " + subscriptionType); } } public SubscriptionState(LogContext logContext, OffsetResetStrategy defaultResetStrategy) { this.log = logContext.logger(this.getClass()); this.defaultResetStrategy = defaultResetStrategy; this.subscription = new TreeSet<>(); // use a sorted set for better logging this.assignment = new PartitionStates<>(); this.groupSubscription = new HashSet<>(); this.subscribedPattern = null; this.subscriptionType = SubscriptionType.NONE; } /** * Monotonically increasing id which is incremented after every 
assignment change. This can * be used to check when an assignment has changed. * * @return The current assignment Id */ synchronized int assignmentId() { return assignmentId; } /** * This method sets the subscription type if it is not already set (i.e. when it is NONE), * or verifies that the subscription type is equal to the give type when it is set (i.e. * when it is not NONE) * @param type The given subscription type */ private void setSubscriptionType(SubscriptionType type) { if (this.subscriptionType == SubscriptionType.NONE) this.subscriptionType = type; else if (this.subscriptionType != type) throw new IllegalStateException(SUBSCRIPTION_EXCEPTION_MESSAGE); } public synchronized boolean subscribe(Set<String> topics, ConsumerRebalanceListener listener) { registerRebalanceListener(listener); setSubscriptionType(SubscriptionType.AUTO_TOPICS); return changeSubscription(topics); } public synchronized void subscribe(Pattern pattern, ConsumerRebalanceListener listener) { registerRebalanceListener(listener); setSubscriptionType(SubscriptionType.AUTO_PATTERN); this.subscribedPattern = pattern; } public synchronized boolean subscribeFromPattern(Set<String> topics) { if (subscriptionType != SubscriptionType.AUTO_PATTERN) throw new IllegalArgumentException("Attempt to subscribe from pattern while subscription type set to " + subscriptionType); return changeSubscription(topics); } private boolean changeSubscription(Set<String> topicsToSubscribe) { if (subscription.equals(topicsToSubscribe)) return false; subscription = topicsToSubscribe; return true; } /** * Set the current group subscription. This is used by the group leader to ensure * that it receives metadata updates for all topics that the group is interested in. 
* * @param topics All topics from the group subscription * @return true if the group subscription contains topics which are not part of the local subscription */ synchronized boolean groupSubscribe(Collection<String> topics) { if (!hasAutoAssignedPartitions()) throw new IllegalStateException(SUBSCRIPTION_EXCEPTION_MESSAGE); groupSubscription = new HashSet<>(topics); return !subscription.containsAll(groupSubscription); } /** * Reset the group's subscription to only contain topics subscribed by this consumer. */ synchronized void resetGroupSubscription() { groupSubscription = Collections.emptySet(); } /** * Change the assignment to the specified partitions provided by the user, * note this is different from {@link #assignFromSubscribed(Collection)} * whose input partitions are provided from the subscribed topics. */ public synchronized boolean assignFromUser(Set<TopicPartition> partitions) { setSubscriptionType(SubscriptionType.USER_ASSIGNED); if (this.assignment.partitionSet().equals(partitions)) return false; assignmentId++; // update the subscribed topics Set<String> manualSubscribedTopics = new HashSet<>(); Map<TopicPartition, TopicPartitionState> partitionToState = new HashMap<>(); for (TopicPartition partition : partitions) { TopicPartitionState state = assignment.stateValue(partition); if (state == null) state = new TopicPartitionState(); partitionToState.put(partition, state); manualSubscribedTopics.add(partition.topic()); } this.assignment.set(partitionToState); return changeSubscription(manualSubscribedTopics); } /** * @return true if assignments matches subscription, otherwise false */ public synchronized boolean checkAssignmentMatchedSubscription(Collection<TopicPartition> assignments) { for (TopicPartition topicPartition : assignments) { if (this.subscribedPattern != null) { if (!this.subscribedPattern.matcher(topicPartition.topic()).matches()) { log.info("Assigned partition {} for non-subscribed topic regex pattern; subscription pattern is {}", 
topicPartition, this.subscribedPattern); return false; } } else { if (!this.subscription.contains(topicPartition.topic())) { log.info("Assigned partition {} for non-subscribed topic; subscription is {}", topicPartition, this.subscription); return false; } } } return true; } /** * Change the assignment to the specified partitions returned from the coordinator, note this is * different from {@link #assignFromUser(Set)} which directly set the assignment from user inputs. */ public synchronized void assignFromSubscribed(Collection<TopicPartition> assignments) { if (!this.hasAutoAssignedPartitions()) throw new IllegalArgumentException("Attempt to dynamically assign partitions while manual assignment in use"); Map<TopicPartition, TopicPartitionState> assignedPartitionStates = new HashMap<>(assignments.size()); for (TopicPartition tp : assignments) { TopicPartitionState state = this.assignment.stateValue(tp); if (state == null) state = new TopicPartitionState(); assignedPartitionStates.put(tp, state); } assignmentId++; this.assignment.set(assignedPartitionStates); } private void registerRebalanceListener(ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); this.rebalanceListener = listener; } /** * Check whether pattern subscription is in use. * */ synchronized boolean hasPatternSubscription() { return this.subscriptionType == SubscriptionType.AUTO_PATTERN; } public synchronized boolean hasNoSubscriptionOrUserAssignment() { return this.subscriptionType == SubscriptionType.NONE; } public synchronized void unsubscribe() { this.subscription = Collections.emptySet(); this.groupSubscription = Collections.emptySet(); this.assignment.clear(); this.subscribedPattern = null; this.subscriptionType = SubscriptionType.NONE; this.assignmentId++; } /** * Check whether a topic matches a subscribed pattern. 
* * @return true if pattern subscription is in use and the topic matches the subscribed pattern, false otherwise */ synchronized boolean matchesSubscribedPattern(String topic) { Pattern pattern = this.subscribedPattern; if (hasPatternSubscription() && pattern != null) return pattern.matcher(topic).matches(); return false; } public synchronized Set<String> subscription() { if (hasAutoAssignedPartitions()) return this.subscription; return Collections.emptySet(); } public synchronized Set<TopicPartition> pausedPartitions() { return collectPartitions(TopicPartitionState::isPaused); } /** * Get the subscription topics for which metadata is required. For the leader, this will include * the union of the subscriptions of all group members. For followers, it is just that member's * subscription. This is used when querying topic metadata to detect the metadata changes which would * require rebalancing. The leader fetches metadata for all topics in the group so that it * can do the partition assignment (which requires at least partition counts for all topics * to be assigned). * * @return The union of all subscribed topics in the group if this member is the leader * of the current generation; otherwise it returns the same set as {@link #subscription()} */ synchronized Set<String> metadataTopics() { if (groupSubscription.isEmpty()) return subscription; else if (groupSubscription.containsAll(subscription)) return groupSubscription; else { // When subscription changes `groupSubscription` may be outdated, ensure that // new subscription topics are returned. 
Set<String> topics = new HashSet<>(groupSubscription); topics.addAll(subscription); return topics; } } synchronized boolean needsMetadata(String topic) { return subscription.contains(topic) || groupSubscription.contains(topic); } private TopicPartitionState assignedState(TopicPartition tp) { TopicPartitionState state = this.assignment.stateValue(tp); if (state == null) throw new IllegalStateException("No current assignment for partition " + tp); return state; } private TopicPartitionState assignedStateOrNull(TopicPartition tp) { return this.assignment.stateValue(tp); } public synchronized void seekValidated(TopicPartition tp, FetchPosition position) { assignedState(tp).seekValidated(position); } public void seek(TopicPartition tp, long offset) { seekValidated(tp, new FetchPosition(offset)); } public void seekUnvalidated(TopicPartition tp, FetchPosition position) { assignedState(tp).seekUnvalidated(position); } synchronized void maybeSeekUnvalidated(TopicPartition tp, FetchPosition position, OffsetResetStrategy requestedResetStrategy) { TopicPartitionState state = assignedStateOrNull(tp); if (state == null) { log.debug("Skipping reset of partition {} since it is no longer assigned", tp); } else if (!state.awaitingReset()) { log.debug("Skipping reset of partition {} since reset is no longer needed", tp); } else if (requestedResetStrategy != state.resetStrategy) { log.debug("Skipping reset of partition {} since an alternative reset has been requested", tp); } else { log.info("Resetting offset for partition {} to position {}.", tp, position); state.seekUnvalidated(position); } } /** * @return a modifiable copy of the currently assigned partitions */ public synchronized Set<TopicPartition> assignedPartitions() { return new HashSet<>(this.assignment.partitionSet()); } /** * @return a modifiable copy of the currently assigned partitions as a list */ public synchronized List<TopicPartition> assignedPartitionsList() { return new ArrayList<>(this.assignment.partitionSet()); 
} /** * Provides the number of assigned partitions in a thread safe manner. * @return the number of assigned partitions. */ synchronized int numAssignedPartitions() { return this.assignment.size(); } // Visible for testing public synchronized List<TopicPartition> fetchablePartitions(Predicate<TopicPartition> isAvailable) { // Since this is in the hot-path for fetching, we do this instead of using java.util.stream API List<TopicPartition> result = new ArrayList<>(); assignment.forEach((topicPartition, topicPartitionState) -> { // Cheap check is first to avoid evaluating the predicate if possible if (topicPartitionState.isFetchable() && isAvailable.test(topicPartition)) { result.add(topicPartition); } }); return result; } public synchronized boolean hasAutoAssignedPartitions() { return this.subscriptionType == SubscriptionType.AUTO_TOPICS || this.subscriptionType == SubscriptionType.AUTO_PATTERN; } public synchronized void position(TopicPartition tp, FetchPosition position) { assignedState(tp).position(position); } /** * Enter the offset validation state if the leader for this partition is known to support a usable version of the * OffsetsForLeaderEpoch API. If the leader node does not support the API, simply complete the offset validation. 
* * @param apiVersions supported API versions * @param tp topic partition to validate * @param leaderAndEpoch leader epoch of the topic partition * @return true if we enter the offset validation state */ public synchronized boolean maybeValidatePositionForCurrentLeader(ApiVersions apiVersions, TopicPartition tp, Metadata.LeaderAndEpoch leaderAndEpoch) { if (leaderAndEpoch.leader.isPresent()) { NodeApiVersions nodeApiVersions = apiVersions.get(leaderAndEpoch.leader.get().idString()); if (nodeApiVersions == null || hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) { return assignedState(tp).maybeValidatePosition(leaderAndEpoch); } else { // If the broker does not support a newer version of OffsetsForLeaderEpoch, we skip validation assignedState(tp).updatePositionLeaderNoValidation(leaderAndEpoch); return false; } } else { return assignedState(tp).maybeValidatePosition(leaderAndEpoch); } } /** * Attempt to complete validation with the end offset returned from the OffsetForLeaderEpoch request. * @return Log truncation details if detected and no reset policy is defined. 
*/ public synchronized Optional<LogTruncation> maybeCompleteValidation(TopicPartition tp, FetchPosition requestPosition, EpochEndOffset epochEndOffset) { TopicPartitionState state = assignedStateOrNull(tp); if (state == null) { log.debug("Skipping completed validation for partition {} which is not currently assigned.", tp); } else if (!state.awaitingValidation()) { log.debug("Skipping completed validation for partition {} which is no longer expecting validation.", tp); } else { SubscriptionState.FetchPosition currentPosition = state.position; if (!currentPosition.equals(requestPosition)) { log.debug("Skipping completed validation for partition {} since the current position {} " + "no longer matches the position {} when the request was sent", tp, currentPosition, requestPosition); } else if (epochEndOffset.endOffset() == UNDEFINED_EPOCH_OFFSET || epochEndOffset.leaderEpoch() == UNDEFINED_EPOCH) { if (hasDefaultOffsetResetPolicy()) { log.info("Truncation detected for partition {} at offset {}, resetting offset", tp, currentPosition); requestOffsetReset(tp); } else { log.warn("Truncation detected for partition {} at offset {}, but no reset policy is set", tp, currentPosition); return Optional.of(new LogTruncation(tp, requestPosition, Optional.empty())); } } else if (epochEndOffset.endOffset() < currentPosition.offset) { if (hasDefaultOffsetResetPolicy()) { SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition( epochEndOffset.endOffset(), Optional.of(epochEndOffset.leaderEpoch()), currentPosition.currentLeader); log.info("Truncation detected for partition {} at offset {}, resetting offset to " + "the first offset known to diverge {}", tp, currentPosition, newPosition); state.seekValidated(newPosition); } else { OffsetAndMetadata divergentOffset = new OffsetAndMetadata(epochEndOffset.endOffset(), Optional.of(epochEndOffset.leaderEpoch()), null); log.warn("Truncation detected for partition {} at offset {} (the end offset from the " + "broker 
is {}), but no reset policy is set", tp, currentPosition, divergentOffset); return Optional.of(new LogTruncation(tp, requestPosition, Optional.of(divergentOffset))); } } else { state.completeValidation(); } } return Optional.empty(); } public synchronized boolean awaitingValidation(TopicPartition tp) { return assignedState(tp).awaitingValidation(); } public synchronized void completeValidation(TopicPartition tp) { assignedState(tp).completeValidation(); } public synchronized FetchPosition validPosition(TopicPartition tp) { return assignedState(tp).validPosition(); } public synchronized FetchPosition position(TopicPartition tp) { return assignedState(tp).position; } public synchronized Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel) { TopicPartitionState topicPartitionState = assignedState(tp); if (topicPartitionState.position == null) { return null; } else if (isolationLevel == IsolationLevel.READ_COMMITTED) { return topicPartitionState.lastStableOffset == null ? null : topicPartitionState.lastStableOffset - topicPartitionState.position.offset; } else { return topicPartitionState.highWatermark == null ? 
null : topicPartitionState.highWatermark - topicPartitionState.position.offset; } } public synchronized Long partitionEndOffset(TopicPartition tp, IsolationLevel isolationLevel) { TopicPartitionState topicPartitionState = assignedState(tp); if (isolationLevel == IsolationLevel.READ_COMMITTED) { return topicPartitionState.lastStableOffset; } else { return topicPartitionState.highWatermark; } } public synchronized void requestPartitionEndOffset(TopicPartition tp) { TopicPartitionState topicPartitionState = assignedState(tp); topicPartitionState.requestEndOffset(); } public synchronized boolean partitionEndOffsetRequested(TopicPartition tp) { TopicPartitionState topicPartitionState = assignedState(tp); return topicPartitionState.endOffsetRequested(); } synchronized Long partitionLead(TopicPartition tp) { TopicPartitionState topicPartitionState = assignedState(tp); return topicPartitionState.logStartOffset == null ? null : topicPartitionState.position.offset - topicPartitionState.logStartOffset; } synchronized void updateHighWatermark(TopicPartition tp, long highWatermark) { assignedState(tp).highWatermark(highWatermark); } synchronized void updateLogStartOffset(TopicPartition tp, long logStartOffset) { assignedState(tp).logStartOffset(logStartOffset); } synchronized void updateLastStableOffset(TopicPartition tp, long lastStableOffset) { assignedState(tp).lastStableOffset(lastStableOffset); } /** * Set the preferred read replica with a lease timeout. After this time, the replica will no longer be valid and * {@link #preferredReadReplica(TopicPartition, long)} will return an empty result. 
* * @param tp The topic partition * @param preferredReadReplicaId The preferred read replica * @param timeMs The time at which this preferred replica is no longer valid */ public synchronized void updatePreferredReadReplica(TopicPartition tp, int preferredReadReplicaId, LongSupplier timeMs) { assignedState(tp).updatePreferredReadReplica(preferredReadReplicaId, timeMs); } /** * Get the preferred read replica * * @param tp The topic partition * @param timeMs The current time * @return Returns the current preferred read replica, if it has been set and if it has not expired. */ public synchronized Optional<Integer> preferredReadReplica(TopicPartition tp, long timeMs) { final TopicPartitionState topicPartitionState = assignedStateOrNull(tp); if (topicPartitionState == null) { return Optional.empty(); } else { return topicPartitionState.preferredReadReplica(timeMs); } } /** * Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches. * * @param tp The topic partition * @return the removed preferred read replica if set, None otherwise. 
*/
    public synchronized Optional<Integer> clearPreferredReadReplica(TopicPartition tp) {
        final TopicPartitionState topicPartitionState = assignedStateOrNull(tp);
        if (topicPartitionState == null) {
            return Optional.empty();
        } else {
            return topicPartitionState.clearPreferredReadReplica();
        }
    }

    /**
     * Snapshot the last consumed position for every assigned partition that currently has a
     * valid position. Partitions still initializing, resetting, or validating are omitted.
     *
     * @return map of partition to its consumed {@link OffsetAndMetadata} (metadata is empty)
     */
    public synchronized Map<TopicPartition, OffsetAndMetadata> allConsumed() {
        Map<TopicPartition, OffsetAndMetadata> allConsumed = new HashMap<>();
        assignment.forEach((topicPartition, partitionState) -> {
            if (partitionState.hasValidPosition())
                allConsumed.put(topicPartition, new OffsetAndMetadata(partitionState.position.offset,
                    partitionState.position.offsetEpoch, ""));
        });
        return allConsumed;
    }

    /**
     * Request that the given partition's offset be reset using the given strategy.
     * The partition must currently be assigned (throws otherwise, via assignedState).
     */
    public synchronized void requestOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy) {
        assignedState(partition).reset(offsetResetStrategy);
    }

    /**
     * Request an offset reset for each of the given partitions with the given strategy.
     */
    public synchronized void requestOffsetReset(Collection<TopicPartition> partitions, OffsetResetStrategy offsetResetStrategy) {
        partitions.forEach(tp -> {
            log.info("Seeking to {} offset of partition {}", offsetResetStrategy, tp);
            assignedState(tp).reset(offsetResetStrategy);
        });
    }

    // Convenience overload using the configured default reset strategy.
    public void requestOffsetReset(TopicPartition partition) {
        requestOffsetReset(partition, defaultResetStrategy);
    }

    // Set the earliest time at which a reset/validation may be retried for these partitions.
    synchronized void setNextAllowedRetry(Set<TopicPartition> partitions, long nextAllowResetTimeMs) {
        for (TopicPartition partition : partitions) {
            assignedState(partition).setNextAllowedRetry(nextAllowResetTimeMs);
        }
    }

    // True unless auto.offset.reset is NONE.
    boolean hasDefaultOffsetResetPolicy() {
        return defaultResetStrategy != OffsetResetStrategy.NONE;
    }

    public synchronized boolean isOffsetResetNeeded(TopicPartition partition) {
        return assignedState(partition).awaitingReset();
    }

    public synchronized OffsetResetStrategy resetStrategy(TopicPartition partition) {
        return assignedState(partition).resetStrategy();
    }

    /**
     * @return true if every assigned partition has a valid fetch position
     */
    public synchronized boolean hasAllFetchPositions() {
        // Since this is in the hot-path for fetching, we do this instead of using java.util.stream API
        Iterator<TopicPartitionState> it = assignment.stateIterator();
        while (it.hasNext()) {
            if (!it.next().hasValidPosition()) {
                return false;
            }
        }
        return true;
    }

    // Partitions still in the INITIALIZING fetch state (no position established yet).
    public synchronized Set<TopicPartition> initializingPartitions() {
        return collectPartitions(state -> state.fetchState.equals(FetchStates.INITIALIZING));
    }

    // Collect the assigned partitions whose state matches the given predicate.
    private Set<TopicPartition> collectPartitions(Predicate<TopicPartitionState> filter) {
        Set<TopicPartition> result = new HashSet<>();
        assignment.forEach((topicPartition, topicPartitionState) -> {
            if (filter.test(topicPartitionState)) {
                result.add(topicPartition);
            }
        });
        return result;
    }

    /**
     * For every INITIALIZING partition, either request a reset using the default strategy, or —
     * if the default strategy is NONE — collect it and throw {@link NoOffsetForPartitionException}
     * for the whole set.
     */
    public synchronized void resetInitializingPositions() {
        final Set<TopicPartition> partitionsWithNoOffsets = new HashSet<>();
        assignment.forEach((tp, partitionState) -> {
            if (partitionState.fetchState.equals(FetchStates.INITIALIZING)) {
                if (defaultResetStrategy == OffsetResetStrategy.NONE)
                    partitionsWithNoOffsets.add(tp);
                else
                    requestOffsetReset(tp);
            }
        });
        if (!partitionsWithNoOffsets.isEmpty())
            throw new NoOffsetForPartitionException(partitionsWithNoOffsets);
    }

    // Partitions awaiting reset whose retry backoff (if any) has elapsed by nowMs.
    public synchronized Set<TopicPartition> partitionsNeedingReset(long nowMs) {
        return collectPartitions(state -> state.awaitingReset() && !state.awaitingRetryBackoff(nowMs));
    }

    // Partitions awaiting offset validation whose retry backoff (if any) has elapsed by nowMs.
    public synchronized Set<TopicPartition> partitionsNeedingValidation(long nowMs) {
        return collectPartitions(state -> state.awaitingValidation() && !state.awaitingRetryBackoff(nowMs));
    }

    public synchronized boolean isAssigned(TopicPartition tp) {
        return assignment.contains(tp);
    }

    // Returns false (rather than throwing) when the partition is not assigned.
    public synchronized boolean isPaused(TopicPartition tp) {
        TopicPartitionState assignedOrNull = assignedStateOrNull(tp);
        return assignedOrNull != null && assignedOrNull.isPaused();
    }

    // Returns false (rather than throwing) when the partition is not assigned.
    synchronized boolean isFetchable(TopicPartition tp) {
        TopicPartitionState assignedOrNull = assignedStateOrNull(tp);
        return assignedOrNull != null && assignedOrNull.isFetchable();
    }

    // Returns false (rather than throwing) when the partition is not assigned.
    public synchronized boolean hasValidPosition(TopicPartition tp) {
        TopicPartitionState assignedOrNull = assignedStateOrNull(tp);
        return assignedOrNull != null && assignedOrNull.hasValidPosition();
    }

    public synchronized void pause(TopicPartition tp) {
        assignedState(tp).pause();
    }

    public synchronized void markPendingRevocation(Set<TopicPartition> tps) {
        tps.forEach(tp -> assignedState(tp).markPendingRevocation());
    }

    public synchronized void resume(TopicPartition tp) {
        assignedState(tp).resume();
    }

    // Record a failed request for these partitions and schedule the next allowed retry time.
    synchronized void requestFailed(Set<TopicPartition> partitions, long nextRetryTimeMs) {
        for (TopicPartition partition : partitions) {
            // by the time the request failed, the assignment may no longer
            // contain this partition any more, in which case we would just ignore.
            final TopicPartitionState state = assignedStateOrNull(partition);
            if (state != null)
                state.requestFailed(nextRetryTimeMs);
        }
    }

    // Move the partition to the end of the assignment's iteration order (round-robin fairness).
    synchronized void movePartitionToEnd(TopicPartition tp) {
        assignment.moveToEnd(tp);
    }

    public synchronized ConsumerRebalanceListener rebalanceListener() {
        return rebalanceListener;
    }

    /**
     * Per-partition fetch state: the current {@link FetchState}, the consumed position,
     * watermarks, pause/revocation flags, the pending reset strategy, retry backoff, and the
     * preferred read replica (with its expiration time).
     */
    private static class TopicPartitionState {

        private FetchState fetchState;
        private FetchPosition position; // last consumed position
        private Long highWatermark; // the high watermark from last fetch
        private Long logStartOffset; // the log start offset
        private Long lastStableOffset;
        private boolean paused; // whether this partition has been paused by the user
        private boolean pendingRevocation;
        private OffsetResetStrategy resetStrategy; // the strategy to use if the offset needs resetting
        private Long nextRetryTimeMs;
        private Integer preferredReadReplica;
        private Long preferredReadReplicaExpireTimeMs;
        private boolean endOffsetRequested;

        TopicPartitionState() {
            this.paused = false;
            this.pendingRevocation = false;
            this.endOffsetRequested = false;
            this.fetchState = FetchStates.INITIALIZING;
            this.position = null;
            this.highWatermark = null;
            this.logStartOffset = null;
            this.lastStableOffset = null;
            this.resetStrategy = null;
            this.nextRetryTimeMs = null;
            this.preferredReadReplica = null;
        }

        public boolean endOffsetRequested() {
            return endOffsetRequested;
        }

        public void requestEndOffset() {
            endOffsetRequested = true;
        }

        /**
         * Attempt the transition to {@code newState}. If the transition is valid per
         * {@link FetchState#transitionTo}, apply it, run the callback, then enforce the
         * position invariant of the new state (position must exist if required, and is
         * cleared if not required). Invalid transitions are silently ignored.
         */
        private void transitionState(FetchState newState, Runnable runIfTransitioned) {
            FetchState nextState = this.fetchState.transitionTo(newState);
            if (nextState.equals(newState)) {
                this.fetchState = nextState;
                runIfTransitioned.run();
                if (this.position == null && nextState.requiresPosition()) {
                    throw new IllegalStateException("Transitioned subscription state to " + nextState + ", but position is null");
                } else if (!nextState.requiresPosition()) {
                    this.position = null;
                }
            }
        }

        // Returns the preferred read replica, clearing it first if it has expired at timeMs.
        private Optional<Integer> preferredReadReplica(long timeMs) {
            if (preferredReadReplicaExpireTimeMs != null && timeMs > preferredReadReplicaExpireTimeMs) {
                preferredReadReplica = null;
                return Optional.empty();
            } else {
                return Optional.ofNullable(preferredReadReplica);
            }
        }

        // Store a new preferred read replica; the expire time is only refreshed when the replica changes.
        private void updatePreferredReadReplica(int preferredReadReplica, LongSupplier timeMs) {
            if (this.preferredReadReplica == null || preferredReadReplica != this.preferredReadReplica) {
                this.preferredReadReplica = preferredReadReplica;
                this.preferredReadReplicaExpireTimeMs = timeMs.getAsLong();
            }
        }

        // Clear the preferred read replica, returning the id that was removed (if any).
        private Optional<Integer> clearPreferredReadReplica() {
            if (preferredReadReplica != null) {
                int removedReplicaId = this.preferredReadReplica;
                this.preferredReadReplica = null;
                this.preferredReadReplicaExpireTimeMs = null;
                return Optional.of(removedReplicaId);
            } else {
                return Optional.empty();
            }
        }

        // Enter AWAIT_RESET with the given strategy, clearing any pending retry backoff.
        private void reset(OffsetResetStrategy strategy) {
            transitionState(FetchStates.AWAIT_RESET, () -> {
                this.resetStrategy = strategy;
                this.nextRetryTimeMs = null;
            });
        }

        /**
         * Check if the position exists and needs to be validated. If so, enter the AWAIT_VALIDATION state. This method
         * also will update the position with the current leader and epoch.
         *
         * @param currentLeaderAndEpoch leader and epoch to compare the offset with
         * @return true if the position is now awaiting validation
         */
        private boolean maybeValidatePosition(Metadata.LeaderAndEpoch currentLeaderAndEpoch) {
            if (this.fetchState.equals(FetchStates.AWAIT_RESET)) {
                return false;
            }
            if (!currentLeaderAndEpoch.leader.isPresent()) {
                return false;
            }
            if (position != null && !position.currentLeader.equals(currentLeaderAndEpoch)) {
                FetchPosition newPosition = new FetchPosition(position.offset, position.offsetEpoch, currentLeaderAndEpoch);
                validatePosition(newPosition);
                preferredReadReplica = null;
            }
            return this.fetchState.equals(FetchStates.AWAIT_VALIDATION);
        }

        /**
         * For older versions of the API, we cannot perform offset validation so we simply transition directly to FETCHING
         */
        private void updatePositionLeaderNoValidation(Metadata.LeaderAndEpoch currentLeaderAndEpoch) {
            if (position != null) {
                transitionState(FetchStates.FETCHING, () -> {
                    this.position = new FetchPosition(position.offset, position.offsetEpoch, currentLeaderAndEpoch);
                    this.nextRetryTimeMs = null;
                });
            }
        }

        // Enter AWAIT_VALIDATION if both the position's offset epoch and the leader epoch are
        // known; otherwise validation is impossible and we go straight to FETCHING.
        private void validatePosition(FetchPosition position) {
            if (position.offsetEpoch.isPresent() && position.currentLeader.epoch.isPresent()) {
                transitionState(FetchStates.AWAIT_VALIDATION, () -> {
                    this.position = position;
                    this.nextRetryTimeMs = null;
                });
            } else {
                // If we have no epoch information for the current position, then we can skip validation
                transitionState(FetchStates.FETCHING, () -> {
                    this.position = position;
                    this.nextRetryTimeMs = null;
                });
            }
        }

        /**
         * Clear the awaiting validation state and enter fetching.
         */
        private void completeValidation() {
            if (hasPosition()) {
                transitionState(FetchStates.FETCHING, () -> this.nextRetryTimeMs = null);
            }
        }

        private boolean awaitingValidation() {
            return fetchState.equals(FetchStates.AWAIT_VALIDATION);
        }

        private boolean awaitingRetryBackoff(long nowMs) {
            return nextRetryTimeMs != null && nowMs < nextRetryTimeMs;
        }

        private boolean awaitingReset() {
            return fetchState.equals(FetchStates.AWAIT_RESET);
        }

        private void setNextAllowedRetry(long nextAllowedRetryTimeMs) {
            this.nextRetryTimeMs = nextAllowedRetryTimeMs;
        }

        private void requestFailed(long nextAllowedRetryTimeMs) {
            this.nextRetryTimeMs = nextAllowedRetryTimeMs;
        }

        private boolean hasValidPosition() {
            return fetchState.hasValidPosition();
        }

        private boolean hasPosition() {
            return position != null;
        }

        private boolean isPaused() {
            return paused;
        }

        // Seek to an already-validated position: enter FETCHING and clear reset/retry state.
        private void seekValidated(FetchPosition position) {
            transitionState(FetchStates.FETCHING, () -> {
                this.position = position;
                this.resetStrategy = null;
                this.nextRetryTimeMs = null;
            });
        }

        // Seek, then immediately (re)enter validation if epoch information allows it.
        private void seekUnvalidated(FetchPosition fetchPosition) {
            seekValidated(fetchPosition);
            validatePosition(fetchPosition);
        }

        private void position(FetchPosition position) {
            if (!hasValidPosition())
                throw new IllegalStateException("Cannot set a new position without a valid current position");
            this.position = position;
        }

        // The position if valid, otherwise null.
        private FetchPosition validPosition() {
            if (hasValidPosition()) {
                return position;
            } else {
                return null;
            }
        }

        private void pause() {
            this.paused = true;
        }

        private void markPendingRevocation() {
            this.pendingRevocation = true;
        }

        private void resume() {
            this.paused = false;
        }

        // Fetchable means: not paused, not about to be revoked, and position is valid.
        private boolean isFetchable() {
            return !paused && !pendingRevocation && hasValidPosition();
        }

        private void highWatermark(Long highWatermark) {
            this.highWatermark = highWatermark;
            this.endOffsetRequested = false;
        }

        private void logStartOffset(Long logStartOffset) {
            this.logStartOffset = logStartOffset;
        }

        private void lastStableOffset(Long lastStableOffset) {
            this.lastStableOffset = lastStableOffset;
            this.endOffsetRequested = false;
        }

        private OffsetResetStrategy resetStrategy() {
            return resetStrategy;
        }
    }

    /**
     * The fetch state of a partition. This class is used to determine valid state transitions and expose some of
     * the behavior of the current fetch state. Actual state variables are stored in the {@link TopicPartitionState}.
     */
    interface FetchState {

        // Returns newState when the transition is allowed, otherwise the current state unchanged.
        default FetchState transitionTo(FetchState newState) {
            if (validTransitions().contains(newState)) {
                return newState;
            } else {
                return this;
            }
        }

        /**
         * Return the valid states which this state can transition to
         */
        Collection<FetchState> validTransitions();

        /**
         * Test if this state requires a position to be set
         */
        boolean requiresPosition();

        /**
         * Test if this state is considered to have a valid position which can be used for fetching
         */
        boolean hasValidPosition();
    }

    /**
     * An enumeration of all the possible fetch states. The state transitions are encoded in the values returned by
     * {@link FetchState#validTransitions}.
     */
    enum FetchStates implements FetchState {
        INITIALIZING() {
            @Override
            public Collection<FetchState> validTransitions() {
                return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET, FetchStates.AWAIT_VALIDATION);
            }

            @Override
            public boolean requiresPosition() {
                return false;
            }

            @Override
            public boolean hasValidPosition() {
                return false;
            }
        },

        FETCHING() {
            @Override
            public Collection<FetchState> validTransitions() {
                return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET, FetchStates.AWAIT_VALIDATION);
            }

            @Override
            public boolean requiresPosition() {
                return true;
            }

            @Override
            public boolean hasValidPosition() {
                return true;
            }
        },

        AWAIT_RESET() {
            @Override
            public Collection<FetchState> validTransitions() {
                return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET);
            }

            @Override
            public boolean requiresPosition() {
                return false;
            }

            @Override
            public boolean hasValidPosition() {
                return false;
            }
        },

        AWAIT_VALIDATION() {
            @Override
            public Collection<FetchState> validTransitions() {
                return Arrays.asList(FetchStates.FETCHING, FetchStates.AWAIT_RESET, FetchStates.AWAIT_VALIDATION);
            }

            @Override
            public boolean requiresPosition() {
                return true;
            }

            @Override
            public boolean hasValidPosition() {
                return false;
            }
        }
    }

    /**
     * Represents the position of a partition subscription.
     *
     * This includes the offset and epoch from the last record in
     * the batch from a FetchResponse. It also includes the leader epoch at the time the batch was consumed.
     */
    public static class FetchPosition {
        public final long offset;
        final Optional<Integer> offsetEpoch;
        final Metadata.LeaderAndEpoch currentLeader;

        FetchPosition(long offset) {
            this(offset, Optional.empty(), Metadata.LeaderAndEpoch.noLeaderOrEpoch());
        }

        public FetchPosition(long offset, Optional<Integer> offsetEpoch, Metadata.LeaderAndEpoch currentLeader) {
            this.offset = offset;
            this.offsetEpoch = Objects.requireNonNull(offsetEpoch);
            this.currentLeader = Objects.requireNonNull(currentLeader);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            FetchPosition that = (FetchPosition) o;
            return offset == that.offset &&
                offsetEpoch.equals(that.offsetEpoch) &&
                currentLeader.equals(that.currentLeader);
        }

        @Override
        public int hashCode() {
            return Objects.hash(offset, offsetEpoch, currentLeader);
        }

        @Override
        public String toString() {
            return "FetchPosition{" +
                "offset=" + offset +
                ", offsetEpoch=" + offsetEpoch +
                ", currentLeader=" + currentLeader +
                '}';
        }
    }

    /**
     * Describes a detected log truncation: the partition, the position we were fetching from,
     * and (when the broker reported one) the offset at which the log diverged.
     */
    public static class LogTruncation {
        public final TopicPartition topicPartition;
        public final FetchPosition fetchPosition;
        public final Optional<OffsetAndMetadata> divergentOffsetOpt;

        public LogTruncation(TopicPartition topicPartition,
                             FetchPosition fetchPosition,
                             Optional<OffsetAndMetadata> divergentOffsetOpt) {
            this.topicPartition = topicPartition;
            this.fetchPosition = fetchPosition;
            this.divergentOffsetOpt = divergentOffsetOpt;
        }

        @Override
        public String toString() {
            StringBuilder bldr = new StringBuilder()
                .append("(partition=")
                .append(topicPartition)
                .append(", fetchOffset=")
                .append(fetchPosition.offset)
                .append(", fetchEpoch=")
                .append(fetchPosition.offsetEpoch);

            if (divergentOffsetOpt.isPresent()) {
                OffsetAndMetadata divergentOffset = divergentOffsetOpt.get();
                bldr.append(", divergentOffset=")
                    .append(divergentOffset.offset())
                    .append(", divergentEpoch=")
                    .append(divergentOffset.leaderEpoch());
            } else {
                bldr.append(", divergentOffset=unknown")
                    .append(", divergentEpoch=unknown");
            }

            return bldr.append(")").toString();
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * {@link TopicMetadataFetcher} is responsible for fetching the {@link PartitionInfo} for a given set of topics.
 * All methods are blocking up to the {@link Timer timeout} provided.
 */
public class TopicMetadataFetcher {

    private final Logger log;
    private final ConsumerNetworkClient client;
    // Sleep between metadata retries when the broker returns a retriable error.
    private final long retryBackoffMs;

    public TopicMetadataFetcher(LogContext logContext, ConsumerNetworkClient client, long retryBackoffMs) {
        this.log = logContext.logger(getClass());
        this.client = client;
        this.retryBackoffMs = retryBackoffMs;
    }

    /**
     * Fetches the {@link PartitionInfo partition information} for the given topic in the cluster, or {@code null}.
     *
     * @param timer Timer bounding how long this method can block
     * @return The {@link List list} of {@link PartitionInfo partition information}, or {@code null} if the topic is
     *         unknown
     */
    public List<PartitionInfo> getTopicMetadata(String topic, boolean allowAutoTopicCreation, Timer timer) {
        MetadataRequest.Builder request = new MetadataRequest.Builder(Collections.singletonList(topic), allowAutoTopicCreation);
        Map<String, List<PartitionInfo>> topicMetadata = getTopicMetadata(request, timer);
        return topicMetadata.get(topic);
    }

    /**
     * Fetches the {@link PartitionInfo partition information} for all topics in the cluster.
     *
     * @param timer Timer bounding how long this method can block
     * @return The map of topics with their {@link PartitionInfo partition information}
     */
    public Map<String, List<PartitionInfo>> getAllTopicMetadata(Timer timer) {
        MetadataRequest.Builder request = MetadataRequest.Builder.allTopics();
        return getTopicMetadata(request, timer);
    }

    /**
     * Get metadata for all topics present in Kafka cluster.
     *
     * Retries on retriable per-topic errors (with {@code retryBackoffMs} sleeps) until the
     * timer expires; fatal errors (invalid topic, authorization, unexpected) are thrown
     * immediately. Unknown topics are simply absent from the returned map.
     *
     * @param request The MetadataRequest to send
     * @param timer   Timer bounding how long this method can block
     * @return The map of topics with their partition information
     */
    private Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, Timer timer) {
        // Save the round trip if no topics are requested.
        if (!request.isAllTopics() && request.emptyTopicList())
            return Collections.emptyMap();

        do {
            RequestFuture<ClientResponse> future = sendMetadataRequest(request);
            client.poll(future, timer);

            // A non-retriable failure (e.g. authentication) is fatal; a retriable one falls
            // through to the backoff sleep below.
            if (future.failed() && !future.isRetriable())
                throw future.exception();

            if (future.succeeded()) {
                MetadataResponse response = (MetadataResponse) future.value().responseBody();
                Cluster cluster = response.buildCluster();

                Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
                if (!unauthorizedTopics.isEmpty())
                    throw new TopicAuthorizationException(unauthorizedTopics);

                boolean shouldRetry = false;
                Map<String, Errors> errors = response.errors();
                if (!errors.isEmpty()) {
                    // if there were errors, we need to check whether they were fatal or whether
                    // we should just retry
                    log.debug("Topic metadata fetch included errors: {}", errors);

                    for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) {
                        String topic = errorEntry.getKey();
                        Errors error = errorEntry.getValue();

                        if (error == Errors.INVALID_TOPIC_EXCEPTION)
                            throw new InvalidTopicException("Topic '" + topic + "' is invalid");
                        else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
                            // if a requested topic is unknown, we just continue and let it be absent
                            // in the returned map
                            continue;
                        else if (error.exception() instanceof RetriableException)
                            shouldRetry = true;
                        else
                            throw new KafkaException("Unexpected error fetching metadata for topic " + topic,
                                    error.exception());
                    }
                }

                if (!shouldRetry) {
                    HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();

                    for (String topic : cluster.topics())
                        topicsPartitionInfos.put(topic, cluster.partitionsForTopic(topic));

                    return topicsPartitionInfos;
                }
            }

            timer.sleep(retryBackoffMs);
        } while (timer.notExpired());

        throw new TimeoutException("Timeout expired while fetching topic metadata");
    }

    /**
     * Send Metadata Request to the least loaded node in Kafka cluster asynchronously
     * @return A future that indicates result of sent metadata request
     */
    private RequestFuture<ClientResponse> sendMetadataRequest(MetadataRequest.Builder request) {
        final Node node = client.leastLoadedNode();
        if (node == null)
            return RequestFuture.noBrokersAvailable();
        else
            return client.send(node, request);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/Utils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals;

import java.io.Serializable;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;

/**
 * Shared comparator helpers for the consumer internals.
 */
public final class Utils {

    /**
     * Orders partitions primarily by the size of the list registered for their topic in the
     * supplied map (ascending), breaking ties by topic name and then by partition number.
     *
     * NOTE(review): assumes every topic referenced by a compared {@link TopicPartition} has an
     * entry in {@code map}; a missing entry would throw NPE, as in the original code.
     */
    final static class PartitionComparator implements Comparator<TopicPartition>, Serializable {
        private static final long serialVersionUID = 1L;
        // Topic -> associated list (e.g. consumers); only its size participates in the ordering.
        private final Map<String, List<String>> map;

        PartitionComparator(Map<String, List<String>> map) {
            this.map = map;
        }

        @Override
        public int compare(TopicPartition o1, TopicPartition o2) {
            // Integer.compare avoids the subtraction idiom, which can overflow in general
            // (sizes and partitions are non-negative here, so behavior is unchanged).
            int ret = Integer.compare(map.get(o1.topic()).size(), map.get(o2.topic()).size());
            if (ret == 0) {
                ret = o1.topic().compareTo(o2.topic());
                if (ret == 0)
                    ret = Integer.compare(o1.partition(), o2.partition());
            }
            return ret;
        }
    }

    /**
     * Orders partitions lexicographically by topic name, breaking ties by partition number.
     */
    public final static class TopicPartitionComparator implements Comparator<TopicPartition>, Serializable {
        private static final long serialVersionUID = 1L;

        @Override
        public int compare(TopicPartition topicPartition1, TopicPartition topicPartition2) {
            String topic1 = topicPartition1.topic();
            String topic2 = topicPartition2.topic();

            if (topic1.equals(topic2)) {
                return Integer.compare(topicPartition1.partition(), topicPartition2.partition());
            } else {
                return topic1.compareTo(topic2);
            }
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals.events; /** * This is the abstract definition of the events created by the KafkaConsumer API */ abstract public class ApplicationEvent { public final Type type; protected ApplicationEvent(Type type) { this.type = type; } /** * process the application event. Return true upon succesful execution, * false otherwise. * @return true if the event was successfully executed; false otherwise. */ @Override public String toString() { return type + " ApplicationEvent"; } public enum Type { NOOP, COMMIT, POLL, FETCH_COMMITTED_OFFSET, } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals.events;

import org.apache.kafka.clients.consumer.internals.CommitRequestManager;
import org.apache.kafka.clients.consumer.internals.NoopBackgroundEvent;
import org.apache.kafka.clients.consumer.internals.RequestManager;
import org.apache.kafka.common.KafkaException;

import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.BlockingQueue;

/**
 * Dispatches {@link ApplicationEvent}s from the application thread to the request manager
 * responsible for them, and pushes resulting {@link BackgroundEvent}s back to the application
 * via the background event queue.
 */
public class ApplicationEventProcessor {

    private final BlockingQueue<BackgroundEvent> backgroundEventQueue;
    private final Map<RequestManager.Type, Optional<RequestManager>> registry;

    public ApplicationEventProcessor(
            final BlockingQueue<BackgroundEvent> backgroundEventQueue,
            final Map<RequestManager.Type, Optional<RequestManager>> requestManagerRegistry) {
        this.backgroundEventQueue = backgroundEventQueue;
        this.registry = requestManagerRegistry;
    }

    /**
     * Dispatch the event to the handler matching its {@link ApplicationEvent#type}.
     *
     * @return true if the event was handled successfully; false otherwise
     */
    public boolean process(final ApplicationEvent event) {
        Objects.requireNonNull(event);
        switch (event.type) {
            case NOOP:
                return process((NoopApplicationEvent) event);
            case COMMIT:
                return process((CommitApplicationEvent) event);
            case POLL:
                return process((PollApplicationEvent) event);
            case FETCH_COMMITTED_OFFSET:
                return process((OffsetFetchApplicationEvent) event);
        }
        return false;
    }

    /**
     * Processes a {@link NoopApplicationEvent} by enqueuing a matching
     * {@link NoopBackgroundEvent}. Kept purely as a demonstration of the round trip.
     *
     * @param event a {@link NoopApplicationEvent}
     */
    private boolean process(final NoopApplicationEvent event) {
        return backgroundEventQueue.add(new NoopBackgroundEvent(event.message));
    }

    // A poll event merely refreshes the auto-commit timer; it is a no-op without a commit manager.
    private boolean process(final PollApplicationEvent event) {
        Optional<RequestManager> commitManagerOpt = registry.get(RequestManager.Type.COMMIT);
        if (commitManagerOpt.isPresent()) {
            CommitRequestManager commitManager = (CommitRequestManager) commitManagerOpt.get();
            commitManager.updateAutoCommitTimer(event.pollTimeMs);
        }
        return true;
    }

    // Forwards the commit to the commit manager and wires its outcome into the event's future.
    private boolean process(final CommitApplicationEvent event) {
        Optional<RequestManager> commitManagerOpt = registry.get(RequestManager.Type.COMMIT);
        if (!commitManagerOpt.isPresent()) {
            // Leaving this error handling here, but it is a bit strange as the commit API should
            // enforce the group.id upfront so we should never get to this block.
            Exception exception = new KafkaException("Unable to commit offset. Most likely because the group.id wasn't set");
            event.future().completeExceptionally(exception);
            return false;
        }

        CommitRequestManager commitManager = (CommitRequestManager) commitManagerOpt.get();
        commitManager.addOffsetCommitRequest(event.offsets()).whenComplete((result, error) -> {
            if (error == null) {
                event.future().complete(null);
            } else {
                event.future().completeExceptionally(error);
            }
        });
        return true;
    }

    // Forwards the committed-offset fetch to the commit manager; fails fast if it is absent.
    private boolean process(final OffsetFetchApplicationEvent event) {
        Optional<RequestManager> commitManagerOpt = registry.get(RequestManager.Type.COMMIT);
        if (!commitManagerOpt.isPresent()) {
            event.future.completeExceptionally(new KafkaException("Unable to fetch committed offset because the " +
                    "CommittedRequestManager is not available. Check if group.id was set correctly"));
            return false;
        }
        CommitRequestManager commitManager = (CommitRequestManager) commitManagerOpt.get();
        commitManager.addOffsetFetchRequest(event.partitions);
        return true;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals.events; /** * This is the abstract definition of the events created by the background thread. */ abstract public class BackgroundEvent { public final EventType type; public BackgroundEvent(EventType type) { this.type = type; } public enum EventType { NOOP, ERROR, } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/CommitApplicationEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals.events;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

/**
 * Application event carrying the offsets to commit; the attached future is completed by the
 * background thread once the commit succeeds or fails.
 */
public class CommitApplicationEvent extends ApplicationEvent {

    private final CompletableFuture<Void> future;
    private final Map<TopicPartition, OffsetAndMetadata> offsets;

    public CommitApplicationEvent(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        super(Type.COMMIT);
        this.offsets = offsets;

        // Reject negative offsets up front, before the event is ever enqueued.
        Optional<Exception> validationError = validateOffsets(offsets);
        if (validationError.isPresent()) {
            throw new RuntimeException(validationError.get());
        }
        this.future = new CompletableFuture<>();
    }

    public CompletableFuture<Void> future() {
        return future;
    }

    public Map<TopicPartition, OffsetAndMetadata> offsets() {
        return offsets;
    }

    // Returns the validation failure for the first negative offset found, if any.
    private Optional<Exception> validateOffsets(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        for (OffsetAndMetadata offsetAndMetadata : offsets.values()) {
            if (offsetAndMetadata.offset() < 0) {
                return Optional.of(new IllegalArgumentException("Invalid offset: " + offsetAndMetadata.offset()));
            }
        }
        return Optional.empty();
    }

    @Override
    public String toString() {
        return "CommitApplicationEvent(offsets=" + offsets + ")";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/ErrorBackgroundEvent.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals.events; public class ErrorBackgroundEvent extends BackgroundEvent { private final Throwable exception; public ErrorBackgroundEvent(Throwable e) { super(EventType.ERROR); exception = e; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/EventHandler.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals.events; import java.io.Closeable; import java.util.Optional; /** * This class interfaces with the KafkaConsumer and the background thread. It allows the caller to enqueue events via * the {@code add()} method and to retrieve events via the {@code poll()} method. */ public interface EventHandler extends Closeable { /** * Retrieves and removes a {@link BackgroundEvent}. Returns an empty Optional instance if there is nothing. * @return an Optional of {@link BackgroundEvent} if the value is present. Otherwise, an empty Optional. */ Optional<BackgroundEvent> poll(); /** * Check whether there are pending {@code BackgroundEvent} await to be consumed. * @return true if there are no pending event */ boolean isEmpty(); /** * Add an {@link ApplicationEvent} to the handler. The method returns true upon successful add; otherwise returns * false. * @param event An {@link ApplicationEvent} created by the polling thread. * @return true upon successful add. */ boolean add(ApplicationEvent event); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/NoopApplicationEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals.events;

/**
 * A no-op application event, kept purely as a demonstration of the event mechanism.
 * Carries an arbitrary message but triggers no processing in the background thread.
 */
public class NoopApplicationEvent extends ApplicationEvent {

    /** Free-form payload; used only for identification in {@link #toString()}. */
    public final String message;

    public NoopApplicationEvent(final String message) {
        super(Type.NOOP);
        this.message = message;
    }

    @Override
    public String toString() {
        // Same rendering as the original: "<class>_<message>".
        return new StringBuilder()
            .append(getClass())
            .append('_')
            .append(message)
            .toString();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/OffsetFetchApplicationEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals.events;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Application event requesting the committed offsets for a set of partitions.
 * The background thread completes {@link #future} with the fetched offsets;
 * the application thread blocks on it via {@link #complete(Duration)}.
 */
public class OffsetFetchApplicationEvent extends ApplicationEvent {

    /** Completed by the background thread with the committed offsets for {@link #partitions}. */
    public final CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> future;

    /** Immutable snapshot of the partitions whose committed offsets were requested. */
    public final Set<TopicPartition> partitions;

    public OffsetFetchApplicationEvent(final Set<TopicPartition> partitions) {
        super(Type.FETCH_COMMITTED_OFFSET);
        // Defensive copy: this event crosses from the application thread to the
        // background thread, so the caller must not be able to mutate the set
        // after hand-off.
        this.partitions = Collections.unmodifiableSet(new HashSet<>(partitions));
        this.future = new CompletableFuture<>();
    }

    /**
     * Blocks until the background thread completes the fetch, or the given duration elapses.
     *
     * @param duration maximum time to wait for the result
     * @return the committed offset and metadata per requested partition
     * @throws ExecutionException   if the fetch failed
     * @throws InterruptedException if the waiting thread is interrupted
     * @throws TimeoutException     if the result is not available within {@code duration}
     */
    public Map<TopicPartition, OffsetAndMetadata> complete(final Duration duration)
        throws ExecutionException, InterruptedException, TimeoutException {
        return future.get(duration.toMillis(), TimeUnit.MILLISECONDS);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/consumer/internals/events/PollApplicationEvent.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.consumer.internals.events;

/**
 * Application event signaling that the application thread invoked poll,
 * carrying the timestamp of that invocation for the background thread.
 */
public class PollApplicationEvent extends ApplicationEvent {

    /** Wall-clock time, in milliseconds, at which poll was invoked. */
    public final long pollTimeMs;

    protected PollApplicationEvent(final long currentTimeMs) {
        super(Type.POLL);
        pollTimeMs = currentTimeMs;
    }
}