index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/FailedRecordProcessor.java | /*
* Copyright 2019-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BiPredicate;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
/**
* Common super class for classes that deal with failing to consume a consumer record.
*
* @author Gary Russell
* @since 2.3.1
*
*/
public abstract class FailedRecordProcessor extends ExceptionClassifier implements DeliveryAttemptAware {

	// Constant predicates returned by the deprecated getSkipPredicate(): skip the record
	// when immediate recovery succeeded, never skip it when recovery threw.
	private static final BiPredicate<ConsumerRecord<?, ?>, Exception> ALWAYS_SKIP_PREDICATE = (r, e) -> true;

	private static final BiPredicate<ConsumerRecord<?, ?>, Exception> NEVER_SKIP_PREDICATE = (r, e) -> false;

	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR

	// Tracks per-topic/partition failure state (attempt counts, back off execution) and
	// performs recovery when retries are exhausted.
	private final FailedRecordTracker failureTracker;

	private boolean commitRecovered;

	/**
	 * Construct an instance with the provided recoverer and back off.
	 * @param recoverer the recoverer invoked when retries are exhausted; may be null, in
	 * which case the tracker falls back to a logging-only recoverer.
	 * @param backOff the back off to apply between delivery attempts.
	 */
	protected FailedRecordProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff) {
		this.failureTracker = new FailedRecordTracker(recoverer, backOff, this.logger);
	}

	/**
	 * Whether the offset for a recovered record should be committed.
	 * @return true to commit recovered record offsets.
	 */
	protected boolean isCommitRecovered() {
		return this.commitRecovered;
	}

	/**
	 * Set to true to commit the offset for a recovered record.
	 * @param commitRecovered true to commit.
	 */
	public void setCommitRecovered(boolean commitRecovered) {
		this.commitRecovered = commitRecovered;
	}

	/**
	 * Set a function to dynamically determine the {@link BackOff} to use, based on the
	 * consumer record and/or exception. If null is returned, the default BackOff will be
	 * used.
	 * @param backOffFunction the function.
	 * @since 2.6
	 */
	public void setBackOffFunction(BiFunction<ConsumerRecord<?, ?>, Exception, BackOff> backOffFunction) {
		this.failureTracker.setBackOffFunction(backOffFunction);
	}

	/**
	 * Set to false to immediately attempt to recover on the next attempt instead
	 * of repeating the BackOff cycle when recovery fails.
	 * @param resetStateOnRecoveryFailure false to retain state.
	 * @since 2.5.5
	 */
	public void setResetStateOnRecoveryFailure(boolean resetStateOnRecoveryFailure) {
		this.failureTracker.setResetStateOnRecoveryFailure(resetStateOnRecoveryFailure);
	}

	/**
	 * Set to true to reset the retry {@link BackOff} if the exception is a different type
	 * to the previous failure for the same record. The
	 * {@link #setBackOffFunction(BiFunction) backOffFunction}, if provided, will be
	 * called to get the {@link BackOff} to use for the new exception; otherwise, the
	 * configured {@link BackOff} will be used.
	 * @param resetStateOnExceptionChange true to reset.
	 * @since 2.6.3
	 */
	public void setResetStateOnExceptionChange(boolean resetStateOnExceptionChange) {
		this.failureTracker.setResetStateOnExceptionChange(resetStateOnExceptionChange);
	}

	/**
	 * Set one or more {@link RetryListener} to receive notifications of retries and
	 * recovery.
	 * @param listeners the listeners.
	 * @since 2.7
	 */
	public void setRetryListeners(RetryListener... listeners) {
		this.failureTracker.setRetryListeners(listeners);
	}

	@Override
	public int deliveryAttempt(TopicPartitionOffset topicPartitionOffset) {
		// Delegate to the tracker, which keeps the per-record attempt counts.
		return this.failureTracker.deliveryAttempt(topicPartitionOffset);
	}

	/**
	 * Return a {@link BiPredicate} to call to determine whether the first record in the
	 * list should be skipped.
	 * @param records the records.
	 * @param thrownException the exception.
	 * @return the {@link BiPredicate}.
	 * @deprecated in favor of {@link #getRecoveryStrategy(List, Exception)}.
	 */
	@SuppressWarnings("deprecation")
	@Deprecated
	protected BiPredicate<ConsumerRecord<?, ?>, Exception> getSkipPredicate(List<ConsumerRecord<?, ?>> records,
			Exception thrownException) {

		// A "true" classification means the exception is retryable; let the tracker
		// decide per-record whether retries are exhausted.
		if (getClassifier().classify(thrownException)) {
			return this.failureTracker::skip;
		}
		else {
			// Non-retryable exception: attempt to recover the first record immediately.
			// NOTE(review): records.get(0) is invoked before the emptiness check in the
			// catch block; an empty list would surface here as an IndexOutOfBoundsException
			// treated as a recovery failure - confirm callers never pass an empty list.
			try {
				this.failureTracker.getRecoverer().accept(records.get(0), thrownException);
			}
			catch (Exception ex) {
				if (records.size() > 0) {
					this.logger.error(ex, () -> "Recovery of record ("
							+ ListenerUtils.recordToString(records.get(0)) + ") failed");
				}
				// Recovery failed - the record must not be skipped.
				return NEVER_SKIP_PREDICATE;
			}
			return ALWAYS_SKIP_PREDICATE;
		}
	}

	/**
	 * Return a {@link RecoveryStrategy} to call to determine whether the first record in the
	 * list should be skipped.
	 * @param records the records.
	 * @param thrownException the exception.
	 * @return the {@link RecoveryStrategy}.
	 * @since 2.7
	 */
	protected RecoveryStrategy getRecoveryStrategy(List<ConsumerRecord<?, ?>> records, Exception thrownException) {
		return getRecoveryStrategy(records, null, thrownException);
	}

	/**
	 * Return a {@link RecoveryStrategy} to call to determine whether the first record in the
	 * list should be skipped.
	 * @param records the records.
	 * @param recoveryConsumer the consumer.
	 * @param thrownException the exception.
	 * @return the {@link RecoveryStrategy}.
	 * @since 2.8.4
	 */
	@SuppressWarnings("deprecation")
	protected RecoveryStrategy getRecoveryStrategy(List<ConsumerRecord<?, ?>> records,
			@Nullable Consumer<?, ?> recoveryConsumer, Exception thrownException) {

		// Retryable exception: the tracker's recovered() implements the back off cycle.
		if (getClassifier().classify(thrownException)) {
			return this.failureTracker::recovered;
		}
		else {
			// Non-retryable exception: attempt immediate recovery of the first record
			// and notify the retry listeners of the outcome.
			// NOTE(review): as in getSkipPredicate, records.get(0) precedes the
			// emptiness check - confirm callers never pass an empty list.
			try {
				this.failureTracker.getRecoverer().accept(records.get(0), recoveryConsumer, thrownException);
				this.failureTracker.getRetryListeners().forEach(rl -> rl.recovered(records.get(0), thrownException));
			}
			catch (Exception ex) {
				if (records.size() > 0) {
					this.logger.error(ex, () -> "Recovery of record ("
							+ ListenerUtils.recordToString(records.get(0)) + ") failed");
					this.failureTracker.getRetryListeners().forEach(rl ->
							rl.recoveryFailed(records.get(0), thrownException, ex));
				}
				// Recovery failed - never skip, regardless of the strategy arguments.
				return (rec, excep, cont, consumer) -> NEVER_SKIP_PREDICATE.test(rec, excep);
			}
			return (rec, excep, cont, consumer) -> ALWAYS_SKIP_PREDICATE.test(rec, excep);
		}
	}

	/**
	 * Clear the thread-local failure state for the calling (consumer) thread.
	 */
	public void clearThreadState() {
		this.failureTracker.clearThreadState();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/FailedRecordTracker.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.BackOffExecution;
/**
* Track record processing failure counts.
*
* @author Gary Russell
* @since 2.2
*
*/
class FailedRecordTracker implements RecoveryStrategy {

	// Per-consumer-thread failure state, keyed by topic/partition. A consumer thread
	// only processes its own partitions, so thread-local state suffices.
	private final ThreadLocal<Map<TopicPartition, FailedRecord>> failures = new ThreadLocal<>(); // intentionally not static

	private final ConsumerAwareRecordRecoverer recoverer;

	// True when the configured BackOff yields STOP on its very first execution, i.e.
	// "no retries at all" - recovery is attempted on the first failure.
	private final boolean noRetries;

	private final List<RetryListener> retryListeners = new ArrayList<>();

	private final BackOff backOff;

	private BiFunction<ConsumerRecord<?, ?>, Exception, BackOff> backOffFunction;

	private boolean resetStateOnRecoveryFailure = true;

	private boolean resetStateOnExceptionChange;

	/**
	 * Construct an instance with the provided recoverer and back off.
	 * @param recoverer the recoverer; may be null, in which case a logging-only
	 * recoverer is used.
	 * @param backOff the back off between delivery attempts; never null.
	 * @param logger the logger used by the default recoverer.
	 */
	@SuppressWarnings("deprecation")
	FailedRecordTracker(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff,
			LogAccessor logger) {

		Assert.notNull(backOff, "'backOff' cannot be null");
		if (recoverer == null) {
			// Default recoverer: just log that the back off was exhausted for the record.
			this.recoverer = (rec, consumer, thr) -> {
				Map<TopicPartition, FailedRecord> map = this.failures.get();
				FailedRecord failedRecord = null;
				if (map != null) {
					failedRecord = map.get(new TopicPartition(rec.topic(), rec.partition()));
				}
				logger.error(thr, "Backoff "
						+ (failedRecord == null
								? "none"
								: failedRecord.getBackOffExecution())
						+ " exhausted for " + ListenerUtils.recordToString(rec));
			};
		}
		else {
			if (recoverer instanceof ConsumerAwareRecordRecoverer) {
				this.recoverer = (ConsumerAwareRecordRecoverer) recoverer;
			}
			else {
				// Adapt the two-arg recoverer; the consumer argument is ignored.
				this.recoverer = (rec, consumer, ex) -> recoverer.accept(rec, ex);
			}
		}
		this.noRetries = backOff.start().nextBackOff() == BackOffExecution.STOP;
		this.backOff = backOff;
	}

	/**
	 * Set a function to dynamically determine the {@link BackOff} to use, based on the
	 * consumer record and/or exception. If null is returned, the default BackOff will be
	 * used.
	 * @param backOffFunction the function.
	 * @since 2.6
	 */
	public void setBackOffFunction(@Nullable BiFunction<ConsumerRecord<?, ?>, Exception, BackOff> backOffFunction) {
		this.backOffFunction = backOffFunction;
	}

	/**
	 * Set to false to immediately attempt to recover on the next attempt instead
	 * of repeating the BackOff cycle when recovery fails.
	 * @param resetStateOnRecoveryFailure false to retain state.
	 * @since 2.5.5
	 */
	public void setResetStateOnRecoveryFailure(boolean resetStateOnRecoveryFailure) {
		this.resetStateOnRecoveryFailure = resetStateOnRecoveryFailure;
	}

	/**
	 * Set to true to reset the retry {@link BackOff} if the exception is a different type
	 * to the previous failure for the same record. The
	 * {@link #setBackOffFunction(BiFunction) backOffFunction}, if provided, will be
	 * called to get the {@link BackOff} to use for the new exception; otherwise, the
	 * configured {@link BackOff} will be used.
	 * @param resetStateOnExceptionChange true to reset.
	 * @since 2.6.3
	 */
	public void setResetStateOnExceptionChange(boolean resetStateOnExceptionChange) {
		this.resetStateOnExceptionChange = resetStateOnExceptionChange;
	}

	/**
	 * Set one or more {@link RetryListener} to receive notifications of retries and
	 * recovery.
	 * @param listeners the listeners.
	 * @since 2.7
	 */
	public void setRetryListeners(RetryListener... listeners) {
		this.retryListeners.clear();
		this.retryListeners.addAll(Arrays.asList(listeners));
	}

	List<RetryListener> getRetryListeners() {
		return this.retryListeners;
	}

	/**
	 * Legacy entry point used by the deprecated skip-predicate path; delegates to
	 * {@link #recovered(ConsumerRecord, Exception, MessageListenerContainer, Consumer)}.
	 * @param record the failed record.
	 * @param exception the exception.
	 * @return true if the record should be skipped (recovery succeeded or was exhausted).
	 */
	boolean skip(ConsumerRecord<?, ?> record, Exception exception) {
		try {
			return recovered(record, exception, null, null);
		}
		catch (InterruptedException e) {
			// Preserve the interrupt flag and treat the record as not yet recovered.
			Thread.currentThread().interrupt();
			return false;
		}
	}

	@Override
	public boolean recovered(ConsumerRecord<?, ?> record, Exception exception,
			@Nullable MessageListenerContainer container,
			@Nullable Consumer<?, ?> consumer) throws InterruptedException {

		// No retries configured: recover immediately; no per-record state is kept.
		if (this.noRetries) {
			attemptRecovery(record, exception, null, consumer);
			return true;
		}
		// Lazily initialize this thread's failure map.
		Map<TopicPartition, FailedRecord> map = this.failures.get();
		if (map == null) {
			this.failures.set(new HashMap<>());
			map = this.failures.get();
		}
		TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
		FailedRecord failedRecord = getFailedRecordInstance(record, exception, map, topicPartition);
		this.retryListeners.forEach(rl ->
				rl.failedDelivery(record, exception, failedRecord.getDeliveryAttempts().get()));
		long nextBackOff = failedRecord.getBackOffExecution().nextBackOff();
		if (nextBackOff != BackOffExecution.STOP) {
			// More attempts remain: sleep for the back off interval, then signal "retry".
			if (container == null) {
				Thread.sleep(nextBackOff);
			}
			else {
				// Sleep that wakes early if the container is stopped.
				ListenerUtils.stoppableSleep(container, nextBackOff);
			}
			return false;
		}
		else {
			// Back off exhausted: recover the record and discard its tracking state.
			attemptRecovery(record, exception, topicPartition, consumer);
			map.remove(topicPartition);
			if (map.isEmpty()) {
				this.failures.remove();
			}
			return true;
		}
	}

	private FailedRecord getFailedRecordInstance(ConsumerRecord<?, ?> record, Exception exception,
			Map<TopicPartition, FailedRecord> map, TopicPartition topicPartition) {

		// Track the cause of a ListenerExecutionFailedException, not the wrapper.
		Exception realException = exception;
		if (realException instanceof ListenerExecutionFailedException
				&& realException.getCause() instanceof Exception) {
			realException = (Exception) realException.getCause();
		}
		FailedRecord failedRecord = map.get(topicPartition);
		if (failedRecord == null || failedRecord.getOffset() != record.offset()
				|| (this.resetStateOnExceptionChange
						&& !realException.getClass().isInstance(failedRecord.getLastException()))) {
			// First failure for this record (or the exception type changed and reset is
			// enabled): start a fresh BackOff execution.
			// NOTE(review): isInstance() treats the previous exception as "the same"
			// when it is an instance of the new exception's class - confirm the
			// resulting asymmetry (subclass-then-superclass vs. the reverse) is intended.
			failedRecord = new FailedRecord(record.offset(), determineBackOff(record, realException).start());
			map.put(topicPartition, failedRecord);
		}
		else {
			// Same record, same (or compatible) exception: just count the attempt.
			failedRecord.getDeliveryAttempts().incrementAndGet();
		}
		failedRecord.setLastException(realException);
		return failedRecord;
	}

	// Resolve the BackOff for this record/exception: the function's result if one is
	// configured and returns non-null, otherwise the default.
	private BackOff determineBackOff(ConsumerRecord<?, ?> record, Exception exception) {
		if (this.backOffFunction == null) {
			return this.backOff;
		}
		BackOff backOffToUse = this.backOffFunction.apply(record, exception);
		return backOffToUse != null ? backOffToUse : this.backOff;
	}

	private void attemptRecovery(ConsumerRecord<?, ?> record, Exception exception, @Nullable TopicPartition tp,
			Consumer<?, ?> consumer) {

		try {
			this.recoverer.accept(record, consumer, exception);
			this.retryListeners.forEach(rl -> rl.recovered(record, exception));
		}
		catch (RuntimeException e) {
			this.retryListeners.forEach(rl -> rl.recoveryFailed(record, exception, e));
			// Optionally forget the back off state so the next failure restarts the
			// cycle; retaining it causes an immediate recovery attempt next time.
			if (tp != null && this.resetStateOnRecoveryFailure) {
				this.failures.get().remove(tp);
			}
			throw e;
		}
	}

	/**
	 * Remove all failure state held for the calling thread.
	 */
	void clearThreadState() {
		this.failures.remove();
	}

	ConsumerAwareRecordRecoverer getRecoverer() {
		return this.recoverer;
	}

	/**
	 * Return the number of the next delivery attempt for this topic/partition/offset.
	 * @param topicPartitionOffset the topic/partition/offset.
	 * @return the delivery attempt.
	 * @since 2.5
	 */
	int deliveryAttempt(TopicPartitionOffset topicPartitionOffset) {
		Map<TopicPartition, FailedRecord> map = this.failures.get();
		if (map == null) {
			return 1;
		}
		FailedRecord failedRecord = map.get(topicPartitionOffset.getTopicPartition());
		if (failedRecord == null || failedRecord.getOffset() != topicPartitionOffset.getOffset()) {
			return 1;
		}
		return failedRecord.getDeliveryAttempts().get() + 1;
	}

	/**
	 * Mutable holder for the retry state of a single record: its offset, the in-progress
	 * {@link BackOffExecution}, the delivery attempt count and the last exception seen.
	 */
	static final class FailedRecord {

		private final long offset;

		private final BackOffExecution backOffExecution;

		private final AtomicInteger deliveryAttempts = new AtomicInteger(1);

		private Exception lastException;

		FailedRecord(long offset, BackOffExecution backOffExecution) {
			this.offset = offset;
			this.backOffExecution = backOffExecution;
		}

		long getOffset() {
			return this.offset;
		}

		BackOffExecution getBackOffExecution() {
			return this.backOffExecution;
		}

		AtomicInteger getDeliveryAttempts() {
			return this.deliveryAttempts;
		}

		Exception getLastException() {
			return this.lastException;
		}

		void setLastException(Exception lastException) {
			this.lastException = lastException;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/FallbackBatchErrorHandler.java | /*
* Copyright 2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.FixedBackOff;
/**
* A batch error handler used by the default error handler when the listener does
* not throw a {@link BatchListenerFailedException}.
*
* @author Gary Russell
* @since 2.8.3
*
*/
@SuppressWarnings("deprecation")
class FallbackBatchErrorHandler extends RetryingBatchErrorHandler {

	// Thin subclass of the deprecated RetryingBatchErrorHandler; all behavior is
	// inherited - this type exists so new code need not reference the deprecated name.

	/**
	 * Construct an instance with a default {@link FixedBackOff} (unlimited attempts with
	 * a 5 second back off).
	 */
	FallbackBatchErrorHandler() {
		super();
	}

	/**
	 * Construct an instance with the provided {@link BackOff} and
	 * {@link ConsumerRecordRecoverer}. If the recoverer is {@code null}, the discarded
	 * records (topic-partition{@literal @}offset) will be logged.
	 * @param backOff the back off.
	 * @param recoverer the recoverer.
	 */
	FallbackBatchErrorHandler(BackOff backOff, @Nullable ConsumerRecordRecoverer recoverer) {
		super(backOff, recoverer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/GenericErrorHandler.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.lang.Nullable;
/**
* A generic error handler.
*
* @param <T> the data type.
*
* @author Gary Russell
* @since 1.1
*
*/
@FunctionalInterface
public interface GenericErrorHandler<T> {

	/**
	 * Handle the exception.
	 * @param thrownException The exception.
	 * @param data the data.
	 */
	void handle(Exception thrownException, @Nullable T data);

	/**
	 * Handle the exception. The default implementation ignores the consumer and
	 * delegates to {@link #handle(Exception, Object)}.
	 * @param thrownException The exception.
	 * @param data the data.
	 * @param consumer the consumer.
	 */
	default void handle(Exception thrownException, @Nullable T data, Consumer<?, ?> consumer) {
		handle(thrownException, data);
	}

	/**
	 * Optional method to clear thread state; will be called just before a consumer
	 * thread terminates. The default implementation does nothing.
	 * @since 2.3
	 */
	default void clearThreadState() {
	}

	/**
	 * Return true if the offset should be committed for a handled error (no exception
	 * thrown). The default implementation returns true.
	 * @return true to commit.
	 * @since 2.3.2
	 */
	default boolean isAckAfterHandle() {
		return true;
	}

	/**
	 * Set to false to prevent the container from committing the offset of a recovered
	 * record (when the error handler does not itself throw an exception). The default
	 * implementation throws {@link UnsupportedOperationException}; implementations
	 * supporting the property must override it.
	 * @param ack false to not commit.
	 * @since 2.5.6
	 */
	default void setAckAfterHandle(boolean ack) {
		throw new UnsupportedOperationException("This error handler does not support setting this property");
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/GenericMessageListener.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
/**
* Top level interface for listeners.
*
* @param <T> the type received by the listener.
*
* @author Gary Russell
* @since 1.1
*
*/
@FunctionalInterface
public interface GenericMessageListener<T> {

	// Only onMessage(T) is abstract; containers invoke one of the default overloads
	// depending on the configured ack mode and consumer awareness, and listeners
	// override exactly the variant they support.

	/**
	 * Invoked with data from kafka.
	 * @param data the data to be processed.
	 */
	void onMessage(T data);

	/**
	 * Invoked with data from kafka. The default implementation throws
	 * {@link UnsupportedOperationException}.
	 * @param data the data to be processed.
	 * @param acknowledgment the acknowledgment.
	 */
	default void onMessage(T data, @Nullable Acknowledgment acknowledgment) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Invoked with data from kafka and provides access to the {@link Consumer}. The
	 * default implementation throws {@link UnsupportedOperationException}.
	 * @param data the data to be processed.
	 * @param consumer the consumer.
	 * @since 2.0
	 */
	default void onMessage(T data, Consumer<?, ?> consumer) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Invoked with data from kafka and provides access to the {@link Consumer}. The
	 * default implementation throws {@link UnsupportedOperationException}.
	 * @param data the data to be processed.
	 * @param acknowledgment the acknowledgment.
	 * @param consumer the consumer.
	 * @since 2.0
	 */
	default void onMessage(T data, @Nullable Acknowledgment acknowledgment, Consumer<?, ?> consumer) {
		throw new UnsupportedOperationException("Container should never call this");
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/GenericMessageListenerContainer.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
/**
* Generic message listener container; adds parameters.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.1.3
*
*/
public interface GenericMessageListenerContainer<K, V> extends MessageListenerContainer {

	// Marker interface only: adds the key/value generic parameters to
	// MessageListenerContainer; no additional methods are declared.

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaBackOffManagerFactory.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
/**
*
 * Creates a {@link KafkaConsumerBackoffManager} instance.
*
* @author Tomaz Fernandes
* @since 2.7
* @see KafkaConsumerBackoffManager
*/
public interface KafkaBackOffManagerFactory {

	/**
	 * Create a {@link KafkaConsumerBackoffManager} instance.
	 * @return the back off manager.
	 */
	KafkaConsumerBackoffManager create();

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaBackoffException.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.KafkaException;
/**
* Exception thrown when the consumer should not yet consume the message due to backOff.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public class KafkaBackoffException extends KafkaException {

	private static final long serialVersionUID = 1L;

	private final String listenerId;

	private final TopicPartition topicPartition;

	private final long dueTimestamp;

	/**
	 * Create an instance carrying the state of the back off event that prevented
	 * consumption.
	 *
	 * @param message the error message.
	 * @param topicPartition the partition that was backed off.
	 * @param listenerId the listenerId for the consumer that was backed off.
	 * @param dueTimestamp the time at which the message should be consumed.
	 */
	public KafkaBackoffException(String message, TopicPartition topicPartition, String listenerId, long dueTimestamp) {
		super(message);
		this.topicPartition = topicPartition;
		this.listenerId = listenerId;
		this.dueTimestamp = dueTimestamp;
	}

	/**
	 * Return the id of the listener container whose consumer was backed off.
	 * @return the listener id.
	 */
	public String getListenerId() {
		return this.listenerId;
	}

	/**
	 * Return the topic/partition that was backed off.
	 * @return the topic partition.
	 */
	public TopicPartition getTopicPartition() {
		return this.topicPartition;
	}

	/**
	 * Return the time at which the message should be consumed.
	 * @return the due timestamp.
	 */
	public long getDueTimestamp() {
		return this.dueTimestamp;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaConsumerBackoffManager.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* Interface for backing off a {@link MessageListenerContainer}
* until a given dueTimestamp, if such timestamp is in the future.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*/
public interface KafkaConsumerBackoffManager {

	/**
	 * Back off the consumer described by the context if the context's due timestamp is
	 * still in the future.
	 * @param context the back off context.
	 */
	void backOffIfNecessary(Context context);

	/**
	 * Create a {@link Context} with the provided state.
	 * <p>NOTE: the argument order of this factory method (listenerId before
	 * topicPartition) differs from the {@link Context} constructor's.
	 * @param dueTimestamp the time after which the message should be processed, in
	 * milliseconds since epoch.
	 * @param listenerId the id of the listener container to be paused.
	 * @param topicPartition the topic/partition to be paused.
	 * @param messageConsumer the consumer of the message, if present.
	 * @return the context.
	 */
	default Context createContext(long dueTimestamp, String listenerId, TopicPartition topicPartition,
			Consumer<?, ?> messageConsumer) {
		return new Context(dueTimestamp, topicPartition, listenerId, messageConsumer);
	}

	/**
	 * Provides the state that will be used for backing off.
	 * @since 2.7
	 */
	class Context {

		/**
		 * The time after which the message should be processed,
		 * in milliseconds since epoch.
		 */
		private final long dueTimestamp;

		/**
		 * The id for the listener that should be paused.
		 */
		private final String listenerId;

		/**
		 * The topic that contains the partition to be paused.
		 */
		private final TopicPartition topicPartition;

		/**
		 * The consumer of the message, if present.
		 */
		private final Consumer<?, ?> consumerForTimingAdjustment;

		Context(long dueTimestamp, TopicPartition topicPartition, String listenerId,
				Consumer<?, ?> consumerForTimingAdjustment) {
			this.dueTimestamp = dueTimestamp;
			this.listenerId = listenerId;
			this.topicPartition = topicPartition;
			this.consumerForTimingAdjustment = consumerForTimingAdjustment;
		}

		public long getDueTimestamp() {
			return this.dueTimestamp;
		}

		public String getListenerId() {
			return this.listenerId;
		}

		public TopicPartition getTopicPartition() {
			return this.topicPartition;
		}

		public Consumer<?, ?> getConsumerForTimingAdjustment() {
			return this.consumerForTimingAdjustment;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaConsumerTimingAdjuster.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
*
* Adjusts the consumption timing of the given consumer to try to have it consume the
* next message at a given time until due. Since the {@link org.apache.kafka.clients.consumer.KafkaConsumer}
 * executes on a single thread, this is done on a best-effort basis.
*
* @author Tomaz Fernandes
* @since 2.7
* @see KafkaConsumerBackoffManager
*/
public interface KafkaConsumerTimingAdjuster {

	/**
	 * Executes the timing adjustment.
	 *
	 * @param consumerToAdjust the consumer that will have consumption adjusted
	 * @param topicPartitionToAdjust the consumer's topic partition to be adjusted
	 * @param containerPollTimeout the consumer's container pollTimeout property
	 * @param timeUntilNextMessageIsDue the time when the next message should be consumed
	 *
	 * @return the applied adjustment amount
	 */
	// NOTE(review): time values appear to be milliseconds, consistent with the
	// container's pollTimeout property - confirm against implementations.
	long adjustTiming(Consumer<?, ?> consumerToAdjust, TopicPartition topicPartitionToAdjust,
			long containerPollTimeout, long timeUntilNextMessageIsDue);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaExceptionLogLevelAware.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.springframework.kafka.KafkaException;
import org.springframework.util.Assert;
/**
 * A top level abstract class for classes that can be configured with a
 * {@link KafkaException.Level}.
 *
 * @author Gary Russell
 * @since 2.5
 *
 */
public abstract class KafkaExceptionLogLevelAware {

	// Defaults to ERROR; never null (the setter rejects null).
	private KafkaException.Level logLevel = KafkaException.Level.ERROR;

	/**
	 * Set the level at which the exception thrown by this handler is logged.
	 * @param logLevel the level (default ERROR).
	 */
	public void setLogLevel(KafkaException.Level logLevel) {
		Assert.notNull(logLevel, "'logLevel' cannot be null");
		this.logLevel = logLevel;
	}

	/**
	 * Get the level at which the exception thrown by this handler is logged.
	 * @return the level.
	 */
	protected KafkaException.Level getLogLevel() {
		return this.logLevel;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaListenerErrorHandler.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.messaging.Message;
/**
 * An error handler which is called when a {@code @KafkaListener} method
 * throws an exception. This is invoked higher up the stack than the
 * listener container's error handler. For methods annotated with
 * {@code @SendTo}, the error handler can return a result.
 *
 * @author Venil Noronha
 * @author Gary Russell
 * @author Artem Bilan
 *
 * @since 1.3
 */
@FunctionalInterface
public interface KafkaListenerErrorHandler {

	/**
	 * Handle the error.
	 * @param message the spring-messaging message.
	 * @param exception the exception the listener threw, wrapped in a
	 * {@link ListenerExecutionFailedException}.
	 * @return the return value is ignored unless the annotated method has a
	 * {@code @SendTo} annotation.
	 */
	Object handleError(Message<?> message, ListenerExecutionFailedException exception);

	/**
	 * Handle the error. The default implementation ignores the consumer and
	 * delegates to the two-argument variant.
	 * @param message the spring-messaging message.
	 * @param exception the exception the listener threw, wrapped in a
	 * {@link ListenerExecutionFailedException}.
	 * @param consumer the consumer.
	 * @return the return value is ignored unless the annotated method has a
	 * {@code @SendTo} annotation.
	 */
	default Object handleError(Message<?> message, ListenerExecutionFailedException exception,
			Consumer<?, ?> consumer) {
		return handleError(message, exception);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/KafkaMessageListenerContainer.java | /*
* Copyright 2016-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.consumer.RetriableCommitFailedException;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.FencedInstanceIdException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.errors.RebalanceInProgressException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.beans.BeanUtils;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.core.log.LogAccessor;
import org.springframework.core.task.AsyncListenableTaskExecutor;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.KafkaResourceHolder;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.event.ConsumerFailedToStartEvent;
import org.springframework.kafka.event.ConsumerPartitionPausedEvent;
import org.springframework.kafka.event.ConsumerPartitionResumedEvent;
import org.springframework.kafka.event.ConsumerPausedEvent;
import org.springframework.kafka.event.ConsumerResumedEvent;
import org.springframework.kafka.event.ConsumerStartedEvent;
import org.springframework.kafka.event.ConsumerStartingEvent;
import org.springframework.kafka.event.ConsumerStoppedEvent;
import org.springframework.kafka.event.ConsumerStoppedEvent.Reason;
import org.springframework.kafka.event.ConsumerStoppingEvent;
import org.springframework.kafka.event.ListenerContainerIdleEvent;
import org.springframework.kafka.event.ListenerContainerNoLongerIdleEvent;
import org.springframework.kafka.event.ListenerContainerPartitionIdleEvent;
import org.springframework.kafka.event.ListenerContainerPartitionNoLongerIdleEvent;
import org.springframework.kafka.event.NonResponsiveConsumerEvent;
import org.springframework.kafka.listener.ConsumerSeekAware.ConsumerSeekCallback;
import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.kafka.listener.ContainerProperties.AssignmentCommitOption;
import org.springframework.kafka.listener.ContainerProperties.EOSMode;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.LogIfLevelEnabled;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.TopicPartitionOffset.SeekPosition;
import org.springframework.kafka.support.TransactionSupport;
import org.springframework.kafka.support.micrometer.MicrometerHolder;
import org.springframework.kafka.support.serializer.DeserializationException;
import org.springframework.kafka.support.serializer.ErrorHandlingDeserializer;
import org.springframework.kafka.support.serializer.SerializationUtils;
import org.springframework.kafka.transaction.KafkaAwareTransactionManager;
import org.springframework.lang.Nullable;
import org.springframework.scheduling.SchedulingAwareRunnable;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallbackWithoutResult;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import org.springframework.transaction.support.TransactionTemplate;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
/**
* Single-threaded Message listener container using the Java {@link Consumer} supporting
* auto-partition assignment or user-configured assignment.
* <p>
* With the latter, initial partition offsets can be provided.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Murali Reddy
* @author Marius Bogoevici
* @author Martin Dam
* @author Artem Bilan
* @author Loic Talhouarne
* @author Vladimir Tsanev
* @author Chen Binbin
* @author Yang Qiju
* @author Tom van den Berge
* @author Lukasz Kaminski
* @author Tomaz Fernandes
*/
public class KafkaMessageListenerContainer<K, V> // NOSONAR line count
extends AbstractMessageListenerContainer<K, V> {
private static final String UNUSED = "unused";
private static final String DEPRECATION = "deprecation";
private static final String UNCHECKED = "unchecked";
private static final String RAWTYPES = "rawtypes";
private static final int DEFAULT_ACK_TIME = 5000;
private static final Map<String, Object> CONSUMER_CONFIG_DEFAULTS = ConsumerConfig.configDef().defaultValues();
private final AbstractMessageListenerContainer<K, V> thisOrParentContainer;
private final TopicPartitionOffset[] topicPartitions;
private String clientIdSuffix;
private Runnable emergencyStop = () -> stopAbnormally(() -> {
// NOSONAR
});
private volatile ListenerConsumer listenerConsumer;
private volatile ListenableFuture<?> listenerConsumerFuture;
private volatile CountDownLatch startLatch = new CountDownLatch(1);
/**
 * Construct an instance with the supplied configuration properties.
 * No parent container and no explicit partition assignment.
 * @param consumerFactory the consumer factory.
 * @param containerProperties the container properties.
 */
public KafkaMessageListenerContainer(ConsumerFactory<? super K, ? super V> consumerFactory,
		ContainerProperties containerProperties) {
	this(null, consumerFactory, containerProperties, (TopicPartitionOffset[]) null);
}
/**
 * Construct an instance with the supplied configuration properties.
 * No explicit partition assignment (partitions may come from the container properties).
 * @param container a delegating container (if this is a sub-container).
 * @param consumerFactory the consumer factory.
 * @param containerProperties the container properties.
 */
KafkaMessageListenerContainer(AbstractMessageListenerContainer<K, V> container,
		ConsumerFactory<? super K, ? super V> consumerFactory,
		ContainerProperties containerProperties) {
	this(container, consumerFactory, containerProperties, (TopicPartitionOffset[]) null);
}
/**
 * Construct an instance with the supplied configuration properties and specific
 * topics/partitions/initialOffsets.
 * @param container a delegating container (if this is a sub-container); when
 * {@code null} this container is its own parent.
 * @param consumerFactory the consumer factory.
 * @param containerProperties the container properties.
 * @param topicPartitions the topics/partitions; duplicates are eliminated; when
 * {@code null} the assignment falls back to the container properties.
 */
KafkaMessageListenerContainer(@Nullable AbstractMessageListenerContainer<K, V> container,
		ConsumerFactory<? super K, ? super V> consumerFactory,
		ContainerProperties containerProperties, @Nullable TopicPartitionOffset... topicPartitions) {

	super(consumerFactory, containerProperties);
	Assert.notNull(consumerFactory, "A ConsumerFactory must be provided");
	this.thisOrParentContainer = container != null ? container : this;
	// Defensive copy of the caller's varargs array; otherwise use the properties.
	this.topicPartitions = topicPartitions == null
			? containerProperties.getTopicPartitions()
			: Arrays.copyOf(topicPartitions, topicPartitions.length);
}
/**
 * Set a {@link Runnable} to call whenever an {@link Error} occurs on a listener
 * thread.
 * @param emergencyStop the Runnable; must not be null.
 * @since 2.2.1
 */
public void setEmergencyStop(Runnable emergencyStop) {
	Assert.notNull(emergencyStop, "'emergencyStop' cannot be null");
	this.emergencyStop = emergencyStop;
}
/**
 * Set a suffix to add to the {@code client.id} consumer property (if the consumer
 * factory supports it). Also used to distinguish sub-containers from stand-alone
 * containers (a null suffix means stand-alone).
 * @param clientIdSuffix the suffix to add.
 * @since 1.0.6
 */
public void setClientIdSuffix(String clientIdSuffix) {
	this.clientIdSuffix = clientIdSuffix;
}
/**
 * Return the {@link TopicPartition}s currently assigned to this container,
 * either explicitly or by Kafka; may be null if not assigned yet.
 * @return the {@link TopicPartition}s currently assigned to this container,
 * either explicitly or by Kafka; may be null if not assigned yet.
 */
@Override
@Nullable
public Collection<TopicPartition> getAssignedPartitions() {
	// Snapshot the volatile field once; explicit assignment wins over Kafka's.
	ListenerConsumer current = this.listenerConsumer;
	if (current == null) {
		return null;
	}
	if (current.definedPartitions != null) {
		return Collections.unmodifiableCollection(current.definedPartitions.keySet());
	}
	if (current.assignedPartitions != null) {
		return Collections.unmodifiableCollection(current.assignedPartitions);
	}
	return null;
}
@Override
@Nullable
public Map<String, Collection<TopicPartition>> getAssignmentsByClientId() {
	// Null-check the snapshot, not the field: the original checked
	// this.listenerConsumer but dereferenced partitionsListenerConsumer, so a
	// concurrent change between the snapshot and the check could cause an NPE.
	ListenerConsumer partitionsListenerConsumer = this.listenerConsumer;
	if (partitionsListenerConsumer != null) {
		return Collections.singletonMap(partitionsListenerConsumer.getClientId(), getAssignedPartitions());
	}
	else {
		return null;
	}
}
@Override
public boolean isContainerPaused() {
	// Read the volatile listenerConsumer once so the null check and the
	// dereference observe the same instance (the original read it twice).
	ListenerConsumer consumer = this.listenerConsumer;
	return isPaused() && consumer != null && consumer.isConsumerPaused();
}
@Override
public boolean isPartitionPaused(TopicPartition topicPartition) {
	// Snapshot the volatile field once to avoid a null-check/dereference race
	// (the original read this.listenerConsumer twice).
	ListenerConsumer consumer = this.listenerConsumer;
	return consumer != null && consumer.isPartitionPaused(topicPartition);
}
@Override
public boolean isInExpectedState() {
	// Healthy when running, or when stopped through the normal stop path.
	boolean expected = isRunning();
	if (!expected) {
		expected = isStoppedNormally();
	}
	return expected;
}
@Override
public void pause() {
	super.pause();
	// Wake the polling thread (if any) so the pause takes effect promptly.
	ListenerConsumer current = this.listenerConsumer;
	if (current != null) {
		current.wakeIfNecessary();
	}
}
@Override
public void resume() {
	super.resume();
	// Wake the snapshot, not the field: the original re-read this.listenerConsumer
	// after null-checking the local copy, which was inconsistent with pause() and
	// could NPE if the consumer went away between the two reads.
	KafkaMessageListenerContainer<K, V>.ListenerConsumer consumer = this.listenerConsumer;
	if (consumer != null) {
		consumer.wakeIfNecessary();
	}
}
@Override
public Map<String, Map<MetricName, ? extends Metric>> metrics() {
	// Single snapshot of the volatile consumer; no consumer means no metrics.
	ListenerConsumer current = this.listenerConsumer;
	if (current == null) {
		return Collections.emptyMap();
	}
	return Collections.singletonMap(current.getClientId(), current.consumer.metrics());
}
@Override
protected void doStart() {
	if (isRunning()) {
		return;
	}
	if (this.clientIdSuffix == null) { // stand-alone container
		checkTopics();
	}
	ContainerProperties containerProperties = getContainerProperties();
	checkAckMode(containerProperties);
	Object messageListener = containerProperties.getMessageListener();
	// Fall back to a simple async executor named after the bean when none configured.
	AsyncListenableTaskExecutor consumerExecutor = containerProperties.getConsumerTaskExecutor();
	if (consumerExecutor == null) {
		consumerExecutor = new SimpleAsyncTaskExecutor(
				(getBeanName() == null ? "" : getBeanName()) + "-C-");
		containerProperties.setConsumerTaskExecutor(consumerExecutor);
	}
	GenericMessageListener<?> listener = (GenericMessageListener<?>) messageListener;
	ListenerType listenerType = determineListenerType(listener);
	this.listenerConsumer = new ListenerConsumer(listener, listenerType);
	setRunning(true);
	// The consumer thread counts this latch down when it starts; a timeout here
	// usually means the task executor has no free threads for this container.
	this.startLatch = new CountDownLatch(1);
	this.listenerConsumerFuture = consumerExecutor
			.submitListenable(this.listenerConsumer);
	try {
		if (!this.startLatch.await(containerProperties.getConsumerStartTimeout().toMillis(), TimeUnit.MILLISECONDS)) {
			this.logger.error("Consumer thread failed to start - does the configured task executor "
					+ "have enough threads to support all containers and concurrency?");
			publishConsumerFailedToStart();
		}
	}
	catch (@SuppressWarnings(UNUSED) InterruptedException e) {
		// Preserve the interrupt flag for callers higher up the stack.
		Thread.currentThread().interrupt();
	}
}
/**
 * Validate/adjust the ack-related container properties; irrelevant when the
 * Kafka client itself auto-commits.
 * @param containerProperties the properties to check.
 */
private void checkAckMode(ContainerProperties containerProperties) {
	if (this.consumerFactory.isAutoCommit()) {
		return;
	}
	AckMode ackMode = containerProperties.getAckMode();
	boolean countBased = ackMode == AckMode.COUNT || ackMode == AckMode.COUNT_TIME;
	if (countBased) {
		Assert.state(containerProperties.getAckCount() > 0, "'ackCount' must be > 0");
	}
	boolean timeBased = ackMode == AckMode.TIME || ackMode == AckMode.COUNT_TIME;
	if (timeBased && containerProperties.getAckTime() == 0) {
		containerProperties.setAckTime(DEFAULT_ACK_TIME);
	}
}
/**
 * Classify the listener, unwrapping any chain of delegating listeners so the
 * innermost delegate determines the type.
 * @param listener the (possibly delegating) listener.
 * @return the listener type of the innermost delegate.
 */
private ListenerType determineListenerType(GenericMessageListener<?> listener) {
	Object candidate = listener;
	while (candidate instanceof DelegatingMessageListener) {
		candidate = ((DelegatingMessageListener<?>) candidate).getDelegate();
	}
	return ListenerUtils.determineListenerType(candidate);
}
@Override
protected void doStop(final Runnable callback, boolean normal) {
	if (isRunning()) {
		// Register the completion callback before flipping the running flag so
		// the stop notification cannot be missed, then wake the consumer thread
		// so it observes the flag promptly.
		this.listenerConsumerFuture.addCallback(new StopCallback(callback));
		setRunning(false);
		this.listenerConsumer.wakeIfNecessaryForStop();
		setStoppedNormally(normal);
	}
}
// Publish a partition-idle event; silently a no-op without an event publisher.
private void publishIdlePartitionEvent(long idleTime, TopicPartition topicPartition, Consumer<K, V> consumer, boolean paused) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ListenerContainerPartitionIdleEvent(this,
			this.thisOrParentContainer, idleTime, getBeanName(), topicPartition, consumer, paused));
}
// Publish a partition-no-longer-idle event; no-op without an event publisher.
private void publishNoLongerIdlePartitionEvent(long idleTime, Consumer<K, V> consumer, TopicPartition topicPartition) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ListenerContainerPartitionNoLongerIdleEvent(this,
			this.thisOrParentContainer, idleTime, getBeanName(), topicPartition, consumer));
}
// Publish a container-idle event; no-op without an event publisher.
private void publishIdleContainerEvent(long idleTime, Consumer<?, ?> consumer, boolean paused) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ListenerContainerIdleEvent(this,
			this.thisOrParentContainer, idleTime, getBeanName(), getAssignedPartitions(), consumer, paused));
}
// Publish a container-no-longer-idle event; no-op without an event publisher.
private void publishNoLongerIdleContainerEvent(long idleTime, Consumer<?, ?> consumer) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ListenerContainerNoLongerIdleEvent(this,
			this.thisOrParentContainer, idleTime, getBeanName(), getAssignedPartitions(), consumer));
}
// Publish a non-responsive-consumer event; no-op without an event publisher.
private void publishNonResponsiveConsumerEvent(long timeSinceLastPoll, Consumer<?, ?> consumer) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(
			new NonResponsiveConsumerEvent(this, this.thisOrParentContainer, timeSinceLastPoll,
					getBeanName(), getAssignedPartitions(), consumer));
}
// Publish a consumer-paused event with a read-only view of the partitions.
private void publishConsumerPausedEvent(Collection<TopicPartition> partitions) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ConsumerPausedEvent(this, this.thisOrParentContainer,
			Collections.unmodifiableCollection(partitions)));
}
// Publish a consumer-resumed event with a read-only view of the partitions.
private void publishConsumerResumedEvent(Collection<TopicPartition> partitions) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ConsumerResumedEvent(this, this.thisOrParentContainer,
			Collections.unmodifiableCollection(partitions)));
}
// Publish a single-partition paused event; no-op without an event publisher.
private void publishConsumerPartitionPausedEvent(TopicPartition partition) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ConsumerPartitionPausedEvent(this, this.thisOrParentContainer,
			partition));
}
// Publish a single-partition resumed event; no-op without an event publisher.
private void publishConsumerPartitionResumedEvent(TopicPartition partition) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ConsumerPartitionResumedEvent(this, this.thisOrParentContainer,
			partition));
}
private void publishConsumerStoppingEvent(Consumer<?, ?> consumer) {
	try {
		ApplicationEventPublisher publisher = getApplicationEventPublisher();
		if (publisher != null) {
			publisher.publishEvent(
					new ConsumerStoppingEvent(this, this.thisOrParentContainer, consumer, getAssignedPartitions()));
		}
	}
	catch (Exception e) {
		// Best effort: a failing event listener must not prevent the consumer
		// from stopping; just log and continue.
		this.logger.error(e, "Failed to publish consumer stopping event");
	}
}
// Publish a stopped event whose Reason classifies the cause of the stop.
// The checks run from most specific to least; order must be preserved.
private void publishConsumerStoppedEvent(@Nullable Throwable throwable) {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	Reason reason =
			throwable instanceof Error
					? Reason.ERROR
					: throwable instanceof StopAfterFenceException || throwable instanceof FencedInstanceIdException
							? Reason.FENCED
							: throwable instanceof AuthenticationException || throwable instanceof AuthorizationException
									? Reason.AUTH
									: throwable instanceof NoOffsetForPartitionException
											? Reason.NO_OFFSET
											: Reason.NORMAL;
	publisher.publishEvent(new ConsumerStoppedEvent(this, this.thisOrParentContainer,
			reason));
}
private void publishConsumerStartingEvent() {
	// Count the latch down unconditionally first: doStart() blocks on it, and
	// the event publication below is optional (publisher may be absent).
	this.startLatch.countDown();
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher != null) {
		publisher.publishEvent(new ConsumerStartingEvent(this, this.thisOrParentContainer));
	}
}
// Publish a consumer-started event; no-op without an event publisher.
private void publishConsumerStartedEvent() {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ConsumerStartedEvent(this, this.thisOrParentContainer));
}
// Publish a failed-to-start event; no-op without an event publisher.
private void publishConsumerFailedToStart() {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ConsumerFailedToStartEvent(this, this.thisOrParentContainer));
}
@Override
protected AbstractMessageListenerContainer<?, ?> parentOrThis() {
	// The delegating (parent) container when this is a sub-container, else this
	// container itself (see the constructor).
	return this.thisOrParentContainer;
}
@Override
public String toString() {
	// Snapshot once: the original called getAssignedPartitions() twice, and the
	// two calls chase a volatile field so they could disagree mid-assignment.
	Collection<TopicPartition> assigned = getAssignedPartitions();
	return "KafkaMessageListenerContainer [id=" + getBeanName()
			+ (this.clientIdSuffix != null ? ", clientIndex=" + this.clientIdSuffix : "")
			+ ", topicPartitions="
			+ (assigned == null ? "none assigned" : assigned)
			+ "]";
}
private final class ListenerConsumer implements SchedulingAwareRunnable, ConsumerSeekCallback {
// ---- log message fragments ----
private static final String COMMITTING = "Committing: ";

private static final String ERROR_HANDLER_THREW_AN_EXCEPTION = "Error handler threw an exception";

private final LogAccessor logger = KafkaMessageListenerContainer.this.logger; // NOSONAR hide

private final ContainerProperties containerProperties = getContainerProperties();

// Commit callback: user-supplied, or a logging fallback.
private final OffsetCommitCallback commitCallback = this.containerProperties.getCommitCallback() != null
		? this.containerProperties.getCommitCallback()
		: new LoggingCommitCallback();

private final Consumer<K, V> consumer;

// topic -> (partition -> next offset to commit)
private final Map<String, Map<Integer, Long>> offsets = new HashMap<>();

private final Collection<TopicPartition> assignedPartitions = new LinkedHashSet<>();

private final Map<TopicPartition, OffsetAndMetadata> lastCommits = new HashMap<>();

private final Map<TopicPartition, Long> savedPositions = new HashMap<>();

// ---- listener plumbing: exactly one of listener/batchListener is non-null ----
private final GenericMessageListener<?> genericListener;

private final ConsumerSeekAware consumerSeekAwareListener;

private final MessageListener<K, V> listener;

private final BatchMessageListener<K, V> batchListener;

private final ListenerType listenerType;

private final boolean isConsumerAwareListener;

private final boolean isBatchListener;

private final boolean wantsFullRecords;

// ---- ack mode flags, precomputed from the container properties ----
private final boolean autoCommit;

private final boolean isManualAck = this.containerProperties.getAckMode().equals(AckMode.MANUAL);

private final boolean isCountAck = this.containerProperties.getAckMode().equals(AckMode.COUNT)
		|| this.containerProperties.getAckMode().equals(AckMode.COUNT_TIME);

private final boolean isTimeOnlyAck = this.containerProperties.getAckMode().equals(AckMode.TIME);

private final boolean isManualImmediateAck =
		this.containerProperties.getAckMode().equals(AckMode.MANUAL_IMMEDIATE);

private final boolean isAnyManualAck = this.isManualAck || this.isManualImmediateAck;

private final boolean isRecordAck = this.containerProperties.getAckMode().equals(AckMode.RECORD);

// Pending manual acks / seeks handed over from listener threads.
private final BlockingQueue<ConsumerRecord<K, V>> acks = new LinkedBlockingQueue<>();

private final BlockingQueue<TopicPartitionOffset> seeks = new LinkedBlockingQueue<>();

private final CommonErrorHandler commonErrorHandler;

// ---- transaction support ----
private final PlatformTransactionManager transactionManager = this.containerProperties.getTransactionManager();

@SuppressWarnings(RAWTYPES)
private final KafkaAwareTransactionManager kafkaTxManager =
		this.transactionManager instanceof KafkaAwareTransactionManager
				? ((KafkaAwareTransactionManager) this.transactionManager) : null;

private final TransactionTemplate transactionTemplate;

private final String consumerGroupId = getGroupId();

private final TaskScheduler taskScheduler;

private final ScheduledFuture<?> monitorTask;

private final LogIfLevelEnabled commitLogger = new LogIfLevelEnabled(this.logger,
		this.containerProperties.getCommitLogLevel());

private final Duration pollTimeout = Duration.ofMillis(this.containerProperties.getPollTimeout());

private final boolean checkNullKeyForExceptions;

private final boolean checkNullValueForExceptions;

private final boolean syncCommits = this.containerProperties.isSyncCommits();

private final Duration syncCommitTimeout;

// ---- interceptors: early (before tx) vs in-tx variants; common is either ----
private final RecordInterceptor<K, V> recordInterceptor = !isInterceptBeforeTx() && this.kafkaTxManager != null
		? getRecordInterceptor()
		: null;

private final RecordInterceptor<K, V> earlyRecordInterceptor =
		isInterceptBeforeTx() || this.kafkaTxManager == null
				? getRecordInterceptor()
				: null;

private final RecordInterceptor<K, V> commonRecordInterceptor = getRecordInterceptor();

private final BatchInterceptor<K, V> batchInterceptor = !isInterceptBeforeTx() && this.kafkaTxManager != null
		? getBatchInterceptor()
		: null;

private final BatchInterceptor<K, V> earlyBatchInterceptor =
		isInterceptBeforeTx() || this.kafkaTxManager == null
				? getBatchInterceptor()
				: null;

private final BatchInterceptor<K, V> commonBatchInterceptor = getBatchInterceptor();

private final ThreadStateProcessor pollThreadStateProcessor;

private final ConsumerSeekCallback seekCallback = new InitialOrIdleSeekCallback();

private final long maxPollInterval;

private final MicrometerHolder micrometerHolder;

private final AtomicBoolean polling = new AtomicBoolean();

private final boolean subBatchPerPartition;

private final Duration authExceptionRetryInterval =
		this.containerProperties.getAuthExceptionRetryInterval();

private final AssignmentCommitOption autoCommitOption = this.containerProperties.getAssignmentCommitOption();

private final boolean commitCurrentOnAssignment;

private final DeliveryAttemptAware deliveryAttemptAware;

private final EOSMode eosMode = this.containerProperties.getEosMode();

private final Map<TopicPartition, OffsetAndMetadata> commitsDuringRebalance = new HashMap<>();

private final String clientId;

private final boolean fixTxOffsets = this.containerProperties.isFixTxOffsets();

private final boolean stopImmediate = this.containerProperties.isStopImmediate();

private final Set<TopicPartition> pausedPartitions;

// Async-ack bookkeeping; only allocated for manual acks with async acks enabled.
private final Map<TopicPartition, List<Long>> offsetsInThisBatch =
		this.isAnyManualAck && this.containerProperties.isAsyncAcks()
				? new HashMap<>()
				: null;

private final Map<TopicPartition, List<ConsumerRecord<K, V>>> deferredOffsets =
		this.isAnyManualAck && this.containerProperties.isAsyncAcks()
				? new HashMap<>()
				: null;

// ---- per-partition idle tracking ----
private final Map<TopicPartition, Long> lastReceivePartition;

private final Map<TopicPartition, Long> lastAlertPartition;

private final Map<TopicPartition, Boolean> wasIdlePartition;

private final byte[] listenerinfo = getListenerInfo();

private final Header infoHeader = new RecordHeader(KafkaHeaders.LISTENER_INFO, this.listenerinfo);

private final Set<TopicPartition> pausedForNack = new HashSet<>();

// ---- mutable state, owned by the consumer thread unless marked volatile ----
private Map<TopicPartition, OffsetMetadata> definedPartitions;

private int count;

private long last = System.currentTimeMillis();

private boolean fatalError;

private boolean taskSchedulerExplicitlySet;

private long lastReceive = System.currentTimeMillis();

private long lastAlertAt = this.lastReceive;

private long nackSleep = -1;

private long nackWake;

private int nackIndex;

private Iterator<TopicPartition> batchIterator;

private ConsumerRecords<K, V> lastBatch;

private Producer<?, ?> producer;

private boolean producerPerConsumerPartition;

private boolean commitRecovered;

private boolean wasIdle;

private boolean batchFailed;

private boolean pausedForAsyncAcks;

private boolean receivedSome;

// Volatile: read by container methods from other threads.
private volatile boolean consumerPaused;

private volatile Thread consumerThread;

private volatile long lastPoll = System.currentTimeMillis();
@SuppressWarnings(UNCHECKED)
ListenerConsumer(GenericMessageListener<?> listener, ListenerType listenerType) {
	Properties consumerProperties = propertiesFromProperties();
	checkGroupInstance(consumerProperties, KafkaMessageListenerContainer.this.consumerFactory);
	this.autoCommit = determineAutoCommit(consumerProperties);
	this.consumer =
			KafkaMessageListenerContainer.this.consumerFactory.createConsumer(
					this.consumerGroupId,
					this.containerProperties.getClientId(),
					KafkaMessageListenerContainer.this.clientIdSuffix,
					consumerProperties);
	this.clientId = determineClientId();
	this.transactionTemplate = determineTransactionTemplate();
	this.genericListener = listener;
	this.consumerSeekAwareListener = checkConsumerSeekAware(listener);
	this.commitCurrentOnAssignment = determineCommitCurrent(consumerProperties,
			KafkaMessageListenerContainer.this.consumerFactory.getConfigurationProperties());
	subscribeOrAssignTopics(this.consumer);
	// Exactly one of listener/batchListener ends up non-null; anything else is rejected.
	if (listener instanceof BatchMessageListener) {
		this.listener = null;
		this.batchListener = (BatchMessageListener<K, V>) listener;
		this.isBatchListener = true;
		this.wantsFullRecords = this.batchListener.wantsPollResult();
		this.pollThreadStateProcessor = setUpPollProcessor(true);
	}
	else if (listener instanceof MessageListener) {
		this.listener = (MessageListener<K, V>) listener;
		this.batchListener = null;
		this.isBatchListener = false;
		this.wantsFullRecords = false;
		this.pollThreadStateProcessor = setUpPollProcessor(false);
	}
	else {
		throw new IllegalArgumentException("Listener must be one of 'MessageListener', "
				+ "'BatchMessageListener', or the variants that are consumer aware and/or "
				+ "Acknowledging"
				+ " not " + listener.getClass().getName());
	}
	this.listenerType = listenerType;
	this.isConsumerAwareListener = listenerType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE)
			|| listenerType.equals(ListenerType.CONSUMER_AWARE);
	this.commonErrorHandler = determineCommonErrorHandler();
	Assert.state(!this.isBatchListener || !this.isRecordAck,
			"Cannot use AckMode.RECORD with a batch listener");
	// Use the configured scheduler if provided; otherwise create (and later own) one.
	if (this.containerProperties.getScheduler() != null) {
		this.taskScheduler = this.containerProperties.getScheduler();
		this.taskSchedulerExplicitlySet = true;
	}
	else {
		ThreadPoolTaskScheduler threadPoolTaskScheduler = new ThreadPoolTaskScheduler();
		threadPoolTaskScheduler.initialize();
		this.taskScheduler = threadPoolTaskScheduler;
	}
	this.monitorTask = this.taskScheduler.scheduleAtFixedRate(this::checkConsumer, // NOSONAR
			Duration.ofSeconds(this.containerProperties.getMonitorInterval()));
	if (this.containerProperties.isLogContainerConfig()) {
		this.logger.info(toString());
	}
	Map<String, Object> props = KafkaMessageListenerContainer.this.consumerFactory.getConfigurationProperties();
	this.checkNullKeyForExceptions = this.containerProperties.isCheckDeserExWhenKeyNull()
			|| checkDeserializer(findDeserializerClass(props, consumerProperties, false));
	this.checkNullValueForExceptions = this.containerProperties.isCheckDeserExWhenValueNull()
			|| checkDeserializer(findDeserializerClass(props, consumerProperties, true));
	this.syncCommitTimeout = determineSyncCommitTimeout();
	if (this.containerProperties.getSyncCommitTimeout() == null) {
		// update the property so we can use it directly from code elsewhere
		this.containerProperties.setSyncCommitTimeout(this.syncCommitTimeout);
		if (KafkaMessageListenerContainer.this.thisOrParentContainer != null) {
			KafkaMessageListenerContainer.this.thisOrParentContainer
					.getContainerProperties()
					.setSyncCommitTimeout(this.syncCommitTimeout);
		}
	}
	this.maxPollInterval = obtainMaxPollInterval(consumerProperties);
	this.micrometerHolder = obtainMicrometerHolder();
	this.deliveryAttemptAware = setupDeliveryAttemptAware();
	this.subBatchPerPartition = setupSubBatchPerPartition();
	this.lastReceivePartition = new HashMap<>();
	this.lastAlertPartition = new HashMap<>();
	this.wasIdlePartition = new HashMap<>();
	this.pausedPartitions = new HashSet<>();
}
@Nullable
private ThreadStateProcessor setUpPollProcessor(boolean batch) {
    /*
     * Choose the interceptor (if any) that receives per-poll thread-state
     * callbacks: the batch interceptor for batch listeners, the record
     * interceptor otherwise. Either may be null.
     */
    return batch ? this.commonBatchInterceptor : this.commonRecordInterceptor;
}
@Nullable
private CommonErrorHandler determineCommonErrorHandler() {
    // Resolve the error handler for this consumer, bridging the deprecated
    // GenericErrorHandler API onto CommonErrorHandler when necessary.
    CommonErrorHandler common = getCommonErrorHandler();
    @SuppressWarnings(DEPRECATION)
    GenericErrorHandler<?> errHandler = getGenericErrorHandler();
    if (common != null) {
        // A CommonErrorHandler always wins over the legacy handler.
        if (errHandler != null) {
            this.logger.debug("GenericErrorHandler is ignored when a CommonErrorHandler is provided");
        }
        return common;
    }
    if (errHandler == null && this.transactionManager == null) {
        // No handler configured and not transactional: use the framework default.
        return new DefaultErrorHandler();
    }
    // Legacy handler path: validate type compatibility with the listener kind,
    // then wrap in an adapter. With transactions and no handler, return null
    // (errors are handled by the after-rollback processor instead).
    if (this.isBatchListener) {
        validateErrorHandler(true, errHandler);
        BatchErrorHandler batchErrorHandler = (BatchErrorHandler) errHandler;
        if (batchErrorHandler != null) {
            return new ErrorHandlerAdapter(batchErrorHandler);
        }
        else {
            return null;
        }
    }
    else {
        validateErrorHandler(false, errHandler);
        ErrorHandler eh = (ErrorHandler) errHandler;
        if (eh != null) {
            return new ErrorHandlerAdapter(eh);
        }
        else {
            return null;
        }
    }
}
private Properties propertiesFromProperties() {
    // Flatten the configured consumer property overrides - including any
    // entries supplied via the Properties(defaults) constructor - into a
    // single Properties instance.
    Properties propertyOverrides = this.containerProperties.getKafkaConsumerProperties();
    Properties props = new Properties();
    props.putAll(propertyOverrides);
    // putAll() copies only entries stored directly in the Hashtable; defaults
    // are invisible to it but ARE reported by stringPropertyNames(). Copy any
    // missing ones explicitly. Note: containsKey() is required here -
    // Hashtable.contains() matches *values*, not keys, and would wrongly skip
    // a default whose name happens to equal another property's value.
    for (String name : propertyOverrides.stringPropertyNames()) {
        if (!props.containsKey(name)) {
            props.setProperty(name, propertyOverrides.getProperty(name));
        }
    }
    return props;
}
/**
 * Return the client id determined for this consumer.
 * @return the client id.
 */
String getClientId() {
    return this.clientId;
}
private String determineClientId() {
    // The Consumer API does not expose the client id directly; recover it from
    // the tags of the first metric, falling back to a placeholder when the
    // consumer reports no metrics.
    Iterator<MetricName> metricNames = this.consumer.metrics().keySet().iterator();
    return metricNames.hasNext()
            ? metricNames.next().tags().get("client-id")
            : "unknown.client.id";
}
private void checkGroupInstance(Properties properties, ConsumerFactory<K, V> consumerFactory) {
    // Resolve group.instance.id: per-container overrides first, then the
    // consumer factory configuration.
    String groupInstance = properties.getProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
    if (!StringUtils.hasText(groupInstance)) {
        Object fromFactory = consumerFactory.getConfigurationProperties()
                .get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
        if (fromFactory instanceof String) {
            groupInstance = (String) fromFactory;
        }
    }
    // When a client id suffix is present (concurrent containers), append it so
    // each child consumer gets a distinct group.instance.id.
    if (StringUtils.hasText(KafkaMessageListenerContainer.this.clientIdSuffix)
            && StringUtils.hasText(groupInstance)) {
        properties.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG,
                groupInstance + KafkaMessageListenerContainer.this.clientIdSuffix);
    }
}
@SuppressWarnings(DEPRECATION)
private boolean setupSubBatchPerPartition() {
    // An explicit container property always wins; otherwise sub-batching
    // defaults to on only when using EOS V1 transactions.
    Boolean explicitSetting = this.containerProperties.getSubBatchPerPartition();
    if (explicitSetting != null) {
        return explicitSetting;
    }
    return this.transactionManager != null && this.eosMode.getMode().equals(EOSMode.V1);
}
@Nullable
private DeliveryAttemptAware setupDeliveryAttemptAware() {
    // Only relevant when the delivery-attempt header is enabled. The attempt
    // count source is the after-rollback processor when transactional,
    // otherwise the common error handler (if it supports the header).
    if (!this.containerProperties.isDeliveryAttemptHeader()) {
        return null;
    }
    if (this.transactionManager != null) {
        Object rollbackProcessor = getAfterRollbackProcessor();
        return rollbackProcessor instanceof DeliveryAttemptAware
                ? (DeliveryAttemptAware) rollbackProcessor
                : null;
    }
    return this.commonErrorHandler.deliveryAttemptHeader() ? this.commonErrorHandler : null;
}
private boolean determineCommitCurrent(Properties consumerProperties, Map<String, Object> factoryConfigs) {
    // Decide whether current offsets should be committed when partitions are
    // assigned, based on the AssignmentCommitOption and auto.offset.reset.
    if (AssignmentCommitOption.NEVER.equals(this.autoCommitOption)) {
        return false;
    }
    // ALWAYS commits on assignment (unless Kafka auto-commit is on).
    if (!this.autoCommit && AssignmentCommitOption.ALWAYS.equals(this.autoCommitOption)) {
        return true;
    }
    // Resolve auto.offset.reset: per-container overrides first, then factory config.
    String autoOffsetReset = consumerProperties.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
    if (autoOffsetReset == null) {
        Object config = factoryConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
        if (config instanceof String) {
            autoOffsetReset = (String) config;
        }
    }
    // "latest" is also the Kafka default when the property is absent.
    boolean resetLatest = autoOffsetReset == null || autoOffsetReset.equals("latest");
    boolean latestOnlyOption = AssignmentCommitOption.LATEST_ONLY.equals(this.autoCommitOption)
            || AssignmentCommitOption.LATEST_ONLY_NO_TX.equals(this.autoCommitOption);
    // LATEST_ONLY(_NO_TX): commit on assignment only when resetting to latest
    // and Kafka auto-commit is not in play.
    return !this.autoCommit && resetLatest && latestOnlyOption;
}
private long obtainMaxPollInterval(Properties consumerProperties) {
    // Resolve max.poll.interval.ms: per-container overrides first, then the
    // factory configuration; accept Duration, Number, or String forms.
    Object value = consumerProperties.get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG);
    if (value == null) {
        value = KafkaMessageListenerContainer.this.consumerFactory.getConfigurationProperties()
                .get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG);
    }
    if (value instanceof Duration) {
        return ((Duration) value).toMillis();
    }
    if (value instanceof Number) {
        return ((Number) value).longValue();
    }
    if (value instanceof String) {
        return Long.parseLong((String) value);
    }
    if (value != null) {
        Object valueToLog = value;
        this.logger.warn(() -> "Unexpected type: " + valueToLog.getClass().getName()
                + " in property '"
                + ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG
                + "'; using Kafka default.");
    }
    // Fall back to the Kafka default for unset or unrecognized values.
    return (int) CONSUMER_CONFIG_DEFAULTS.get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG);
}
@Nullable
private ConsumerSeekAware checkConsumerSeekAware(GenericMessageListener<?> candidate) {
    // Expose the listener as ConsumerSeekAware only when it implements it.
    if (candidate instanceof ConsumerSeekAware) {
        return (ConsumerSeekAware) candidate;
    }
    return null;
}
/**
 * Return whether this consumer is currently paused at the consumer level.
 * @return true if paused.
 */
boolean isConsumerPaused() {
    return this.consumerPaused;
}
/**
 * Return whether the given partition is currently paused by this container.
 * @param topicPartition the partition to check.
 * @return true if paused.
 */
boolean isPartitionPaused(TopicPartition topicPartition) {
    return this.pausedPartitions.contains(topicPartition);
}
@Nullable
private TransactionTemplate determineTransactionTemplate() {
    // Side effect: record whether the tx producer factory uses a producer per
    // consumer partition (affects producer lifecycle elsewhere).
    if (this.kafkaTxManager != null) {
        this.producerPerConsumerPartition =
                this.kafkaTxManager.getProducerFactory().isProducerPerConsumerPartition();
    }
    if (this.transactionManager == null) {
        return null;
    }
    TransactionTemplate template = new TransactionTemplate(this.transactionManager);
    TransactionDefinition definition = this.containerProperties.getTransactionDefinition();
    // Only REQUIRED / REQUIRES_NEW make sense for listener-driven transactions.
    Assert.state(definition == null
            || definition.getPropagationBehavior() == TransactionDefinition.PROPAGATION_REQUIRED
            || definition.getPropagationBehavior() == TransactionDefinition.PROPAGATION_REQUIRES_NEW,
            "Transaction propagation behavior must be REQUIRED or REQUIRES_NEW");
    if (definition != null) {
        // Copy isolation, timeout etc. from the user-supplied definition.
        BeanUtils.copyProperties(definition, template);
    }
    return template;
}
private boolean determineAutoCommit(Properties consumerProperties) {
    // Determine the effective enable.auto.commit setting; precedence is:
    // per-container override > factory configuration > forced "false" default.
    boolean isAutoCommit;
    String autoCommitOverride = consumerProperties.getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
    if (!KafkaMessageListenerContainer.this.consumerFactory.getConfigurationProperties()
            .containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)
            && autoCommitOverride == null) {
        // Neither source sets it: force it off so the container manages commits.
        consumerProperties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        isAutoCommit = false;
    }
    else if (autoCommitOverride != null) {
        isAutoCommit = Boolean.parseBoolean(autoCommitOverride);
    }
    else {
        isAutoCommit = KafkaMessageListenerContainer.this.consumerFactory.isAutoCommit();
    }
    // Manual ack modes are incompatible with Kafka-side auto commit.
    Assert.state(!this.isAnyManualAck || !isAutoCommit,
            () -> "Consumer cannot be configured for auto commit for ackMode "
                    + this.containerProperties.getAckMode());
    return isAutoCommit;
}
private Duration determineSyncCommitTimeout() {
    // An explicitly configured sync commit timeout wins outright.
    Duration configured = this.containerProperties.getSyncCommitTimeout();
    if (configured != null) {
        return configured;
    }
    // Otherwise derive from default.api.timeout.ms: per-container overrides
    // first, then the factory configuration; accept Duration/Number/String.
    Object timeout = this.containerProperties.getKafkaConsumerProperties()
            .get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
    if (timeout == null) {
        timeout = KafkaMessageListenerContainer.this.consumerFactory.getConfigurationProperties()
                .get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
    }
    if (timeout instanceof Duration) {
        return (Duration) timeout;
    }
    if (timeout instanceof Number) {
        return Duration.ofMillis(((Number) timeout).longValue());
    }
    if (timeout instanceof String) {
        return Duration.ofMillis(Long.parseLong((String) timeout));
    }
    if (timeout != null) {
        Object timeoutToLog = timeout;
        this.logger.warn(() -> "Unexpected type: " + timeoutToLog.getClass().getName()
                + " in property '"
                + ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG
                + "'; defaulting to Kafka default for sync commit timeouts");
    }
    // Fall back to the Kafka default for default.api.timeout.ms.
    return Duration
            .ofMillis((int) CONSUMER_CONFIG_DEFAULTS.get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
}
@Nullable
private Object findDeserializerClass(Map<String, Object> props, Properties consumerOverrides, boolean isValue) {
    // Locate the configured deserializer for the key or value. Precedence:
    // a deserializer instance on the factory > per-container overrides >
    // factory configuration properties. May return a Class, a String class
    // name, or null.
    String configKey = isValue
            ? ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG
            : ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG;
    Object factoryInstance = isValue
            ? KafkaMessageListenerContainer.this.consumerFactory.getValueDeserializer()
            : KafkaMessageListenerContainer.this.consumerFactory.getKeyDeserializer();
    if (factoryInstance != null) {
        return factoryInstance.getClass();
    }
    Object fromOverrides = consumerOverrides.get(configKey);
    return fromOverrides != null ? fromOverrides : props.get(configKey);
}
private void subscribeOrAssignTopics(final Consumer<? super K, ? super V> subscribingConsumer) {
    if (KafkaMessageListenerContainer.this.topicPartitions != null) {
        // Explicit partition assignment: record the requested seek metadata
        // per partition, then assign (no rebalance listener with assign()).
        List<TopicPartitionOffset> requested =
                Arrays.asList(KafkaMessageListenerContainer.this.topicPartitions);
        this.definedPartitions = new LinkedHashMap<>(requested.size());
        for (TopicPartitionOffset tpo : requested) {
            this.definedPartitions.put(tpo.getTopicPartition(),
                    new OffsetMetadata(tpo.getOffset(), tpo.isRelativeToCurrent(),
                            tpo.getPosition()));
        }
        subscribingConsumer.assign(new ArrayList<>(this.definedPartitions.keySet()));
        return;
    }
    // Group management: subscribe by pattern when configured, else by topic list.
    ConsumerRebalanceListener rebalanceListener = new ListenerConsumerRebalanceListener();
    Pattern topicPattern = this.containerProperties.getTopicPattern();
    if (topicPattern != null) {
        subscribingConsumer.subscribe(topicPattern, rebalanceListener);
    }
    else {
        subscribingConsumer.subscribe(Arrays.asList(this.containerProperties.getTopics()), // NOSONAR
                rebalanceListener);
    }
}
private boolean checkDeserializer(@Nullable Object deser) {
    // Return true when the configured deserializer (a Class or class name) is
    // an ErrorHandlingDeserializer; false for null; throw for any other type.
    if (deser == null) {
        return false;
    }
    Class<?> resolved;
    if (deser instanceof Class) {
        resolved = (Class<?>) deser;
    }
    else if (deser instanceof String) {
        try {
            // Prefer the application context's class loader when available.
            ApplicationContext applicationContext = getApplicationContext();
            ClassLoader classLoader = applicationContext == null
                    ? getClass().getClassLoader()
                    : applicationContext.getClassLoader();
            resolved = ClassUtils.forName((String) deser, classLoader);
        }
        catch (ClassNotFoundException | LinkageError e) {
            throw new IllegalStateException(e);
        }
    }
    else {
        throw new IllegalStateException("Deserializer must be a class or class name, not a " + deser.getClass());
    }
    return ErrorHandlingDeserializer.class.isAssignableFrom(resolved);
}
protected void checkConsumer() {
    // Publish a non-responsive-consumer event when the time since the last
    // poll exceeds pollTimeout * noPollThreshold.
    long sinceLastPoll = System.currentTimeMillis() - this.lastPoll;
    float pollTimeoutRatio = ((float) sinceLastPoll) / (float) this.containerProperties.getPollTimeout();
    if (pollTimeoutRatio > this.containerProperties.getNoPollThreshold()) {
        publishNonResponsiveConsumerEvent(sinceLastPoll, this.consumer);
    }
}
@Nullable
private MicrometerHolder obtainMicrometerHolder() {
    // Build the Micrometer timer holder when micrometer is on the classpath
    // and enabled; null (metrics disabled) otherwise or when no meter
    // registry is available.
    try {
        if (KafkaUtils.MICROMETER_PRESENT && this.containerProperties.isMicrometerEnabled()) {
            return new MicrometerHolder(getApplicationContext(), getBeanName(),
                    "spring.kafka.listener", "Kafka Listener Timer",
                    this.containerProperties.getMicrometerTags());
        }
    }
    catch (@SuppressWarnings(UNUSED) IllegalStateException ex) {
        // NOSONAR - no micrometer or meter registry
    }
    return null;
}
private void seekPartitions(Collection<TopicPartition> partitions, boolean idle) {
    // Snapshot current positions and hand them to the ConsumerSeekAware
    // listener via the appropriate callback (idle container vs. assignment).
    this.consumerSeekAwareListener.registerSeekCallback(this);
    Map<TopicPartition, Long> positions = new HashMap<>();
    partitions.forEach(tp -> positions.put(tp, ListenerConsumer.this.consumer.position(tp)));
    if (idle) {
        this.consumerSeekAwareListener.onIdleContainer(positions, this.seekCallback);
    }
    else {
        this.consumerSeekAwareListener.onPartitionsAssigned(positions, this.seekCallback);
    }
}
private void validateErrorHandler(boolean batch, @Nullable GenericErrorHandler<?> errHandler) {
    // Verify the legacy error handler's type matches the listener kind.
    if (errHandler == null) {
        return;
    }
    Class<?> handlerClass = errHandler.getClass();
    boolean compatible = batch
            ? BatchErrorHandler.class.isAssignableFrom(handlerClass)
            : ErrorHandler.class.isAssignableFrom(handlerClass);
    Assert.state(compatible,
            () -> "Error handler is not compatible with the message listener, expecting an instance of "
                    + (batch ? "BatchErrorHandler" : "ErrorHandler") + " not " + errHandler.getClass().getName());
}
/**
 * This consumer task runs for the life of the container.
 * @return always true.
 */
@Override
public boolean isLongLived() {
    return true;
}
@SuppressWarnings(DEPRECATION)
@Override // NOSONAR complexity
public void run() {
    // Main consumer loop: initialize, then poll-and-invoke until stopped or a
    // fatal error occurs; always wrap up (commit/close/publish events) on exit.
    ListenerUtils.setLogOnlyMetadata(this.containerProperties.isOnlyLogRecordMetadata());
    publishConsumerStartingEvent();
    this.consumerThread = Thread.currentThread();
    setupSeeks();
    KafkaUtils.setConsumerGroupId(this.consumerGroupId);
    this.count = 0;
    this.last = System.currentTimeMillis();
    initAssignedPartitions();
    publishConsumerStartedEvent();
    Throwable exitThrowable = null;
    while (isRunning()) {
        try {
            pollAndInvoke();
        }
        catch (NoOffsetForPartitionException nofpe) {
            // Fatal: no committed offset and no auto.offset.reset policy.
            this.fatalError = true;
            ListenerConsumer.this.logger.error(nofpe, "No offset and no reset policy");
            exitThrowable = nofpe;
            break;
        }
        catch (AuthenticationException | AuthorizationException ae) {
            if (this.authExceptionRetryInterval == null) {
                // No retry interval configured - treat as fatal and stop.
                ListenerConsumer.this.logger.error(ae,
                        "Authentication/Authorization Exception and no authExceptionRetryInterval set");
                this.fatalError = true;
                exitThrowable = ae;
                break;
            }
            else {
                ListenerConsumer.this.logger.error(ae,
                        "Authentication/Authorization Exception, retrying in "
                                + this.authExceptionRetryInterval.toMillis() + " ms");
                // We can't pause/resume here, as KafkaConsumer doesn't take pausing
                // into account when committing, hence risk of being flooded with
                // GroupAuthorizationExceptions.
                // see: https://github.com/spring-projects/spring-kafka/pull/1337
                sleepFor(this.authExceptionRetryInterval);
            }
        }
        catch (FencedInstanceIdException fie) {
            // Fatal: another consumer with the same group.instance.id joined.
            this.fatalError = true;
            ListenerConsumer.this.logger.error(fie, "'" + ConsumerConfig.GROUP_INSTANCE_ID_CONFIG
                    + "' has been fenced");
            exitThrowable = fie;
            break;
        }
        catch (StopAfterFenceException e) {
            // Producer fencing with stopContainerWhenFenced: stop gracefully.
            this.logger.error(e, "Stopping container due to fencing");
            stop(false);
            exitThrowable = e;
        }
        catch (Error e) { // NOSONAR - rethrown
            // JVM Error: run the emergency-stop hook, wrap up, and rethrow.
            Runnable runnable = KafkaMessageListenerContainer.this.emergencyStop;
            if (runnable != null) {
                runnable.run();
            }
            this.logger.error(e, "Stopping container due to an Error");
            wrapUp(e);
            throw e;
        }
        catch (Exception e) {
            // Non-fatal exceptions are delegated to the error handler; the
            // loop continues.
            handleConsumerException(e);
        }
        finally {
            // Give the interceptor a chance to clear per-poll thread state.
            clearThreadState();
        }
    }
    wrapUp(exitThrowable);
}
private void setupSeeks() {
    // Give a ConsumerSeekAware listener its seek callback before polling starts.
    ConsumerSeekAware seekAware = this.consumerSeekAwareListener;
    if (seekAware != null) {
        seekAware.registerSeekCallback(this);
    }
}
private void initAssignedPartitions() {
    // With explicit partition assignment, apply any configured initial offsets.
    // Failures are logged, not fatal.
    if (!isRunning() || this.definedPartitions == null) {
        return;
    }
    try {
        initPartitionsIfNeeded();
    }
    catch (Exception e) {
        this.logger.error(e, "Failed to set initial offsets");
    }
}
protected void pollAndInvoke() {
    // One iteration of the consumer loop: housekeeping, poll, then invoke the
    // listener. Statement order here is significant.
    if (!this.autoCommit && !this.isRecordAck) {
        processCommits();
    }
    fixTxOffsetsIfNeeded();
    idleBetweenPollIfNecessary();
    if (this.seeks.size() > 0) {
        processSeeks();
    }
    pauseConsumerIfNecessary();
    pausePartitionsIfNecessary();
    this.lastPoll = System.currentTimeMillis();
    if (!isRunning()) {
        return;
    }
    // Mark that we are inside poll() so a stop can wake the consumer.
    this.polling.set(true);
    ConsumerRecords<K, V> records = doPoll();
    if (!this.polling.compareAndSet(true, false) && records != null) {
        /*
         * There is a small race condition where wakeIfNecessaryForStop was called between
         * exiting the poll and before we reset the boolean.
         */
        if (records.count() > 0) {
            this.logger.debug(() -> "Discarding polled records, container stopped: " + records.count());
        }
        return;
    }
    debugRecords(records);
    resumeConsumerIfNeccessary();
    if (!this.consumerPaused) {
        resumePartitionsIfNecessary();
    }
    invokeIfHaveRecords(records);
}
private void invokeIfHaveRecords(@Nullable ConsumerRecords<K, V> records) {
    // Invoke the listener when the poll returned records; otherwise update
    // container-level idle state. Partition-level idle state is checked
    // whenever some assigned partition returned nothing.
    if (records != null && records.count() > 0) {
        this.receivedSome = true;
        savePositionsIfNeeded(records);
        notIdle();
        notIdlePartitions(records.partitions());
        invokeListener(records);
    }
    else {
        checkIdle();
    }
    if (records == null || records.count() == 0
            || records.partitions().size() < this.consumer.assignment().size()) {
        checkIdlePartitions();
    }
}
private void clearThreadState() {
    // Let the interceptor (if any) clear per-poll thread-local state.
    ThreadStateProcessor processor = this.pollThreadStateProcessor;
    if (processor != null) {
        processor.clearThreadState(this.consumer);
    }
}
private void checkIdlePartitions() {
    // Evaluate idleness for every partition currently assigned to this consumer.
    this.consumer.assignment().forEach(this::checkIdlePartition);
}
private void checkIdlePartition(TopicPartition topicPartition) {
    // Publish an idle-partition event when no record has been received on the
    // partition for idlePartitionEventInterval, throttled so an event fires at
    // most once per interval.
    Long idlePartitionEventInterval = this.containerProperties.getIdlePartitionEventInterval();
    if (idlePartitionEventInterval != null) {
        long now = System.currentTimeMillis();
        // First sighting of a partition counts as "received now".
        Long lstReceive = this.lastReceivePartition.computeIfAbsent(topicPartition, newTopicPartition -> now);
        Long lstAlertAt = this.lastAlertPartition.computeIfAbsent(topicPartition, newTopicPartition -> now);
        if (now > lstReceive + idlePartitionEventInterval
                && now > lstAlertAt + idlePartitionEventInterval) {
            this.wasIdlePartition.put(topicPartition, true);
            publishIdlePartitionEvent(now - lstReceive, topicPartition, this.consumer,
                    isPartitionPauseRequested(topicPartition));
            this.lastAlertPartition.put(topicPartition, now);
            // Also notify a ConsumerSeekAware listener that the partition is idle.
            if (this.consumerSeekAwareListener != null) {
                seekPartitions(Collections.singletonList(topicPartition), true);
            }
        }
    }
}
private void notIdlePartitions(Set<TopicPartition> partitions) {
    // Record receipt per partition only when partition idle events are enabled.
    if (this.containerProperties.getIdlePartitionEventInterval() == null) {
        return;
    }
    partitions.forEach(this::notIdlePartition);
}
private void notIdlePartition(TopicPartition topicPartition) {
    // A record arrived on this partition: publish a no-longer-idle event if it
    // had been flagged idle, and refresh its last-receive timestamp.
    long now = System.currentTimeMillis();
    if (Boolean.TRUE.equals(this.wasIdlePartition.get(topicPartition))) {
        this.wasIdlePartition.put(topicPartition, false);
        Long lastReceived = this.lastReceivePartition.computeIfAbsent(topicPartition, tp -> now);
        publishNoLongerIdlePartitionEvent(now - lastReceived, this.consumer, topicPartition);
    }
    this.lastReceivePartition.put(topicPartition, now);
}
private void notIdle() {
    // Records arrived: publish a no-longer-idle container event if the
    // container had been flagged idle, and refresh the last-receive timestamp.
    if (this.containerProperties.getIdleEventInterval() == null) {
        return;
    }
    long now = System.currentTimeMillis();
    if (this.wasIdle) {
        this.wasIdle = false;
        publishNoLongerIdleContainerEvent(now - this.lastReceive, this.consumer);
    }
    this.lastReceive = now;
}
private void savePositionsIfNeeded(ConsumerRecords<K, V> records) {
    // When TX offset fixing is enabled, snapshot the consumer position of each
    // polled partition so later seeks can be detected.
    if (this.fixTxOffsets) {
        this.savedPositions.clear();
        for (TopicPartition tp : records.partitions()) {
            this.savedPositions.put(tp, this.consumer.position(tp));
        }
    }
}
@SuppressWarnings("rawtypes")
private void fixTxOffsetsIfNeeded() {
    // After transactional processing, the committed offset can lag the
    // consumer position (transaction markers). Bring the committed offsets
    // up to the current position, unless the application performed seeks
    // (detected via savedPositions) in which case correction is skipped.
    if (this.fixTxOffsets) {
        try {
            Map<TopicPartition, OffsetAndMetadata> toFix = new HashMap<>();
            this.lastCommits.forEach((tp, oamd) -> {
                long position = this.consumer.position(tp);
                Long saved = this.savedPositions.get(tp);
                if (saved != null && saved.longValue() != position) {
                    // A seek moved the position since the poll; do not "fix".
                    this.logger.debug(() -> "Skipping TX offset correction - seek(s) have been performed; "
                            + "saved: " + this.savedPositions + ", "
                            + "comitted: " + oamd + ", "
                            + "current: " + tp + "@" + position);
                    return;
                }
                if (position > oamd.offset()) {
                    toFix.put(tp, new OffsetAndMetadata(position));
                }
            });
            if (toFix.size() > 0) {
                this.logger.debug(() -> "Fixing TX offsets: " + toFix);
                if (this.kafkaTxManager == null) {
                    // Non-Kafka transactions: plain consumer commit.
                    if (this.syncCommits) {
                        commitSync(toFix);
                    }
                    else {
                        commitAsync(toFix);
                    }
                }
                else {
                    // Kafka transactions: send the offsets through the
                    // transactional producer bound to the current transaction.
                    this.transactionTemplate.executeWithoutResult(status -> {
                        doSendOffsets(((KafkaResourceHolder) TransactionSynchronizationManager
                                .getResource(this.kafkaTxManager.getProducerFactory()))
                                        .getProducer(), toFix);
                    });
                }
            }
        }
        catch (Exception e) {
            this.logger.error(e, () -> "Failed to correct transactional offset(s): "
                    + ListenerConsumer.this.lastCommits);
        }
        finally {
            // Each correction pass consumes the recorded commits.
            ListenerConsumer.this.lastCommits.clear();
        }
    }
}
@Nullable
private ConsumerRecords<K, V> doPoll() {
    // Poll the consumer; for batch listeners with sub-batch-per-partition
    // enabled, slice each real poll into one sub-batch per partition, served
    // across successive calls via batchIterator.
    ConsumerRecords<K, V> records;
    if (this.isBatchListener && this.subBatchPerPartition) {
        if (this.batchIterator == null) {
            // No sub-batches pending: perform a real poll.
            this.lastBatch = pollConsumer();
            captureOffsets(this.lastBatch);
            if (this.lastBatch.count() == 0) {
                return this.lastBatch;
            }
            else {
                this.batchIterator = this.lastBatch.partitions().iterator();
            }
        }
        // Serve the next partition's records as a single-partition batch.
        TopicPartition next = this.batchIterator.next();
        List<ConsumerRecord<K, V>> subBatch = this.lastBatch.records(next);
        records = new ConsumerRecords<>(Collections.singletonMap(next, subBatch));
        if (!this.batchIterator.hasNext()) {
            this.batchIterator = null;
        }
    }
    else {
        records = pollConsumer();
        captureOffsets(records);
        // Re-commit any offsets recorded while a rebalance was in progress.
        checkRebalanceCommits();
    }
    return records;
}
private ConsumerRecords<K, V> pollConsumer() {
    // Set up interceptor thread state, then poll; a wakeup (e.g. requested by
    // an out-of-band ack or stop) is reported as an empty result.
    beforePoll();
    try {
        return this.consumer.poll(this.pollTimeout);
    }
    catch (WakeupException ex) {
        return ConsumerRecords.empty();
    }
}
private void beforePoll() {
    // Let the interceptor (if any) set up per-poll thread-local state.
    ThreadStateProcessor processor = this.pollThreadStateProcessor;
    if (processor != null) {
        processor.setupThreadState(this.consumer);
    }
}
private synchronized void captureOffsets(ConsumerRecords<K, V> records) {
    // For out-of-order manual acks (offsetsInThisBatch != null), record the
    // offsets of every polled record per partition so acks can later be
    // re-ordered; deferred (early) acks are parked in deferredOffsets.
    // Synchronized against the ack-processing path.
    if (this.offsetsInThisBatch != null && records.count() > 0) {
        this.offsetsInThisBatch.clear();
        this.deferredOffsets.clear();
        records.partitions().forEach(part -> {
            LinkedList<Long> offs = new LinkedList<>();
            this.offsetsInThisBatch.put(part, offs);
            this.deferredOffsets.put(part, new LinkedList<>());
            records.records(part).forEach(rec -> offs.add(rec.offset()));
        });
    }
}
private void checkRebalanceCommits() {
    if (this.commitsDuringRebalance.isEmpty()) {
        return;
    }
    // Attempt to recommit the offsets for partitions that we still own
    Map<TopicPartition, OffsetAndMetadata> stillOwned = this.commitsDuringRebalance.entrySet()
            .stream()
            .filter(entry -> this.assignedPartitions.contains(entry.getKey()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    this.commitsDuringRebalance.clear();
    this.logger.debug(() -> "Commit list: " + stillOwned);
    commitSync(stillOwned);
}
void wakeIfNecessaryForStop() {
    // Atomically claim the in-poll flag; wake the consumer only when a poll
    // was actually in progress (the cleared flag also tells pollAndInvoke to
    // discard any records from that poll).
    boolean wasPolling = this.polling.getAndSet(false);
    if (wasPolling) {
        this.consumer.wakeup();
    }
}
void wakeIfNecessary() {
    // Interrupt an in-progress poll without disturbing the polling flag, so
    // the current batch is still processed.
    if (this.polling.get()) {
        this.consumer.wakeup();
    }
}
private void debugRecords(@Nullable ConsumerRecords<K, V> records) {
    // Log the poll result: count at debug, per-record topic-partition@offset
    // at trace.
    if (records == null) {
        return;
    }
    this.logger.debug(() -> "Received: " + records.count() + " records");
    if (records.count() > 0) {
        this.logger.trace(() -> records.partitions().stream()
                .flatMap(p -> records.records(p).stream())
                // map to same format as send metadata toString()
                .map(r -> r.topic() + "-" + r.partition() + "@" + r.offset())
                .collect(Collectors.toList()).toString());
    }
}
private void sleepFor(Duration duration) {
    // Sleep for the given duration, waking early if the container is stopped.
    try {
        ListenerUtils.stoppableSleep(KafkaMessageListenerContainer.this, duration.toMillis());
    }
    catch (InterruptedException e) {
        // NOTE(review): the interrupt status is not restored here; the loop
        // deliberately continues after an interrupted auth-retry sleep -
        // confirm this is intentional before changing.
        this.logger.error(e, "Interrupted while sleeping");
    }
}
private void pauseConsumerIfNecessary() {
    // When out-of-order manual acks are active (offsetsInThisBatch != null),
    // the pause decision must be made under the monitor shared with the ack
    // bookkeeping; otherwise no synchronization is needed.
    if (this.offsetsInThisBatch == null) {
        doPauseConsumerIfNecessary();
    }
    else {
        synchronized (this) {
            doPauseConsumerIfNecessary();
        }
    }
}
private void doPauseConsumerIfNecessary() {
    // Pause the whole consumer when requested by the user (isPaused()) or
    // while waiting for outstanding out-of-order acks; a nack-induced pause
    // takes precedence and is managed elsewhere.
    if (this.pausedForNack.size() > 0) {
        this.logger.debug("Still paused for nack sleep");
        return;
    }
    if (this.offsetsInThisBatch != null && this.offsetsInThisBatch.size() > 0 && !this.pausedForAsyncAcks) {
        this.pausedForAsyncAcks = true;
        this.logger.debug(() -> "Pausing for incomplete async acks: " + this.offsetsInThisBatch);
    }
    if (!this.consumerPaused && (isPaused() || this.pausedForAsyncAcks)) {
        this.consumer.pause(this.consumer.assignment());
        this.consumerPaused = true;
        this.logger.debug(() -> "Paused consumption from: " + this.consumer.paused());
        publishConsumerPausedEvent(this.consumer.assignment());
    }
}
private void resumeConsumerIfNeccessary() {
    // Resume logic has three cases: a nack sleep that has elapsed, the
    // out-of-order-ack case (needs the shared monitor), and the plain case.
    if (this.nackWake > 0) {
        // Partitions paused by a nack: resume once the sleep deadline passes.
        if (System.currentTimeMillis() > this.nackWake) {
            this.nackWake = 0;
            this.consumer.resume(this.pausedForNack);
            this.logger.debug(() -> "Resumed after nack sleep: " + this.pausedForNack);
            this.pausedForNack.clear();
        }
    }
    else if (this.offsetsInThisBatch != null) {
        synchronized (this) {
            doResumeConsumerIfNeccessary();
        }
    }
    else {
        doResumeConsumerIfNeccessary();
    }
}
private void doResumeConsumerIfNeccessary() {
    // Clear the async-ack pause once all out-of-order acks have completed.
    if (this.pausedForAsyncAcks && this.offsetsInThisBatch.size() == 0) {
        this.pausedForAsyncAcks = false;
        this.logger.debug("Resuming after manual async acks cleared");
    }
    // Resume the consumer unless still user-paused or waiting for acks;
    // individually-paused partitions are excluded from the resume.
    if (this.consumerPaused && !isPaused() && !this.pausedForAsyncAcks) {
        this.logger.debug(() -> "Resuming consumption from: " + this.consumer.paused());
        Collection<TopicPartition> paused = new LinkedList<>(this.consumer.paused());
        paused.removeAll(this.pausedPartitions);
        this.consumer.resume(paused);
        this.consumerPaused = false;
        publishConsumerResumedEvent(paused);
    }
}
private void pausePartitionsIfNecessary() {
    // Pause any assigned partitions for which a pause was requested and that
    // are not already paused at the consumer.
    Set<TopicPartition> alreadyPaused = this.consumer.paused();
    Collection<TopicPartition> assigned = getAssignedPartitions();
    if (assigned == null) {
        return;
    }
    List<TopicPartition> toPause = assigned
            .stream()
            .filter(tp -> isPartitionPauseRequested(tp) && !alreadyPaused.contains(tp))
            .collect(Collectors.toList());
    if (!toPause.isEmpty()) {
        this.consumer.pause(toPause);
        this.pausedPartitions.addAll(toPause);
        this.logger.debug(() -> "Paused consumption from " + toPause);
        toPause.forEach(KafkaMessageListenerContainer.this::publishConsumerPartitionPausedEvent);
    }
}
private void resumePartitionsIfNecessary() {
    // Resume any partitions we paused whose pause request has been withdrawn.
    Collection<TopicPartition> assigned = getAssignedPartitions();
    if (assigned == null) {
        return;
    }
    List<TopicPartition> toResume = assigned
            .stream()
            .filter(tp -> !isPartitionPauseRequested(tp) && this.pausedPartitions.contains(tp))
            .collect(Collectors.toList());
    if (!toResume.isEmpty()) {
        this.consumer.resume(toResume);
        this.pausedPartitions.removeAll(toResume);
        this.logger.debug(() -> "Resumed consumption from " + toResume);
        toResume.forEach(KafkaMessageListenerContainer.this::publishConsumerPartitionResumedEvent);
    }
}
private void checkIdle() {
    // Publish an idle-container event when no records have arrived for the
    // configured interval; before the first record, the interval is scaled by
    // idleBeforeDataMultiplier. Throttled to one event per interval.
    Long idleEventInterval = this.containerProperties.getIdleEventInterval();
    if (idleEventInterval != null) {
        long idleEventInterval2 = idleEventInterval;
        long now = System.currentTimeMillis();
        if (!this.receivedSome) {
            idleEventInterval2 *= this.containerProperties.getIdleBeforeDataMultiplier();
        }
        if (now > this.lastReceive + idleEventInterval2
                && now > this.lastAlertAt + idleEventInterval2) {
            this.wasIdle = true;
            publishIdleContainerEvent(now - this.lastReceive, this.consumer, this.consumerPaused);
            this.lastAlertAt = now;
            // Also notify a ConsumerSeekAware listener of the idle container.
            if (this.consumerSeekAwareListener != null) {
                Collection<TopicPartition> partitions = getAssignedPartitions();
                if (partitions != null) {
                    seekPartitions(partitions, true);
                }
            }
        }
    }
}
private void idleBetweenPollIfNecessary() {
    // Sleep between polls when idleBetweenPolls is configured and we have
    // assignments, but never so long that max.poll.interval.ms would be
    // exceeded (with a safety margin).
    long idleBetweenPolls = this.containerProperties.getIdleBetweenPolls();
    Collection<TopicPartition> assigned = getAssignedPartitions();
    if (idleBetweenPolls > 0 && assigned != null && assigned.size() > 0) {
        idleBetweenPolls = Math.min(idleBetweenPolls,
                this.maxPollInterval - (System.currentTimeMillis() - this.lastPoll)
                        - 5000); // NOSONAR - less by five seconds to avoid race condition with rebalance
        if (idleBetweenPolls > 0) {
            try {
                ListenerUtils.stoppableSleep(KafkaMessageListenerContainer.this, idleBetweenPolls);
            }
            catch (InterruptedException ex) {
                // Restore the interrupt flag and abort the consumer thread.
                Thread.currentThread().interrupt();
                throw new IllegalStateException("Consumer Thread [" + this + "] has been interrupted", ex);
            }
        }
    }
}
private void wrapUp(@Nullable Throwable throwable) {
    // Final shutdown sequence for the consumer thread; the ordering of these
    // steps (commits before unsubscribe/close, events last) is significant.
    KafkaUtils.clearConsumerGroupId();
    if (this.micrometerHolder != null) {
        this.micrometerHolder.destroy();
    }
    publishConsumerStoppingEvent(this.consumer);
    Collection<TopicPartition> partitions = getAssignedPartitions();
    if (!this.fatalError) {
        if (this.kafkaTxManager == null) {
            // Flush any pending offset commits before leaving the group.
            commitPendingAcks();
            try {
                this.consumer.unsubscribe();
            }
            catch (@SuppressWarnings(UNUSED) WakeupException e) {
                // No-op. Continue process
            }
        }
        else {
            // Transactional: release the per-partition/group producers.
            closeProducers(partitions);
        }
    }
    else {
        this.logger.error("Fatal consumer exception; stopping container");
        KafkaMessageListenerContainer.this.stop(false);
    }
    this.monitorTask.cancel(true);
    if (!this.taskSchedulerExplicitlySet) {
        // We created the scheduler; we destroy it.
        ((ThreadPoolTaskScheduler) this.taskScheduler).destroy();
    }
    this.consumer.close();
    getAfterRollbackProcessor().clearThreadState();
    if (this.commonErrorHandler != null) {
        this.commonErrorHandler.clearThreadState();
    }
    if (this.consumerSeekAwareListener != null) {
        this.consumerSeekAwareListener.onPartitionsRevoked(partitions);
        this.consumerSeekAwareListener.unregisterSeekCallback();
    }
    this.logger.info(() -> getGroupId() + ": Consumer stopped");
    publishConsumerStoppedEvent(throwable);
}
/**
 * Handle exceptions thrown by the consumer outside of message listener
 * invocation (e.g. commit exceptions).
 * @param e the exception.
 */
protected void handleConsumerException(Exception e) {
    // Retriable commit failures have already exhausted their retries by the
    // time they get here; just log them.
    if (e instanceof RetriableCommitFailedException) {
        this.logger.error(e, "Commit retries exhausted");
        return;
    }
    try {
        if (this.commonErrorHandler == null) {
            this.logger.error(e, "Consumer exception");
        }
        else {
            this.commonErrorHandler.handleOtherException(e, this.consumer,
                    KafkaMessageListenerContainer.this.thisOrParentContainer, this.isBatchListener);
        }
    }
    catch (Exception ex) {
        // The error handler itself failed; log and keep the consumer alive.
        this.logger.error(ex, "Consumer exception");
    }
}
private void commitPendingAcks() {
    // Drain queued acks, then commit whatever offsets remain outstanding.
    processCommits();
    // we always commit after stopping the invoker
    if (!this.offsets.isEmpty()) {
        commitIfNecessary();
    }
}
/**
 * Process any acks that have been queued.
 */
private void handleAcks() {
    // Drain the queue; acks may have been enqueued by other threads between polls.
    for (ConsumerRecord<K, V> queued = this.acks.poll(); queued != null; queued = this.acks.poll()) {
        traceAck(queued);
        processAck(queued);
    }
}
@SuppressWarnings(DEPRECATION)
private void traceAck(ConsumerRecord<K, V> record) {
    // Trace-level only; recordToString(.., true) renders the short form.
    this.logger.trace(() -> "Ack: " + ListenerUtils.recordToString(record, true));
}
private void doAck(ConsumerRecord<K, V> record) {
    // Route the ack: out-of-order manual ack mode requires re-ordering via
    // ackInOrder; otherwise process directly.
    traceAck(record);
    if (this.offsetsInThisBatch == null) {
        processAck(record);
    }
    else { // NOSONAR (sync)
        ackInOrder(record);
    }
}
private void processAck(ConsumerRecord<K, V> record) {
    // Acks from foreign threads are queued for the consumer thread (and the
    // consumer is woken when an immediate commit is needed); on the consumer
    // thread, either commit immediately or stage the offset for later commit.
    if (!Thread.currentThread().equals(this.consumerThread)) {
        try {
            this.acks.put(record);
            if (this.isManualImmediateAck || this.pausedForAsyncAcks) { // NOSONAR (sync)
                // Wake the (possibly blocked) poll so the ack is handled promptly.
                this.consumer.wakeup();
            }
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new KafkaException("Interrupted while storing ack", e);
        }
    }
    else {
        if (this.isManualImmediateAck) {
            try {
                ackImmediate(record);
            }
            catch (@SuppressWarnings(UNUSED) WakeupException e) {
                // ignore - not polling
            }
        }
        else {
            addOffset(record);
        }
    }
}
private void processAcks(ConsumerRecords<K, V> records) {
    // Batch variant of processAck: queue all records (and wake for immediate
    // ack) when called from a foreign thread; on the consumer thread, commit
    // the whole batch immediately or stage each offset.
    if (!Thread.currentThread().equals(this.consumerThread)) {
        try {
            for (ConsumerRecord<K, V> record : records) {
                this.acks.put(record);
            }
            if (this.isManualImmediateAck) {
                // Wake the (possibly blocked) poll so the acks are handled promptly.
                this.consumer.wakeup();
            }
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new KafkaException("Interrupted while storing ack", e);
        }
    }
    else {
        if (this.isManualImmediateAck) {
            try {
                ackImmediate(records);
            }
            catch (@SuppressWarnings(UNUSED) WakeupException e) {
                // ignore - not polling
            }
        }
        else {
            for (ConsumerRecord<K, V> record : records) {
                addOffset(record);
            }
        }
    }
}
/**
 * Ack a record when per-batch ordered offset tracking is active: offsets for a
 * partition must be committed in offset order, so an ack arriving ahead of
 * earlier uncommitted offsets is deferred until the gap closes. Synchronized
 * because acks may arrive from threads other than the consumer thread.
 */
@SuppressWarnings(DEPRECATION)
private synchronized void ackInOrder(ConsumerRecord<K, V> record) {
    TopicPartition part = new TopicPartition(record.topic(), record.partition());
    List<Long> offs = this.offsetsInThisBatch.get(part);
    List<ConsumerRecord<K, V>> deferred = this.deferredOffsets.get(part);
    if (offs.size() > 0) {
        if (offs.get(0) == record.offset()) {
            // in-order ack; fold in any now-contiguous deferred acks as well
            offs.remove(0);
            ConsumerRecord<K, V> recordToAck = record;
            if (deferred.size() > 0) {
                Collections.sort(deferred, (a, b) -> Long.compare(a.offset(), b.offset()));
                while (deferred.size() > 0 && deferred.get(0).offset() == recordToAck.offset() + 1) {
                    recordToAck = deferred.remove(0);
                    offs.remove(0);
                }
            }
            processAck(recordToAck);
            if (offs.size() == 0) {
                // batch fully acked for this partition; drop the tracking state
                this.deferredOffsets.remove(part);
                this.offsetsInThisBatch.remove(part);
            }
        }
        else if (record.offset() < offs.get(0)) {
            throw new IllegalStateException("First remaining offset for this batch is " + offs.get(0)
                    + "; you are acknowledging a stale record: " + ListenerUtils.recordToString(record));
        }
        else {
            // ahead of an unacked earlier offset; hold until the gap is filled
            deferred.add(record);
        }
    }
    else {
        throw new IllegalStateException("Unexpected ack for " + ListenerUtils.recordToString(record)
                + "; offsets list is empty");
    }
}
/**
 * Commit this record's offset (+1) immediately: via the transactional producer
 * when present, otherwise sync or async per the container's syncCommits setting.
 */
private void ackImmediate(ConsumerRecord<K, V> record) {
    Map<TopicPartition, OffsetAndMetadata> commits = Collections.singletonMap(
            new TopicPartition(record.topic(), record.partition()),
            new OffsetAndMetadata(record.offset() + 1));
    this.commitLogger.log(() -> COMMITTING + commits);
    if (this.producer != null) {
        doSendOffsets(this.producer, commits);
    }
    else if (this.syncCommits) {
        commitSync(commits);
    }
    else {
        commitAsync(commits);
    }
}
/**
 * Commit the highest offset (+1) of each partition in the batch immediately:
 * via the transactional producer when present, otherwise sync or async per
 * the container's syncCommits setting.
 */
private void ackImmediate(ConsumerRecords<K, V> records) {
    Map<TopicPartition, OffsetAndMetadata> commits = new HashMap<>();
    for (TopicPartition part : records.partitions()) {
        // last record per partition (records are in offset order per the
        // Kafka client contract)
        commits.put(part,
                new OffsetAndMetadata(records.records(part)
                        .get(records.records(part).size() - 1).offset() + 1));
    }
    this.commitLogger.log(() -> COMMITTING + commits);
    if (this.producer != null) {
        doSendOffsets(this.producer, commits);
    }
    else if (this.syncCommits) {
        commitSync(commits);
    }
    else {
        commitAsync(commits);
    }
}
/**
 * Asynchronously commit the offsets, invoking the configured commit callback
 * on completion and, on success, recording the commits when fixTxOffsets is on.
 */
private void commitAsync(Map<TopicPartition, OffsetAndMetadata> commits) {
    this.consumer.commitAsync(commits, (offsetsAttempted, exception) -> {
        this.commitCallback.onComplete(offsetsAttempted, exception);
        if (exception == null && this.fixTxOffsets) {
            this.lastCommits.putAll(commits);
        }
    });
}
/**
 * Dispatch the polled records to the batch or the record listener.
 */
private void invokeListener(final ConsumerRecords<K, V> records) {
    if (!this.isBatchListener) {
        invokeRecordListener(records);
    }
    else {
        invokeBatchListener(records);
    }
}
/**
 * Invoke the batch listener after early interception; empty or
 * intercept-suppressed batches are skipped; runs in a transaction when one
 * is configured.
 */
private void invokeBatchListener(final ConsumerRecords<K, V> recordsArg) {
    ConsumerRecords<K, V> records = checkEarlyIntercept(recordsArg);
    if (records == null || records.count() == 0) {
        return;
    }
    List<ConsumerRecord<K, V>> recordList = null;
    if (!this.wantsFullRecords) {
        // listener takes a List rather than raw ConsumerRecords
        recordList = createRecordList(records);
    }
    if (this.wantsFullRecords || recordList.size() > 0) {
        if (this.transactionTemplate != null) {
            invokeBatchListenerInTx(records, recordList); // NOSONAR
        }
        else {
            doInvokeBatchListener(records, recordList); // NOSONAR
        }
    }
}
/**
 * Invoke the batch listener inside a transaction. With sub-batch-per-partition
 * and producer-per-consumer-partition, a zombie-fencing transactional-id suffix
 * is set before (and always cleared after) the transaction. Fencing exceptions
 * stop the container when so configured; other runtime exceptions trigger
 * batch rollback processing.
 */
@SuppressWarnings(RAWTYPES)
private void invokeBatchListenerInTx(final ConsumerRecords<K, V> records,
        @Nullable final List<ConsumerRecord<K, V>> recordList) {

    try {
        if (this.subBatchPerPartition && this.producerPerConsumerPartition) {
            // the suffix is derived from the first record's topic/partition
            ConsumerRecord<K, V> record = recordList == null ? records.iterator().next() : recordList.get(0);
            TransactionSupport
                    .setTransactionIdSuffix(zombieFenceTxIdSuffix(record.topic(), record.partition())); // NOSONAR
        }
        this.transactionTemplate.execute(new TransactionCallbackWithoutResult() {

            @Override
            public void doInTransactionWithoutResult(TransactionStatus s) {
                if (ListenerConsumer.this.kafkaTxManager != null) {
                    // capture the transaction-bound producer for offset sends
                    ListenerConsumer.this.producer = ((KafkaResourceHolder) TransactionSynchronizationManager
                            .getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory()))
                                    .getProducer(); // NOSONAR nullable
                }
                RuntimeException aborted = doInvokeBatchListener(records, recordList);
                if (aborted != null) {
                    // rethrow inside the transaction so it rolls back
                    throw aborted;
                }
            }
        });
    }
    catch (ProducerFencedException | FencedInstanceIdException e) {
        this.logger.error(e, "Producer or '"
                + ConsumerConfig.GROUP_INSTANCE_ID_CONFIG
                + "' fenced during transaction");
        if (this.containerProperties.isStopContainerWhenFenced()) {
            throw new StopAfterFenceException("Container stopping due to fencing", e);
        }
    }
    catch (RuntimeException e) {
        this.logger.error(e, "Transaction rolled back");
        batchRollback(records, recordList, e);
    }
    finally {
        if (this.subBatchPerPartition && this.producerPerConsumerPartition) {
            TransactionSupport.clearTransactionIdSuffix();
        }
    }
}
/**
 * Run the AfterRollbackProcessor for a failed batch, inside a new transaction
 * when the processor requests it and a transaction template is available.
 */
private void batchRollback(final ConsumerRecords<K, V> records,
        @Nullable final List<ConsumerRecord<K, V>> recordList, RuntimeException e) {

    @SuppressWarnings(UNCHECKED)
    AfterRollbackProcessor<K, V> afterRollbackProcessorToUse =
            (AfterRollbackProcessor<K, V>) getAfterRollbackProcessor();
    if (afterRollbackProcessorToUse.isProcessInTransaction() && this.transactionTemplate != null) {
        this.transactionTemplate.execute(new TransactionCallbackWithoutResult() {

            @Override
            protected void doInTransactionWithoutResult(TransactionStatus status) {
                batchAfterRollback(records, recordList, e, afterRollbackProcessorToUse);
            }
        });
    }
    else {
        batchAfterRollback(records, recordList, e, afterRollbackProcessorToUse);
    }
}
/**
 * Invoke the after-rollback processor with the batch (as a List, built on
 * demand when the caller did not supply one); processor failures are logged
 * and never propagated.
 */
private void batchAfterRollback(final ConsumerRecords<K, V> records,
        @Nullable final List<ConsumerRecord<K, V>> recordList, RuntimeException rollbackException,
        AfterRollbackProcessor<K, V> afterRollbackProcessorToUse) {

    try {
        if (recordList == null) {
            afterRollbackProcessorToUse.process(createRecordList(records), this.consumer,
                    KafkaMessageListenerContainer.this.thisOrParentContainer, rollbackException, false,
                    this.eosMode);
        }
        else {
            afterRollbackProcessorToUse.process(recordList, this.consumer,
                    KafkaMessageListenerContainer.this.thisOrParentContainer, rollbackException, false,
                    this.eosMode);
        }
    }
    catch (KafkaException ke) {
        ke.selfLog("AfterRollbackProcessor threw an exception", this.logger);
    }
    catch (Exception ex) {
        this.logger.error(ex, "AfterRollbackProcessor threw an exception");
    }
}
/**
 * Copy the polled records into a {@link List} for listeners that take a List
 * rather than {@code ConsumerRecords}.
 * @param records the polled records.
 * @return the records as a list, in iteration order.
 */
private List<ConsumerRecord<K, V>> createRecordList(final ConsumerRecords<K, V> records) {
    // Pre-sized ArrayList instead of LinkedList: callers index into this list
    // (e.g. recordList.get(0)) and iterate it repeatedly, and the final size
    // is known up front.
    List<ConsumerRecord<K, V>> list = new ArrayList<>(records.count());
    for (ConsumerRecord<K, V> record : records) {
        list.add(record);
    }
    return list;
}
/**
 * Actually invoke the batch listener, timing the call and routing any failure
 * to the common error handler (when present).
 * @param records the records (needed to invoke the error handler)
 * @param recordList the list of records (actually passed to the listener).
 * @return an exception thrown by the error handler (so a caller in a
 * transaction can abort it), or null.
 * @throws Error an error.
 */
@Nullable
private RuntimeException doInvokeBatchListener(final ConsumerRecords<K, V> records, // NOSONAR
        List<ConsumerRecord<K, V>> recordList) {

    Object sample = startMicrometerSample();
    try {
        invokeBatchOnMessage(records, recordList);
        batchInterceptAfter(records, null);
        successTimer(sample);
        if (this.batchFailed) {
            // previous batch failed but this one succeeded; reset handler state
            this.batchFailed = false;
            if (this.commonErrorHandler != null) {
                this.commonErrorHandler.clearThreadState();
            }
            getAfterRollbackProcessor().clearThreadState();
        }
    }
    catch (RuntimeException e) {
        failureTimer(sample);
        batchInterceptAfter(records, e);
        if (this.commonErrorHandler == null) {
            throw e;
        }
        try {
            this.batchFailed = true;
            invokeBatchErrorHandler(records, recordList, e);
            commitOffsetsIfNeeded(records);
        }
        catch (KafkaException ke) {
            ke.selfLog(ERROR_HANDLER_THREW_AN_EXCEPTION, this.logger);
            return ke;
        }
        catch (RuntimeException ee) {
            this.logger.error(ee, ERROR_HANDLER_THREW_AN_EXCEPTION);
            return ee;
        }
        catch (Error er) { // NOSONAR
            this.logger.error(er, "Error handler threw an error");
            throw er;
        }
    }
    catch (@SuppressWarnings(UNUSED) InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    return null;
}
/**
 * After the error handler has run for a batch, stage the highest offsets for
 * commit when the handler says to ack after handling (and auto-commit is off),
 * or always when running in a transaction (offsets then go to the transaction).
 */
private void commitOffsetsIfNeeded(final ConsumerRecords<K, V> records) {
    if ((!this.autoCommit && this.commonErrorHandler.isAckAfterHandle())
            || this.producer != null) {
        this.acks.addAll(getHighestOffsetRecords(records));
        if (this.producer != null) {
            sendOffsetsToTransaction();
        }
    }
}
/**
 * Notify the common batch interceptor of the batch outcome - success when
 * the exception is null, failure otherwise; interceptor errors are only logged.
 */
private void batchInterceptAfter(ConsumerRecords<K, V> records, @Nullable Exception exception) {
    if (this.commonBatchInterceptor == null) {
        return;
    }
    try {
        if (exception != null) {
            this.commonBatchInterceptor.failure(records, exception, this.consumer);
        }
        else {
            this.commonBatchInterceptor.success(records, this.consumer);
        }
    }
    catch (Exception e) {
        this.logger.error(e, "BatchInterceptor.success/failure threw an exception");
    }
}
/**
 * Start a micrometer timer sample when metrics are configured.
 * @return the sample, or null when there is no micrometer holder.
 */
@Nullable
private Object startMicrometerSample() {
    return this.micrometerHolder == null ? null : this.micrometerHolder.start();
}
/**
 * Record a successful listener invocation against the given timer sample
 * (no-op when the sample is null).
 */
private void successTimer(@Nullable Object sample) {
    if (sample == null) {
        return;
    }
    this.micrometerHolder.success(sample);
}
/**
 * Record a failed listener invocation against the given timer sample
 * (no-op when the sample is null).
 */
private void failureTimer(@Nullable Object sample) {
    if (sample == null) {
        return;
    }
    this.micrometerHolder.failure(sample, "ListenerExecutionFailedException");
}
/**
 * Invoke the batch listener, then handle any nack (seek back from the nacked
 * index and pause) and/or stage the highest offsets for commit, sending them
 * to the transaction when one is active.
 */
private void invokeBatchOnMessage(final ConsumerRecords<K, V> records, // NOSONAR - Cyclomatic Complexity
        List<ConsumerRecord<K, V>> recordList) throws InterruptedException {

    invokeBatchOnMessageWithRecordsOrList(records, recordList);
    List<ConsumerRecord<?, ?>> toSeek = null;
    if (this.nackSleep >= 0) {
        // listener nacked at nackIndex; everything from that index is re-sought
        int index = 0;
        toSeek = new ArrayList<>();
        for (ConsumerRecord<K, V> record : records) {
            if (index++ >= this.nackIndex) {
                toSeek.add(record);
            }
        }
    }
    if (this.producer != null || (!this.isAnyManualAck && !this.autoCommit)) {
        if (this.nackSleep < 0) {
            // no nack: stage the highest offset of each partition for commit
            for (ConsumerRecord<K, V> record : getHighestOffsetRecords(records)) {
                this.acks.put(record);
            }
        }
        if (this.producer != null) {
            sendOffsetsToTransaction();
        }
    }
    if (toSeek != null) {
        if (!this.autoCommit) {
            // commit what was processed before seeking back
            processCommits();
        }
        SeekUtils.doSeeks(toSeek, this.consumer, null, true, (rec, ex) -> false, this.logger); // NOSONAR
        pauseForNackSleep();
    }
}
/**
 * Apply the (non-early) batch interceptor, then call the listener with either
 * the full ConsumerRecords or the List form, per the listener's preference.
 * A null from the interceptor skips the batch entirely.
 */
private void invokeBatchOnMessageWithRecordsOrList(final ConsumerRecords<K, V> recordsArg,
        @Nullable List<ConsumerRecord<K, V>> recordListArg) {

    ConsumerRecords<K, V> records = recordsArg;
    List<ConsumerRecord<K, V>> recordList = recordListArg;
    if (this.batchInterceptor != null) {
        records = this.batchInterceptor.intercept(recordsArg, this.consumer);
        if (records == null) {
            this.logger.debug(() -> "BatchInterceptor returned null, skipping: "
                    + recordsArg + " with " + recordsArg.count() + " records");
            return;
        }
        else {
            // the interceptor may have replaced the records; rebuild the list
            recordList = createRecordList(records);
        }
    }
    if (this.wantsFullRecords) {
        this.batchListener.onMessage(records, // NOSONAR
                this.isAnyManualAck
                        ? new ConsumerBatchAcknowledgment(records)
                        : null,
                this.consumer);
    }
    else {
        doInvokeBatchOnMessage(records, recordList); // NOSONAR
    }
}
/**
 * Call the List-style batch listener with the arguments appropriate to its
 * type (acknowledging and/or consumer-aware); listener exceptions are wrapped
 * via decorateException().
 */
private void doInvokeBatchOnMessage(final ConsumerRecords<K, V> records,
        List<ConsumerRecord<K, V>> recordList) {

    try {
        switch (this.listenerType) {
            case ACKNOWLEDGING_CONSUMER_AWARE:
                this.batchListener.onMessage(recordList,
                        this.isAnyManualAck
                                ? new ConsumerBatchAcknowledgment(records)
                                : null, this.consumer);
                break;
            case ACKNOWLEDGING:
                this.batchListener.onMessage(recordList,
                        this.isAnyManualAck
                                ? new ConsumerBatchAcknowledgment(records)
                                : null);
                break;
            case CONSUMER_AWARE:
                this.batchListener.onMessage(recordList, this.consumer);
                break;
            case SIMPLE:
                this.batchListener.onMessage(recordList);
                break;
        }
    }
    catch (Exception ex) { //  NOSONAR
        throw decorateException(ex);
    }
}
/**
 * Delegate a failed batch to the common error handler, supplying a retry
 * callback that re-invokes the listener with the same records/list.
 */
private void invokeBatchErrorHandler(final ConsumerRecords<K, V> records,
        @Nullable List<ConsumerRecord<K, V>> list, RuntimeException rte) {

    this.commonErrorHandler.handleBatch(rte, records, this.consumer,
            KafkaMessageListenerContainer.this.thisOrParentContainer,
            () -> invokeBatchOnMessageWithRecordsOrList(records, list));
}
/**
 * Invoke the record listener - per-record inside a transaction when a
 * transaction template is configured, plain otherwise.
 */
private void invokeRecordListener(final ConsumerRecords<K, V> records) {
    if (this.transactionTemplate == null) {
        doInvokeWithRecords(records);
    }
    else {
        invokeRecordListenerInTx(records);
    }
}
/**
 * Invoke the listener with each record in a separate transaction.
 * Fencing exceptions may stop the container; other runtime exceptions trigger
 * after-rollback processing; a nack seeks back and ends this batch.
 * @param records the records.
 */
@SuppressWarnings(DEPRECATION) // NOSONAR complexity
private void invokeRecordListenerInTx(final ConsumerRecords<K, V> records) {
    Iterator<ConsumerRecord<K, V>> iterator = records.iterator();
    while (iterator.hasNext()) {
        if (this.stopImmediate && !isRunning()) {
            // container stopped with stopImmediate; abandon the rest of the batch
            break;
        }
        final ConsumerRecord<K, V> record = checkEarlyIntercept(iterator.next());
        if (record == null) {
            // early interceptor suppressed this record
            continue;
        }
        this.logger.trace(() -> "Processing " + ListenerUtils.recordToString(record));
        try {
            invokeInTransaction(iterator, record);
        }
        catch (ProducerFencedException | FencedInstanceIdException e) {
            this.logger.error(e, "Producer or 'group.instance.id' fenced during transaction");
            if (this.containerProperties.isStopContainerWhenFenced()) {
                throw new StopAfterFenceException("Container stopping due to fencing", e);
            }
            break;
        }
        catch (RuntimeException ex) {
            this.logger.error(ex, "Transaction rolled back");
            recordAfterRollback(iterator, record, ex);
        }
        finally {
            if (this.producerPerConsumerPartition) {
                TransactionSupport.clearTransactionIdSuffix();
            }
        }
        if (this.commonRecordInterceptor != null) {
            this.commonRecordInterceptor.afterRecord(record, this.consumer);
        }
        if (this.nackSleep >= 0) {
            // listener nacked; seek remaining records and stop this batch
            handleNack(records, record);
            break;
        }
    }
}
/**
 * Run one record's listener invocation inside a transaction, first setting the
 * zombie-fencing transactional-id suffix when using a producer per partition;
 * an exception returned by the listener invocation is rethrown inside the
 * transaction to force a rollback.
 */
private void invokeInTransaction(Iterator<ConsumerRecord<K, V>> iterator, final ConsumerRecord<K, V> record) {
    if (this.producerPerConsumerPartition) {
        TransactionSupport
                .setTransactionIdSuffix(zombieFenceTxIdSuffix(record.topic(), record.partition()));
    }
    this.transactionTemplate.execute(new TransactionCallbackWithoutResult() {

        @Override
        public void doInTransactionWithoutResult(TransactionStatus s) {
            if (ListenerConsumer.this.kafkaTxManager != null) {
                // capture the transaction-bound producer for offset sends
                ListenerConsumer.this.producer = ((KafkaResourceHolder) TransactionSynchronizationManager
                        .getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory()))
                                .getProducer(); // NOSONAR
            }
            RuntimeException aborted = doInvokeRecordListener(record, iterator);
            if (aborted != null) {
                throw aborted;
            }
        }

    });
}
/**
 * Run the AfterRollbackProcessor with the failed record plus the unprocessed
 * remainder of the batch, inside a new transaction when the processor requests
 * it; processor failures are logged and never propagated.
 * @param iterator iterator over the remaining (unprocessed) records.
 * @param record the record whose transaction rolled back.
 * @param e the exception that caused the rollback.
 */
private void recordAfterRollback(Iterator<ConsumerRecord<K, V>> iterator, final ConsumerRecord<K, V> record,
        RuntimeException e) {

    List<ConsumerRecord<K, V>> unprocessed = new ArrayList<>();
    unprocessed.add(record);
    while (iterator.hasNext()) {
        unprocessed.add(iterator.next());
    }
    @SuppressWarnings(UNCHECKED)
    AfterRollbackProcessor<K, V> afterRollbackProcessorToUse =
            (AfterRollbackProcessor<K, V>) getAfterRollbackProcessor();
    if (afterRollbackProcessorToUse.isProcessInTransaction() && this.transactionTemplate != null) {
        this.transactionTemplate.execute(new TransactionCallbackWithoutResult() {

            @Override
            protected void doInTransactionWithoutResult(TransactionStatus status) {
                afterRollbackProcessorToUse.process(unprocessed, ListenerConsumer.this.consumer,
                        KafkaMessageListenerContainer.this.thisOrParentContainer, e, true,
                        ListenerConsumer.this.eosMode);
            }
        });
    }
    else {
        try {
            afterRollbackProcessorToUse.process(unprocessed, this.consumer,
                    KafkaMessageListenerContainer.this.thisOrParentContainer, e, true, this.eosMode);
        }
        catch (KafkaException ke) {
            ke.selfLog("AfterRollbackProcessor threw an exception", this.logger);
        }
        catch (Exception ex) {
            // message aligned with the KafkaException branch and the batch path
            // (was "AfterRollbackProcessor threw exception")
            this.logger.error(ex, "AfterRollbackProcessor threw an exception");
        }
    }
}
/**
 * Non-transactional record-at-a-time invocation loop: early interception,
 * listener invocation, after-record interception, and nack handling.
 */
@SuppressWarnings(DEPRECATION)
private void doInvokeWithRecords(final ConsumerRecords<K, V> records) {
    Iterator<ConsumerRecord<K, V>> iterator = records.iterator();
    while (iterator.hasNext()) {
        if (this.stopImmediate && !isRunning()) {
            // container stopped with stopImmediate; abandon the rest of the batch
            break;
        }
        final ConsumerRecord<K, V> record = checkEarlyIntercept(iterator.next());
        if (record == null) {
            // early interceptor suppressed this record
            continue;
        }
        this.logger.trace(() -> "Processing " + ListenerUtils.recordToString(record));
        doInvokeRecordListener(record, iterator);
        if (this.commonRecordInterceptor !=  null) {
            this.commonRecordInterceptor.afterRecord(record, this.consumer);
        }
        if (this.nackSleep >= 0) {
            // listener nacked; seek remaining records and stop this batch
            handleNack(records, record);
            break;
        }
    }
}
/**
 * Apply the early batch interceptor (before any offset bookkeeping).
 * @return the (possibly replaced) records, or null to skip the batch.
 */
@Nullable
private ConsumerRecords<K, V> checkEarlyIntercept(ConsumerRecords<K, V> nextArg) {
    ConsumerRecords<K, V> next = nextArg;
    if (this.earlyBatchInterceptor != null) {
        next = this.earlyBatchInterceptor.intercept(next, this.consumer);
        if (next == null) {
            this.logger.debug(() -> "BatchInterceptor returned null, skipping: "
                    + nextArg + " with " + nextArg.count() + " records");
        }
    }
    return next;
}
/**
 * Add internal headers (delivery attempt, listener info) and apply the early
 * record interceptor.
 * @return the (possibly replaced) record, or null to skip it.
 */
@SuppressWarnings(DEPRECATION)
@Nullable
private ConsumerRecord<K, V> checkEarlyIntercept(ConsumerRecord<K, V> recordArg) {
    internalHeaders(recordArg);
    ConsumerRecord<K, V> record = recordArg;
    if (this.earlyRecordInterceptor != null) {
        record = this.earlyRecordInterceptor.intercept(record, this.consumer);
        if (record == null) {
            this.logger.debug(() -> "RecordInterceptor returned null, skipping: "
                    + ListenerUtils.recordToString(recordArg));
        }
    }
    return record;
}
/**
 * Add framework headers to the inbound record: the delivery-attempt count as a
 * 4-byte big-endian int (ByteBuffer default order) when a DeliveryAttemptAware
 * is configured, and the listener-info header when listener info is set.
 */
private void internalHeaders(final ConsumerRecord<K, V> record) {
    if (this.deliveryAttemptAware != null) {
        byte[] buff = new byte[4]; // NOSONAR (magic #)
        ByteBuffer bb = ByteBuffer.wrap(buff);
        bb.putInt(this.deliveryAttemptAware
                .deliveryAttempt(
                        new TopicPartitionOffset(record.topic(), record.partition(), record.offset())));
        record.headers().add(new RecordHeader(KafkaHeaders.DELIVERY_ATTEMPT, buff));
    }
    if (this.listenerinfo != null) {
        record.headers().add(this.infoHeader);
    }
}
/**
 * Handle a nack: commit what was already processed (unless auto/record ack),
 * then seek the nacked record and every record after it so they are
 * redelivered, and pause the consumer for the nack sleep interval.
 */
private void handleNack(final ConsumerRecords<K, V> records, final ConsumerRecord<K, V> record) {
    if (!this.autoCommit && !this.isRecordAck) {
        processCommits();
    }
    List<ConsumerRecord<?, ?>> list = new ArrayList<>();
    Iterator<ConsumerRecord<K, V>> iterator2 = records.iterator();
    while (iterator2.hasNext()) {
        ConsumerRecord<K, V> next = iterator2.next();
        if (next.equals(record) || list.size() > 0) {
            // collect from the nacked record onwards
            list.add(next);
        }
    }
    SeekUtils.doSeeks(list, this.consumer, null, true, (rec, ex) -> false, this.logger); // NOSONAR
    pauseForNackSleep();
}
/**
 * Pause the assigned partitions (excluding those already paused, e.g. by the
 * user - they must stay paused) for the nack sleep interval; nackWake holds
 * the resume deadline. If a concurrent rebalance defeats the pause, resume
 * whatever this call just paused.
 */
private void pauseForNackSleep() {
    if (this.nackSleep > 0) {
        this.nackWake = System.currentTimeMillis() + this.nackSleep;
        this.nackSleep = -1; // request consumed; reset
        Set<TopicPartition> alreadyPaused = this.consumer.paused();
        Collection<TopicPartition> assigned = getAssignedPartitions();
        if (assigned != null) {
            this.pausedForNack.addAll(assigned);
        }
        this.pausedForNack.removeAll(alreadyPaused);
        this.logger.debug(() -> "Pausing for nack sleep: " + ListenerConsumer.this.pausedForNack);
        try {
            this.consumer.pause(this.pausedForNack);
        }
        catch (IllegalStateException ex) {
            // this should never happen; defensive, just in case...
            this.logger.warn(() -> "Could not pause for nack, possible rebalance in process: "
                    + ex.getMessage());
            Set<TopicPartition> nowPaused = new HashSet<>(this.consumer.paused());
            nowPaused.removeAll(alreadyPaused);
            this.consumer.resume(nowPaused);
        }
    }
}
/**
 * Actually invoke the listener.
 * @param record the record.
 * @param iterator the {@link ConsumerRecords} iterator - used only if a
 * {@link RemainingRecordsErrorHandler} is being used.
 * @return an exception thrown by the error handler (so a caller in a
 * transaction can abort it), or null.
 * @throws Error an error.
 */
@Nullable
private RuntimeException doInvokeRecordListener(final ConsumerRecord<K, V> record, // NOSONAR
        Iterator<ConsumerRecord<K, V>> iterator) {

    Object sample = startMicrometerSample();
    try {
        invokeOnMessage(record);
        successTimer(sample);
        recordInterceptAfter(record, null);
    }
    catch (RuntimeException e) {
        failureTimer(sample);
        recordInterceptAfter(record, e);
        if (this.commonErrorHandler == null) {
            throw e;
        }
        try {
            invokeErrorHandler(record, iterator, e);
            commitOffsetsIfNeeded(record);
        }
        catch (KafkaException ke) {
            ke.selfLog(ERROR_HANDLER_THREW_AN_EXCEPTION, this.logger);
            return ke;
        }
        catch (RuntimeException ee) {
            this.logger.error(ee, ERROR_HANDLER_THREW_AN_EXCEPTION);
            return ee;
        }
        catch (Error er) { // NOSONAR
            this.logger.error(er, "Error handler threw an error");
            throw er;
        }
    }
    return null;
}
/**
 * After the error handler has run for a record, ack/commit it when the handler
 * says to ack after handling (and auto-commit is off), or always when in a
 * transaction; commitRecovered temporarily lets ackCurrent() stage the offset
 * despite manual-ack mode.
 */
private void commitOffsetsIfNeeded(final ConsumerRecord<K, V> record) {
    if ((!this.autoCommit && this.commonErrorHandler.isAckAfterHandle())
            || this.producer != null) {
        if (this.isManualAck) {
            this.commitRecovered = true;
        }
        ackCurrent(record);
        if (this.isManualAck) {
            this.commitRecovered = false;
        }
    }
}
/**
 * Notify the common record interceptor of the outcome for one record - success
 * when the exception is null, failure otherwise; interceptor errors are only
 * logged.
 */
private void recordInterceptAfter(ConsumerRecord<K, V> record, @Nullable Exception exception) {
    if (this.commonRecordInterceptor == null) {
        return;
    }
    try {
        if (exception != null) {
            this.commonRecordInterceptor.failure(record, exception, this.consumer);
        }
        else {
            this.commonRecordInterceptor.success(record, this.consumer);
        }
    }
    catch (Exception e) {
        this.logger.error(e, "RecordInterceptor.success/failure threw an exception");
    }
}
/**
 * Pre-flight deserialization-failure checks, then invoke the listener and
 * (unless nacked or manual-immediate ack) ack the record.
 */
private void invokeOnMessage(final ConsumerRecord<K, V> record) {
    // a DeserializationException may arrive as the value/key itself
    // (presumably placed there by an error-handling deserializer - see checkDeser)
    if (record.value() instanceof DeserializationException) {
        throw (DeserializationException) record.value();
    }
    if (record.key() instanceof DeserializationException) {
        throw (DeserializationException) record.key();
    }
    // ... or as a header alongside a null value/key
    if (record.value() == null && this.checkNullValueForExceptions) {
        checkDeser(record, SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER);
    }
    if (record.key() == null && this.checkNullKeyForExceptions) {
        checkDeser(record, SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER);
    }
    doInvokeOnMessage(record);
    if (this.nackSleep < 0 && !this.isManualImmediateAck) {
        ackCurrent(record);
    }
}
/**
 * Apply the (non-early) record interceptor, then call the listener with the
 * arguments appropriate to its type; a null from the interceptor skips the
 * record; listener exceptions are wrapped via decorateException().
 */
@SuppressWarnings(DEPRECATION)
private void doInvokeOnMessage(final ConsumerRecord<K, V> recordArg) {
    ConsumerRecord<K, V> record = recordArg;
    if (this.recordInterceptor != null) {
        record = this.recordInterceptor.intercept(record, this.consumer);
    }
    if (record == null) {
        this.logger.debug(() -> ("RecordInterceptor returned null, skipping: "
                + ListenerUtils.recordToString(recordArg)));
    }
    else {
        try {
            switch (this.listenerType) {
                case ACKNOWLEDGING_CONSUMER_AWARE:
                    this.listener.onMessage(record,
                            this.isAnyManualAck
                                    ? new ConsumerAcknowledgment(record)
                                    : null, this.consumer);
                    break;
                case CONSUMER_AWARE:
                    this.listener.onMessage(record, this.consumer);
                    break;
                case ACKNOWLEDGING:
                    this.listener.onMessage(record,
                            this.isAnyManualAck
                                    ? new ConsumerAcknowledgment(record)
                                    : null);
                    break;
                case SIMPLE:
                    this.listener.onMessage(record);
                    break;
            }
        }
        catch (Exception ex) { // NOSONAR
            throw decorateException(ex);
        }
    }
}
/**
 * Route a listener failure to the common error handler. Handlers that want the
 * remaining records get the failed record plus the rest of the batch (after
 * committing already-processed offsets when not in a transaction); otherwise
 * only the failed record is handled.
 */
private void invokeErrorHandler(final ConsumerRecord<K, V> record,
        Iterator<ConsumerRecord<K, V>> iterator, RuntimeException rte) {

    if (this.commonErrorHandler.remainingRecords()) {
        if (this.producer == null) {
            processCommits();
        }
        List<ConsumerRecord<?, ?>> records = new ArrayList<>();
        records.add(record);
        while (iterator.hasNext()) {
            records.add(iterator.next());
        }
        this.commonErrorHandler.handleRemaining(rte, records, this.consumer,
                KafkaMessageListenerContainer.this.thisOrParentContainer);
    }
    else {
        this.commonErrorHandler.handleRecord(rte, record, this.consumer,
                KafkaMessageListenerContainer.this.thisOrParentContainer);
    }
}
/**
 * Wrap a listener exception in a ListenerExecutionFailedException carrying the
 * consumer group id; if it already is one, re-create it with the group id and
 * restore the lost stack frames as a suppressed exception (fixStackTrace).
 */
private RuntimeException decorateException(Exception ex) {
    Exception toHandle = ex;
    if (toHandle instanceof ListenerExecutionFailedException) {
        toHandle = new ListenerExecutionFailedException(toHandle.getMessage(), this.consumerGroupId,
                toHandle.getCause()); // NOSONAR restored below
        fixStackTrace(ex, toHandle);
    }
    else {
        toHandle = new ListenerExecutionFailedException("Listener failed", this.consumerGroupId, toHandle);
    }
    return (RuntimeException) toHandle;
}
/**
 * When re-wrapping an exception, the frames between the throw site and here
 * are lost from the replacement. Find the first frame of the original trace
 * that also appears in the replacement's trace and attach the preceding
 * (lost) frames as a suppressed "Restored Stack Trace" exception.
 * Any failure here is logged at debug level and otherwise ignored.
 * @param ex the original exception.
 * @param toHandle the replacement exception that will be propagated.
 */
private void fixStackTrace(Exception ex, Exception toHandle) {
    try {
        StackTraceElement[] stackTrace = ex.getStackTrace();
        if (stackTrace != null && stackTrace.length > 0) {
            StackTraceElement[] stackTrace2 = toHandle.getStackTrace();
            if (stackTrace2 != null) {
                int matching = -1;
                // Iterate over the ORIGINAL trace. Previously the loop was
                // bounded by stackTrace2.length while indexing stackTrace[i]:
                // an ArrayIndexOutOfBoundsException (silently swallowed by the
                // catch below) when the new trace was longer, and missed
                // matches when it was shorter.
                for (int i = 0; i < stackTrace.length; i++) {
                    StackTraceElement se2 = stackTrace[i];
                    for (StackTraceElement se : stackTrace2) {
                        if (se2.equals(se)) {
                            matching = i;
                            break;
                        }
                    }
                    if (matching >= 0) {
                        break;
                    }
                }
                if (matching >= 0) {
                    // frames of the original trace above the common frame
                    StackTraceElement[] merged = new StackTraceElement[matching];
                    System.arraycopy(stackTrace, 0, merged, 0, matching);
                    ListenerExecutionFailedException suppressed =
                            new ListenerExecutionFailedException("Restored Stack Trace");
                    suppressed.setStackTrace(merged);
                    toHandle.addSuppressed(suppressed);
                }
            }
        }
    }
    catch (Exception ex2) {
        this.logger.debug(ex2,
                "Could not restore the stack trace when decorating the LEFE with the group id");
    }
}
/**
 * If the named header carries a DeserializationException, rethrow it decorated.
 * @param record the record whose headers are checked.
 * @param headerName the exception header name (key or value variant).
 */
public void checkDeser(final ConsumerRecord<K, V> record, String headerName) {
    DeserializationException exception = ListenerUtils.getExceptionFromHeader(record, headerName, this.logger);
    if (exception != null) {
        /*
         * Wrapping in a LEFE is not strictly correct, but required for backwards compatibility.
         */
        throw decorateException(exception);
    }
}
/**
 * Ack the current record per the ack mode: record-ack commits (or stages, when
 * in a transaction) this record's offset now; otherwise the record is staged
 * unless auto-commit, or manual ack without commitRecovered; when in a
 * transaction the staged offsets are also sent to the transaction.
 */
public void ackCurrent(final ConsumerRecord<K, V> record) {
    if (this.isRecordAck) {
        Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
                Collections.singletonMap(new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
        if (this.producer == null) {
            this.commitLogger.log(() -> COMMITTING + offsetsToCommit);
            if (this.syncCommits) {
                commitSync(offsetsToCommit);
            }
            else {
                commitAsync(offsetsToCommit);
            }
        }
        else {
            // in a transaction; stage for sendOffsetsToTransaction() below
            this.acks.add(record);
        }
    }
    else if (this.producer != null
            || ((!this.isAnyManualAck || this.commitRecovered) && !this.autoCommit)) {
        this.acks.add(record);
    }
    if (this.producer != null) {
        sendOffsetsToTransaction();
    }
}
/**
 * Drain pending acks, build the commit map, and send the offsets to the
 * current transaction via the transactional producer.
 */
private void sendOffsetsToTransaction() {
    handleAcks();
    Map<TopicPartition, OffsetAndMetadata> commits = buildCommits();
    this.commitLogger.log(() -> "Sending offsets to transaction: " + commits);
    doSendOffsets(this.producer, commits);
}
/**
 * Send offsets to the transaction: by group id for EOS V1 (the deprecated
 * producer API, hence the suppression) or by consumer group metadata
 * otherwise; the commits are tracked when fixTxOffsets is enabled.
 */
@SuppressWarnings(DEPRECATION)
private void doSendOffsets(Producer<?, ?> prod, Map<TopicPartition, OffsetAndMetadata> commits) {
    if (this.eosMode.getMode().equals(EOSMode.V1)) {
        prod.sendOffsetsToTransaction(commits, this.consumerGroupId);
    }
    else {
        prod.sendOffsetsToTransaction(commits, this.consumer.groupMetadata());
    }
    if (this.fixTxOffsets) {
        this.lastCommits.putAll(commits);
    }
}
/**
 * Periodic commit processing on the consumer thread: drain acks, then commit
 * per the ack mode - unconditionally for non-time/non-count modes, when the
 * count threshold is exceeded for count-based modes, otherwise via
 * timedAcks(); manual-immediate commits are handled elsewhere.
 */
private void processCommits() {
    this.count += this.acks.size();
    handleAcks();
    AckMode ackMode = this.containerProperties.getAckMode();
    if (!this.isManualImmediateAck) {
        if (!this.isManualAck) {
            updatePendingOffsets();
        }
        boolean countExceeded = this.isCountAck && this.count >= this.containerProperties.getAckCount();
        if ((!this.isTimeOnlyAck && !this.isCountAck) || countExceeded) {
            if (this.isCountAck) {
                this.logger.debug(() -> "Committing in " + ackMode.name() + " because count "
                        + this.count
                        + " exceeds configured limit of " + this.containerProperties.getAckCount());
            }
            commitIfNecessary();
            this.count = 0;
        }
        else {
            timedAcks(ackMode);
        }
    }
}
/**
 * Commit for the time-based ack modes when the configured ack time has
 * elapsed since the last commit; COUNT_TIME also resets the ack counter.
 */
private void timedAcks(AckMode ackMode) {
    long now;
    now = System.currentTimeMillis();
    boolean elapsed = now - this.last > this.containerProperties.getAckTime();
    if (ackMode.equals(AckMode.TIME) && elapsed) {
        this.logger.debug(() -> "Committing in AckMode.TIME " +
                "because time elapsed exceeds configured limit of " +
                this.containerProperties.getAckTime());
        commitIfNecessary();
        this.last = now;
    }
    else if (ackMode.equals(AckMode.COUNT_TIME) && elapsed) {
        this.logger.debug(() -> "Committing in AckMode.COUNT_TIME " +
                "because time elapsed exceeds configured limit of " +
                this.containerProperties.getAckTime());
        commitIfNecessary();
        this.last = now;
        this.count = 0;
    }
}
/**
 * Perform queued user seeks: TIMESTAMP seeks first (batched), then each
 * remaining queued seek - absolute or relative offset, BEGINNING/END (with
 * optional explicit offset), or a late-queued TIMESTAMP; seek failures are
 * logged, not propagated.
 */
private void processSeeks() {
    processTimestampSeeks();
    TopicPartitionOffset offset = this.seeks.poll();
    while (offset != null) {
        traceSeek(offset);
        try {
            SeekPosition position = offset.getPosition();
            Long whereTo = offset.getOffset();
            if (position == null) {
                // plain offset seek; optionally relative to the current position
                if (offset.isRelativeToCurrent()) {
                    whereTo += this.consumer.position(offset.getTopicPartition());
                    whereTo = Math.max(whereTo, 0);
                }
                this.consumer.seek(offset.getTopicPartition(), whereTo);
            }
            else if (position.equals(SeekPosition.BEGINNING)) {
                this.consumer.seekToBeginning(Collections.singletonList(offset.getTopicPartition()));
                if (whereTo != null) {
                    // explicit offset supplied in addition to the position
                    this.consumer.seek(offset.getTopicPartition(), whereTo);
                }
            }
            else if (position.equals(SeekPosition.TIMESTAMP)) {
                // possible late addition since the grouped processing above
                Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = this.consumer
                        .offsetsForTimes(
                                Collections.singletonMap(offset.getTopicPartition(), offset.getOffset()));
                offsetsForTimes.forEach((tp, ot) -> {
                    if (ot != null) {
                        this.consumer.seek(tp, ot.offset());
                    }
                });
            }
            else {
                // SeekPosition.END, with optional offset relative to the end
                this.consumer.seekToEnd(Collections.singletonList(offset.getTopicPartition()));
                if (whereTo != null) {
                    whereTo += this.consumer.position(offset.getTopicPartition());
                    this.consumer.seek(offset.getTopicPartition(), whereTo);
                }
            }
        }
        catch (Exception e) {
            TopicPartitionOffset offsetToLog = offset;
            this.logger.error(e, () -> "Exception while seeking " + offsetToLog);
        }
        offset = this.seeks.poll();
    }
}
/**
 * Remove all queued TIMESTAMP seeks and perform them with a single
 * offsetsForTimes() call; partitions with no offset for the requested
 * timestamp are left where they are.
 */
private void processTimestampSeeks() {
    Iterator<TopicPartitionOffset> seekIterator = this.seeks.iterator();
    Map<TopicPartition, Long> timestampSeeks = null;
    while (seekIterator.hasNext()) {
        TopicPartitionOffset tpo = seekIterator.next();
        if (SeekPosition.TIMESTAMP.equals(tpo.getPosition())) {
            if (timestampSeeks == null) {
                timestampSeeks = new HashMap<>();
            }
            // for TIMESTAMP seeks, getOffset() carries the timestamp
            timestampSeeks.put(tpo.getTopicPartition(), tpo.getOffset());
            seekIterator.remove();
            traceSeek(tpo);
        }
    }
    if (timestampSeeks != null) {
        Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = this.consumer.offsetsForTimes(timestampSeeks);
        offsetsForTimes.forEach((tp, ot) -> {
            if (ot != null) {
                this.consumer.seek(tp, ot.offset());
            }
        });
    }
}
// Trace-log a queued seek request.
private void traceSeek(TopicPartitionOffset offset) {
    this.logger.trace(() -> "Seek: " + offset);
}
/**
 * Apply the configured initial positions for explicitly assigned partitions:
 * BEGINNING/END partitions are bucketed for bulk seekToBeginning/seekToEnd,
 * TIMESTAMP partitions are resolved via offsetsForTimes (falling back to END
 * when no offset exists for the timestamp), then the seek-aware listener is
 * told the resulting positions.
 */
private void initPartitionsIfNeeded() {
    /*
     * Note: initial position setting is only supported with explicit topic assignment.
     * When using auto assignment (subscribe), the ConsumerRebalanceListener is not
     * called until we poll() the consumer. Users can use a ConsumerAwareRebalanceListener
     * or a ConsumerSeekAware listener in that case.
     */
    Map<TopicPartition, OffsetMetadata> partitions = new LinkedHashMap<>(this.definedPartitions);
    Set<TopicPartition> beginnings = partitions.entrySet().stream()
            .filter(e -> SeekPosition.BEGINNING.equals(e.getValue().seekPosition))
            .map(Entry::getKey)
            .collect(Collectors.toSet());
    beginnings.forEach(partitions::remove);
    Set<TopicPartition> ends = partitions.entrySet().stream()
            .filter(e -> SeekPosition.END.equals(e.getValue().seekPosition))
            .map(Entry::getKey)
            .collect(Collectors.toSet());
    ends.forEach(partitions::remove);
    Map<TopicPartition, Long> times = partitions.entrySet().stream()
            .filter(e -> SeekPosition.TIMESTAMP.equals(e.getValue().seekPosition))
            .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().offset));
    Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = this.consumer.offsetsForTimes(times);
    offsetsForTimes.forEach((tp, off) -> {
        if (off == null) {
            // no offset for the requested timestamp; seek to end instead
            ends.add(tp);
        }
        else {
            partitions.put(tp, new OffsetMetadata(off.offset(), false, SeekPosition.TIMESTAMP));
        }
    });
    doInitialSeeks(partitions, beginnings, ends);
    if (this.consumerSeekAwareListener != null) {
        this.consumerSeekAwareListener.onPartitionsAssigned(this.definedPartitions.keySet().stream()
                .map(tp -> new SimpleEntry<>(tp, this.consumer.position(tp)))
                .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue())),
                this.seekCallback);
    }
}
/**
 * Execute the initial seeks: bulk seekToBeginning/seekToEnd for the bucketed
 * partitions, then explicit offsets - a negative offset is applied relative to
 * the end (unless relativeToCurrent), a relativeToCurrent offset relative to
 * the current position; individual seek failures are logged, not propagated.
 */
private void doInitialSeeks(Map<TopicPartition, OffsetMetadata> partitions, Set<TopicPartition> beginnings,
        Set<TopicPartition> ends) {

    if (beginnings.size() > 0) {
        this.consumer.seekToBeginning(beginnings);
    }
    if (ends.size() > 0) {
        this.consumer.seekToEnd(ends);
    }
    for (Entry<TopicPartition, OffsetMetadata> entry : partitions.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        OffsetMetadata metadata = entry.getValue();
        Long offset = metadata.offset;
        if (offset != null) {
            long newOffset = offset;

            if (offset < 0) {
                if (!metadata.relativeToCurrent) {
                    // negative non-relative offset means "end + offset"
                    this.consumer.seekToEnd(Collections.singletonList(topicPartition));
                }
                newOffset = Math.max(0, this.consumer.position(topicPartition) + offset);
            }
            else if (metadata.relativeToCurrent) {
                newOffset = this.consumer.position(topicPartition) + offset;
            }
            try {
                this.consumer.seek(topicPartition, newOffset);
                logReset(topicPartition, newOffset);
            }
            catch (Exception e) {
                long newOffsetToLog = newOffset;
                this.logger.error(e, () -> "Failed to set initial offset for " + topicPartition
                        + " at " + newOffsetToLog + ". Position is " + this.consumer.position(topicPartition));
            }
        }
    }
}
/**
 * Log, at debug level, that the partition was repositioned to the given offset.
 */
private void logReset(TopicPartition topicPartition, long newOffset) {
	this.logger.debug(() -> "Reset " + topicPartition + " to offset " + newOffset);
}
/**
 * Drain the queue of acknowledged records, folding each record's offset into the
 * pending-commit state via {@link #addOffset}.
 */
private void updatePendingOffsets() {
	ConsumerRecord<K, V> acked;
	while ((acked = this.acks.poll()) != null) {
		addOffset(acked);
	}
}
/**
 * Record an acknowledged record's offset, retaining only the highest offset seen
 * per partition of the record's topic.
 */
private void addOffset(ConsumerRecord<K, V> record) {
	this.offsets.computeIfAbsent(record.topic(), topic -> new ConcurrentHashMap<>())
			.merge(record.partition(), record.offset(), Math::max);
}
/**
 * Build the commit map from the pending offsets and, when non-empty, commit it
 * synchronously or asynchronously per configuration. A wakeup during the commit is
 * ignored because the container is not polling at this point.
 */
private void commitIfNecessary() {
	Map<TopicPartition, OffsetAndMetadata> commits = buildCommits();
	this.logger.debug(() -> "Commit list: " + commits);
	if (commits.isEmpty()) {
		return;
	}
	this.commitLogger.log(() -> COMMITTING + commits);
	try {
		if (this.syncCommits) {
			commitSync(commits);
		}
		else {
			commitAsync(commits);
		}
	}
	catch (@SuppressWarnings(UNUSED) WakeupException e) {
		// ignore - not polling
		this.logger.debug("Woken up during commit");
	}
}
/**
 * Synchronously commit the supplied offsets, starting the retry counter at zero.
 */
private void commitSync(Map<TopicPartition, OffsetAndMetadata> commits) {
	doCommitSync(commits, 0);
}
/**
 * Synchronously commit the supplied offsets, retrying retriable commit failures up
 * to the configured limit. A commit rejected because a rebalance is in progress is
 * not fatal; the offsets are stashed for committing after the rebalance completes.
 * @param commits the offsets to commit.
 * @param retries the number of retries already attempted.
 */
private void doCommitSync(Map<TopicPartition, OffsetAndMetadata> commits, int retries) {
	int attempts = retries;
	while (true) {
		try {
			this.consumer.commitSync(commits, this.syncCommitTimeout);
			if (this.fixTxOffsets) {
				this.lastCommits.putAll(commits);
			}
			return;
		}
		catch (RetriableCommitFailedException e) {
			if (attempts >= this.containerProperties.getCommitRetries()) {
				throw e;
			}
			attempts++;
		}
		catch (RebalanceInProgressException e) {
			this.logger.debug(e, "Non-fatal commit failure");
			// Re-commit these after the rebalance completes.
			this.commitsDuringRebalance.putAll(commits);
			return;
		}
	}
}
/**
 * Convert the accumulated highest-processed offsets into a commit map (the
 * committed offset is the last processed offset plus one) and clear the
 * accumulated state.
 * @return the offsets to commit, keyed by topic/partition.
 */
private Map<TopicPartition, OffsetAndMetadata> buildCommits() {
	Map<TopicPartition, OffsetAndMetadata> commits = new HashMap<>();
	this.offsets.forEach((topic, partitionOffsets) ->
			partitionOffsets.forEach((partition, offset) ->
					commits.put(new TopicPartition(topic, partition),
							new OffsetAndMetadata(offset + 1))));
	this.offsets.clear();
	return commits;
}
/**
 * Return, for each partition present in the batch, the record with the highest
 * offset (records within a partition are in offset order, so that is the last
 * record in the partition's list).
 * @param records the polled batch.
 * @return one record per partition.
 */
private Collection<ConsumerRecord<K, V>> getHighestOffsetRecords(ConsumerRecords<K, V> records) {
	// Map each partition directly to its last record; the previous implementation
	// built an intermediate Map keyed by partition only to immediately discard the
	// keys via values().
	return records.partitions()
			.stream()
			.map(tp -> {
				List<ConsumerRecord<K, V>> recordList = records.records(tp);
				return recordList.get(recordList.size() - 1);
			})
			.collect(Collectors.toList());
}
@Override
public void seek(String topic, int partition, long offset) {
	// Queued; applied on the consumer thread before the next poll.
	this.seeks.add(new TopicPartitionOffset(topic, partition, offset));
}
@Override
public void seekToBeginning(String topic, int partition) {
	// Queued; applied on the consumer thread before the next poll.
	this.seeks.add(new TopicPartitionOffset(topic, partition, SeekPosition.BEGINNING));
}
/**
 * Queue a seek-to-beginning for each supplied partition; applied on the consumer
 * thread before the next poll.
 */
@Override
public void seekToBeginning(Collection<TopicPartition> partitions) {
	partitions.forEach(tp -> this.seeks
			.add(new TopicPartitionOffset(tp.topic(), tp.partition(), SeekPosition.BEGINNING)));
}
@Override
public void seekToEnd(String topic, int partition) {
	// Queued; applied on the consumer thread before the next poll.
	this.seeks.add(new TopicPartitionOffset(topic, partition, SeekPosition.END));
}
/**
 * Queue a seek-to-end for each supplied partition; applied on the consumer thread
 * before the next poll.
 */
@Override
public void seekToEnd(Collection<TopicPartition> partitions) {
	partitions.forEach(tp -> this.seeks
			.add(new TopicPartitionOffset(tp.topic(), tp.partition(), SeekPosition.END)));
}
/**
 * Queue a relative seek: relative to the current position when {@code toCurrent}
 * is true; otherwise a non-negative offset is relative to the beginning and a
 * negative offset is relative to the end.
 */
@Override
public void seekRelative(String topic, int partition, long offset, boolean toCurrent) {
	if (toCurrent) {
		this.seeks.add(new TopicPartitionOffset(topic, partition, offset, toCurrent));
		return;
	}
	SeekPosition position = offset >= 0 ? SeekPosition.BEGINNING : SeekPosition.END;
	this.seeks.add(new TopicPartitionOffset(topic, partition, offset, position));
}
@Override
public void seekToTimestamp(String topic, int partition, long timestamp) {
	// The timestamp is carried in the offset slot; resolved to an offset later.
	this.seeks.add(new TopicPartitionOffset(topic, partition, timestamp, SeekPosition.TIMESTAMP));
}
@Override
public void seekToTimestamp(Collection<TopicPartition> topicParts, long timestamp) {
	// Delegate to the single-partition variant for each partition.
	topicParts.forEach(tp -> seekToTimestamp(tp.topic(), tp.partition(), timestamp));
}
/**
 * Render the consumer's key runtime properties for diagnostics.
 */
@Override
public String toString() {
	StringBuilder builder = new StringBuilder("KafkaMessageListenerContainer.ListenerConsumer [");
	builder.append("\ncontainerProperties=").append(this.containerProperties);
	builder.append("\nother properties [");
	builder.append("\n listenerType=").append(this.listenerType);
	builder.append("\n isConsumerAwareListener=").append(this.isConsumerAwareListener);
	builder.append("\n isBatchListener=").append(this.isBatchListener);
	builder.append("\n autoCommit=").append(this.autoCommit);
	builder.append("\n consumerGroupId=").append(this.consumerGroupId);
	builder.append("\n clientIdSuffix=").append(KafkaMessageListenerContainer.this.clientIdSuffix);
	builder.append("\n]");
	return builder.toString();
}
/**
 * Close the transactional producer associated with each revoked partition (keyed
 * by the zombie-fencing transactional id suffix); failures are logged and do not
 * prevent closing the remaining producers.
 * @param partitions the revoked partitions; no-op when null.
 */
private void closeProducers(@Nullable Collection<TopicPartition> partitions) {
	if (partitions == null) {
		return;
	}
	ProducerFactory<?, ?> producerFactory = this.kafkaTxManager.getProducerFactory();
	for (TopicPartition tp : partitions) {
		String txIdSuffix = zombieFenceTxIdSuffix(tp.topic(), tp.partition());
		try {
			producerFactory.closeProducerFor(txIdSuffix);
		}
		catch (Exception e) {
			this.logger.error(e, () -> "Failed to close producer with transaction id suffix: "
					+ txIdSuffix);
		}
	}
}
/**
 * Build the transactional id suffix ({@code group.topic.partition}) used to fence
 * zombie producers for the given topic/partition.
 */
private String zombieFenceTxIdSuffix(String topic, int partition) {
	return String.format("%s.%s.%d", this.consumerGroupId, topic, partition);
}
/**
 * {@link Acknowledgment} for a single record: acknowledging commits (or queues)
 * the record's offset at most once; nack requests redelivery from this record
 * after an optional sleep.
 */
private final class ConsumerAcknowledgment implements Acknowledgment {

	private final ConsumerRecord<K, V> record;

	// Guards against double-acking; volatile because acknowledge() may be invoked
	// from a thread other than the consumer thread.
	private volatile boolean acked;

	ConsumerAcknowledgment(ConsumerRecord<K, V> record) {
		this.record = record;
	}

	@Override
	public void acknowledge() {
		if (!this.acked) {
			doAck(this.record);
			this.acked = true;
		}
	}

	@Override
	public void nack(long sleep) {
		Assert.state(Thread.currentThread().equals(ListenerConsumer.this.consumerThread),
				"nack() can only be called on the consumer thread");
		Assert.isTrue(sleep >= 0, "sleep cannot be negative");
		ListenerConsumer.this.nackSleep = sleep;
		synchronized (ListenerConsumer.this) {
			if (ListenerConsumer.this.offsetsInThisBatch != null) {
				// Discard out-of-order offset tracking for the current batch;
				// the records will be redelivered.
				ListenerConsumer.this.offsetsInThisBatch.forEach((part, recs) -> recs.clear());
				ListenerConsumer.this.deferredOffsets.forEach((part, recs) -> recs.clear());
			}
		}
	}

	@Override
	@SuppressWarnings(DEPRECATION)
	public String toString() {
		return "Acknowledgment for " + ListenerUtils.recordToString(this.record, true);
	}

}
/**
 * {@link Acknowledgment} for an entire polled batch: acknowledging processes the
 * acks for all records at most once; {@code nack(index, sleep)} acknowledges the
 * records before {@code index} and requests redelivery from that record onward.
 */
private final class ConsumerBatchAcknowledgment implements Acknowledgment {

	private final ConsumerRecords<K, V> records;

	// Guards against double-acking; volatile because acknowledge() may be invoked
	// from a thread other than the consumer thread.
	private volatile boolean acked;

	ConsumerBatchAcknowledgment(ConsumerRecords<K, V> records) {
		this.records = records;
	}

	@Override
	public void acknowledge() {
		Map<TopicPartition, List<Long>> offs = ListenerConsumer.this.offsetsInThisBatch;
		Map<TopicPartition, List<ConsumerRecord<K, V>>> deferred = ListenerConsumer.this.deferredOffsets;
		if (!this.acked) {
			// Acking the whole batch supersedes any per-record out-of-order
			// tracking for the partitions in this batch.
			for (ConsumerRecord<K, V> record : getHighestOffsetRecords(this.records)) {
				if (offs != null) {
					offs.remove(new TopicPartition(record.topic(), record.partition()));
					deferred.remove(new TopicPartition(record.topic(), record.partition()));
				}
			}
			processAcks(this.records);
			this.acked = true;
		}
	}

	@Override
	public void nack(int index, long sleep) {
		Assert.state(Thread.currentThread().equals(ListenerConsumer.this.consumerThread),
				"nack() can only be called on the consumer thread");
		Assert.isTrue(sleep >= 0, "sleep cannot be negative");
		Assert.isTrue(index >= 0 && index < this.records.count(), "index out of bounds");
		ListenerConsumer.this.nackIndex = index;
		ListenerConsumer.this.nackSleep = sleep;
		synchronized (ListenerConsumer.this) {
			if (ListenerConsumer.this.offsetsInThisBatch != null) {
				// Discard out-of-order offset tracking; the tail of the batch
				// will be redelivered.
				ListenerConsumer.this.offsetsInThisBatch.forEach((part, recs) -> recs.clear());
				ListenerConsumer.this.deferredOffsets.forEach((part, recs) -> recs.clear());
			}
		}
		// Collect the records preceding the nacked index; those are acked below.
		int i = 0;
		List<ConsumerRecord<K, V>> toAck = new LinkedList<>();
		for (ConsumerRecord<K, V> record : this.records) {
			if (i++ < index) {
				toAck.add(record);
			}
			else {
				break;
			}
		}
		// Rebuild a ConsumerRecords containing just the records to ack.
		Map<TopicPartition, List<ConsumerRecord<K, V>>> newRecords = new HashMap<>();
		for (ConsumerRecord<K, V> record : toAck) {
			newRecords.computeIfAbsent(new TopicPartition(record.topic(), record.partition()),
					tp -> new LinkedList<>()).add(record);
		}
		processAcks(new ConsumerRecords<K, V>(newRecords));
	}

	@Override
	public String toString() {
		return "Acknowledgment for " + this.records;
	}

}
/**
 * {@link ConsumerRebalanceListener} that coordinates the container's own state
 * (pending acks, seeks, transactional producers) with the user's rebalance
 * listener during revocation/assignment.
 */
private class ListenerConsumerRebalanceListener implements ConsumerRebalanceListener {

	private final ConsumerRebalanceListener userListener = getContainerProperties()
			.getConsumerRebalanceListener();

	// Non-null only when the user listener wants the consumer passed to callbacks.
	private final ConsumerAwareRebalanceListener consumerAwareListener =
			this.userListener instanceof ConsumerAwareRebalanceListener
					? (ConsumerAwareRebalanceListener) this.userListener : null;

	ListenerConsumerRebalanceListener() {
	}

	@Override
	public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
		try {
			// Notify the user listener first (before-commit variant when aware).
			if (this.consumerAwareListener != null) {
				this.consumerAwareListener.onPartitionsRevokedBeforeCommit(ListenerConsumer.this.consumer,
						partitions);
			}
			else {
				this.userListener.onPartitionsRevoked(partitions);
			}
			try {
				// Wait until now to commit, in case the user listener added acks
				commitPendingAcks();
				fixTxOffsetsIfNeeded();
			}
			catch (Exception e) {
				ListenerConsumer.this.logger.error(e, () -> "Fatal commit error after revocation "
						+ partitions);
			}
			if (this.consumerAwareListener != null) {
				this.consumerAwareListener.onPartitionsRevokedAfterCommit(ListenerConsumer.this.consumer,
						partitions);
			}
			if (ListenerConsumer.this.consumerSeekAwareListener != null) {
				ListenerConsumer.this.consumerSeekAwareListener.onPartitionsRevoked(partitions);
			}
			if (ListenerConsumer.this.assignedPartitions != null) {
				ListenerConsumer.this.assignedPartitions.removeAll(partitions);
			}
			// Clear per-partition state for the revoked partitions.
			ListenerConsumer.this.pausedForNack.removeAll(partitions);
			partitions.forEach(tp -> ListenerConsumer.this.lastCommits.remove(tp));
			synchronized (ListenerConsumer.this) {
				if (ListenerConsumer.this.offsetsInThisBatch != null) {
					partitions.forEach(tp -> {
						ListenerConsumer.this.offsetsInThisBatch.remove(tp);
						ListenerConsumer.this.deferredOffsets.remove(tp);
					});
				}
			}
		}
		finally {
			// When transactional, close producers keyed by the revoked partitions.
			if (ListenerConsumer.this.kafkaTxManager != null) {
				closeProducers(partitions);
			}
		}
	}

	@Override
	public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
		// Kafka resumes all assigned partitions during a rebalance; re-pause if
		// the container (or a nack) had paused them.
		if (ListenerConsumer.this.consumerPaused) {
			ListenerConsumer.this.consumer.pause(partitions);
			ListenerConsumer.this.logger.warn("Paused consumer resumed by Kafka due to rebalance; "
					+ "consumer paused again, so the initial poll() will never return any records");
		}
		if (ListenerConsumer.this.pausedForNack.size() > 0) {
			ListenerConsumer.this.consumer.pause(ListenerConsumer.this.pausedForNack);
		}
		ListenerConsumer.this.assignedPartitions.addAll(partitions);
		if (ListenerConsumer.this.commitCurrentOnAssignment
				&& !collectAndCommitIfNecessary(partitions)) {
			// A fatal error occurred while determining initial offsets.
			return;
		}
		if (ListenerConsumer.this.genericListener instanceof ConsumerSeekAware) {
			seekPartitions(partitions, false);
		}
		if (this.consumerAwareListener != null) {
			this.consumerAwareListener.onPartitionsAssigned(ListenerConsumer.this.consumer, partitions);
		}
		else {
			this.userListener.onPartitionsAssigned(partitions);
		}
	}

	/**
	 * Commit the current positions of any newly assigned partitions that have no
	 * existing committed offset for this group.
	 * @return false if a fatal no-offset/no-reset-policy error occurred.
	 */
	private boolean collectAndCommitIfNecessary(Collection<TopicPartition> partitions) {
		// Commit initial positions - this is generally redundant but
		// it protects us from the case when another consumer starts
		// and rebalance would cause it to reset at the end
		// see https://github.com/spring-projects/spring-kafka/issues/110
		Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
		Map<TopicPartition, OffsetAndMetadata> committed =
				ListenerConsumer.this.consumer.committed(new HashSet<>(partitions));
		for (TopicPartition partition : partitions) {
			try {
				if (committed.get(partition) == null) { // no existing commit for this group
					offsetsToCommit.put(partition,
							new OffsetAndMetadata(ListenerConsumer.this.consumer.position(partition)));
				}
			}
			catch (NoOffsetForPartitionException e) {
				ListenerConsumer.this.fatalError = true;
				ListenerConsumer.this.logger.error(e, "No offset and no reset policy");
				return false;
			}
		}
		if (offsetsToCommit.size() > 0) {
			commitCurrentOffsets(offsetsToCommit);
		}
		return true;
	}

	/**
	 * Commit the supplied offsets - within a transaction when the container is
	 * configured for transactions (unless LATEST_ONLY_NO_TX), otherwise via a
	 * plain sync/async commit.
	 */
	private void commitCurrentOffsets(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) {
		ListenerConsumer.this.commitLogger.log(() -> "Committing on assignment: " + offsetsToCommit);
		if (ListenerConsumer.this.transactionTemplate != null
				&& ListenerConsumer.this.kafkaTxManager != null
				&& !AssignmentCommitOption.LATEST_ONLY_NO_TX.equals(ListenerConsumer.this.autoCommitOption)) {
			try {
				// One transaction per partition so the zombie-fencing
				// transactional id suffix matches the partition.
				offsetsToCommit.forEach((partition, offsetAndMetadata) -> {
					if (ListenerConsumer.this.producerPerConsumerPartition) {
						TransactionSupport.setTransactionIdSuffix(
								zombieFenceTxIdSuffix(partition.topic(), partition.partition()));
					}
					ListenerConsumer.this.transactionTemplate
							.execute(new TransactionCallbackWithoutResult() {

								@Override
								protected void doInTransactionWithoutResult(TransactionStatus status) {
									KafkaResourceHolder<?, ?> holder =
											(KafkaResourceHolder<?, ?>) TransactionSynchronizationManager
													.getResource(ListenerConsumer.this.kafkaTxManager
															.getProducerFactory());
									if (holder != null) {
										doSendOffsets(holder.getProducer(),
												Collections.singletonMap(partition, offsetAndMetadata));
									}
								}

							});
				});
			}
			finally {
				TransactionSupport.clearTransactionIdSuffix();
			}
		}
		else {
			ContainerProperties containerProps = KafkaMessageListenerContainer.this.getContainerProperties();
			if (containerProps.isSyncCommits()) {
				try {
					ListenerConsumer.this.consumer.commitSync(offsetsToCommit,
							containerProps.getSyncCommitTimeout());
				}
				catch (RetriableCommitFailedException | RebalanceInProgressException e) {
					// ignore since this is on assignment anyway
				}
			}
			else {
				commitAsync(offsetsToCommit);
			}
		}
	}

	@Override
	public void onPartitionsLost(Collection<TopicPartition> partitions) {
		// Notify the user listener, then treat lost partitions like a revocation
		// so container state and producers are cleaned up.
		if (this.consumerAwareListener != null) {
			this.consumerAwareListener.onPartitionsLost(ListenerConsumer.this.consumer, partitions);
		}
		else {
			this.userListener.onPartitionsLost(partitions);
		}
		onPartitionsRevoked(partitions);
	}

}
/**
 * {@link ConsumerSeekCallback} used during initial assignment or idle callbacks:
 * seeks are performed immediately on the consumer (we are on the consumer thread)
 * rather than queued.
 */
private final class InitialOrIdleSeekCallback implements ConsumerSeekCallback {

	InitialOrIdleSeekCallback() {
	}

	@Override
	public void seek(String topic, int partition, long offset) {
		ListenerConsumer.this.consumer.seek(new TopicPartition(topic, partition), offset);
	}

	@Override
	public void seekToBeginning(String topic, int partition) {
		ListenerConsumer.this.consumer.seekToBeginning(
				Collections.singletonList(new TopicPartition(topic, partition)));
	}

	@Override
	public void seekToBeginning(Collection<TopicPartition> partitions) {
		ListenerConsumer.this.consumer.seekToBeginning(partitions);
	}

	@Override
	public void seekToEnd(String topic, int partition) {
		ListenerConsumer.this.consumer.seekToEnd(
				Collections.singletonList(new TopicPartition(topic, partition)));
	}

	@Override
	public void seekToEnd(Collection<TopicPartition> partitions) {
		ListenerConsumer.this.consumer.seekToEnd(partitions);
	}

	@Override
	public void seekRelative(String topic, int partition, long offset, boolean toCurrent) {
		TopicPartition topicPart = new TopicPartition(topic, partition);
		Long whereTo = null;
		Consumer<K, V> consumerToSeek = ListenerConsumer.this.consumer;
		// Non-negative offsets are relative to the beginning (or current position);
		// negative offsets are relative to the end (or current position).
		if (offset >= 0) {
			whereTo = computeForwardWhereTo(offset, toCurrent, topicPart, consumerToSeek);
		}
		else {
			whereTo = computeBackwardWhereTo(offset, toCurrent, topicPart, consumerToSeek);
		}
		if (whereTo != null) {
			consumerToSeek.seek(topicPart, whereTo);
		}
	}

	@Override
	public void seekToTimestamp(String topic, int partition, long timestamp) {
		Consumer<K, V> consumerToSeek = ListenerConsumer.this.consumer;
		Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = consumerToSeek.offsetsForTimes(
				Collections.singletonMap(new TopicPartition(topic, partition), timestamp));
		// A null OffsetAndTimestamp means no record at/after the timestamp; skip.
		offsetsForTimes.forEach((tp, ot) -> {
			if (ot != null) {
				consumerToSeek.seek(tp, ot.offset());
			}
		});
	}

	@Override
	public void seekToTimestamp(Collection<TopicPartition> topicParts, long timestamp) {
		Consumer<K, V> consumerToSeek = ListenerConsumer.this.consumer;
		Map<TopicPartition, Long> map = topicParts.stream()
				.collect(Collectors.toMap(tp -> tp, tp -> timestamp));
		Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = consumerToSeek.offsetsForTimes(map);
		offsetsForTimes.forEach((tp, ot) -> {
			if (ot != null) {
				consumerToSeek.seek(tp, ot.offset());
			}
		});
	}

	/**
	 * Resolve a forward-relative seek target: beginning + offset, or current
	 * position + offset when toCurrent.
	 * @return the target offset, or null if the start offset is unknown.
	 */
	@Nullable
	private Long computeForwardWhereTo(long offset, boolean toCurrent, TopicPartition topicPart,
			Consumer<K, V> consumerToSeek) {

		Long start;
		if (!toCurrent) {
			Map<TopicPartition, Long> beginning = consumerToSeek
					.beginningOffsets(Collections.singletonList(topicPart));
			start = beginning.get(topicPart);
		}
		else {
			start = consumerToSeek.position(topicPart);
		}
		if (start != null) {
			return start + offset;
		}
		return null;
	}

	/**
	 * Resolve a backward-relative seek target: end + offset (offset is negative),
	 * or current position + offset when toCurrent; clamped at zero.
	 * @return the target offset, or null if the end offset is unknown.
	 */
	@Nullable
	private Long computeBackwardWhereTo(long offset, boolean toCurrent, TopicPartition topicPart,
			Consumer<K, V> consumerToSeek) {

		Long end;
		if (!toCurrent) {
			Map<TopicPartition, Long> endings = consumerToSeek
					.endOffsets(Collections.singletonList(topicPart));
			end = endings.get(topicPart);
		}
		else {
			end = consumerToSeek.position(topicPart);
		}
		if (end != null) {
			long newOffset = end + offset;
			return newOffset < 0 ? 0 : newOffset;
		}
		return null;
	}

}
}
/**
 * Value holder describing how a partition's initial position is specified: an
 * absolute or relative offset and/or a special seek position.
 */
private static final class OffsetMetadata {

	// Absolute offset, or a delta when relativeToCurrent; null when only a
	// SeekPosition applies.
	final Long offset; // NOSONAR

	// Whether the offset is relative to the current position.
	final boolean relativeToCurrent; // NOSONAR

	// Special seek target (e.g. BEGINNING, END, TIMESTAMP), if any.
	final SeekPosition seekPosition; // NOSONAR

	OffsetMetadata(Long offset, boolean relativeToCurrent, SeekPosition seekPosition) {
		this.offset = offset;
		this.relativeToCurrent = relativeToCurrent;
		this.seekPosition = seekPosition;
	}

}
/**
 * Future callback invoked when the consumer task completes during a container
 * stop; logs the outcome and runs the user-supplied stop callback (if any) in
 * either case.
 */
private class StopCallback implements ListenableFutureCallback<Object> {

	@Nullable
	private final Runnable callback;

	StopCallback(Runnable callback) {
		this.callback = callback;
	}

	@Override
	public void onFailure(Throwable e) {
		KafkaMessageListenerContainer.this.logger
				.error(e, "Error while stopping the container: ");
		if (this.callback != null) {
			this.callback.run();
		}
	}

	@Override
	public void onSuccess(Object result) {
		KafkaMessageListenerContainer.this.logger
				.debug(() -> KafkaMessageListenerContainer.this + " stopped normally");
		if (this.callback != null) {
			this.callback.run();
		}
	}

}
/**
 * Internal exception used to stop the container after a transactional producer
 * has been fenced.
 */
@SuppressWarnings("serial")
private static class StopAfterFenceException extends KafkaException {

	StopAfterFenceException(String message, Throwable t) {
		super(message, t);
	}

}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ListenerContainerRegistry.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import java.util.Set;
import org.springframework.lang.Nullable;
/**
 * A registry for listener containers; provides lookup by id as well as access to
 * all containers known to the registry (and, optionally, the application context).
 *
 * @author Gary Russell
 * @since 2.7
 *
 */
public interface ListenerContainerRegistry {

	/**
	 * Return the {@link MessageListenerContainer} with the specified id or
	 * {@code null} if no such container exists.
	 * @param id the id of the container
	 * @return the container or {@code null} if no container with that id exists
	 * @see org.springframework.kafka.config.KafkaListenerEndpoint#getId()
	 * @see #getListenerContainerIds()
	 */
	@Nullable
	MessageListenerContainer getListenerContainer(String id);

	/**
	 * Return the ids of the managed {@link MessageListenerContainer} instance(s).
	 * @return the ids.
	 * @see #getListenerContainer(String)
	 */
	Set<String> getListenerContainerIds();

	/**
	 * Return the managed {@link MessageListenerContainer} instance(s); containers
	 * declared directly as beans are not included.
	 * @return the managed {@link MessageListenerContainer} instance(s).
	 * @see #getAllListenerContainers()
	 */
	Collection<MessageListenerContainer> getListenerContainers();

	/**
	 * Return all {@link MessageListenerContainer} instances including those managed by
	 * this registry and those declared as beans in the application context.
	 * Prototype-scoped containers will be included. Lazy beans that have not yet been
	 * created will not be initialized by a call to this method.
	 * @return the {@link MessageListenerContainer} instance(s).
	 * @see #getListenerContainers()
	 */
	Collection<MessageListenerContainer> getAllListenerContainers();

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ListenerExecutionFailedException.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
/**
 * The listener specific {@link KafkaException} extension; thrown when a message
 * listener fails, carrying the consumer {@code group.id} so error handlers can
 * identify the failing container.
 *
 * @author Gary Russell
 */
@SuppressWarnings("serial")
public class ListenerExecutionFailedException extends KafkaException {

	// The container's group.id; null when constructed without one.
	private final String groupId;

	/**
	 * Construct an instance with the provided properties.
	 * @param message the exception message.
	 */
	public ListenerExecutionFailedException(String message) {
		this(message, null, null);
	}

	/**
	 * Construct an instance with the provided properties.
	 * @param message the exception message.
	 * @param cause the cause.
	 */
	public ListenerExecutionFailedException(String message, @Nullable Throwable cause) {
		this(message, null, cause);
	}

	/**
	 * Construct an instance with the provided properties.
	 * @param message the exception message.
	 * @param groupId the container's group.id property.
	 * @param cause the cause.
	 * @since 2.2.4
	 */
	public ListenerExecutionFailedException(String message, @Nullable String groupId, @Nullable Throwable cause) {
		super(message, cause);
		this.groupId = groupId;
	}

	/**
	 * Return the consumer group.id property of the container that threw this exception.
	 * @return the group id; may be null, but not when the exception is passed to an error
	 * handler by a listener container.
	 * @since 2.2.4
	 */
	@Nullable
	public String getGroupId() {
		return this.groupId;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ListenerInvokingBatchErrorHandler.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
/**
 * A batch error handler that is capable of invoking the listener during error
 * handling (the container passes a {@code Runnable} that re-invokes the listener).
 *
 * @author Gary Russell
 * @since 2.3.7
 *
 */
@FunctionalInterface
public interface ListenerInvokingBatchErrorHandler extends ContainerAwareBatchErrorHandler {

	// The container must always use the invokeListener-aware overload below.
	@Override
	default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Handle the exception; {@code invokeListener} re-invokes the listener with the
	 * failed batch.
	 */
	@Override
	void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> records,
			Consumer<?, ?> consumer, MessageListenerContainer container, Runnable invokeListener);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ListenerType.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
/**
 * Defines the listener type, i.e. which optional arguments ({@code Acknowledgment},
 * {@code Consumer}) the listener method accepts.
 *
 * @author Gary Russell
 * @since 2.0
 *
 */
public enum ListenerType {

	/**
	 * Acknowledging and consumer aware.
	 */
	ACKNOWLEDGING_CONSUMER_AWARE,

	/**
	 * Consumer aware.
	 */
	CONSUMER_AWARE,

	/**
	 * Acknowledging.
	 */
	ACKNOWLEDGING,

	/**
	 * Simple.
	 */
	SIMPLE

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ListenerUtils.java | /*
* Copyright 2017-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectStreamClass;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.serializer.DeserializationException;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.BackOffExecution;
/**
* Listener utilities.
*
* @author Gary Russell
* @since 2.0
*
*/
public final class ListenerUtils {
private ListenerUtils() {
	// Utility class - no instances.
}
// Per-thread flag controlling whether recordToString() renders metadata only
// (set via the deprecated setLogOnlyMetadata()).
private static final ThreadLocal<Boolean> LOG_METADATA_ONLY = new ThreadLocal<>();

// Sleep-slice constants - presumably used by the stoppable-sleep support later in
// this class (beyond this view); TODO confirm against the full file.
private static final int DEFAULT_SLEEP_INTERVAL = 100;

private static final int SMALL_SLEEP_INTERVAL = 10;

private static final long SMALL_INTERVAL_THRESHOLD = 500;
/**
 * Determine the type of the listener, checking the most specific interfaces first
 * (order matters because the acknowledging/consumer-aware interfaces extend the
 * simpler ones).
 * @param listener the listener.
 * @return the {@link ListenerType}.
 * @throws IllegalArgumentException if the listener implements none of the
 * supported interfaces.
 */
public static ListenerType determineListenerType(Object listener) {
	Assert.notNull(listener, "Listener cannot be null");
	if (listener instanceof AcknowledgingConsumerAwareMessageListener
			|| listener instanceof BatchAcknowledgingConsumerAwareMessageListener) {
		return ListenerType.ACKNOWLEDGING_CONSUMER_AWARE;
	}
	if (listener instanceof ConsumerAwareMessageListener
			|| listener instanceof BatchConsumerAwareMessageListener) {
		return ListenerType.CONSUMER_AWARE;
	}
	if (listener instanceof AcknowledgingMessageListener
			|| listener instanceof BatchAcknowledgingMessageListener) {
		return ListenerType.ACKNOWLEDGING;
	}
	if (listener instanceof GenericMessageListener) {
		return ListenerType.SIMPLE;
	}
	throw new IllegalArgumentException("Unsupported listener type: " + listener.getClass().getName());
}
/**
 * Extract a {@link DeserializationException} from the supplied header name, if
 * present.
 * @param record the consumer record.
 * @param headerName the header name.
 * @param logger the logger for logging errors.
 * @return the exception or null.
 * @since 2.3
 */
@Nullable
public static DeserializationException getExceptionFromHeader(final ConsumerRecord<?, ?> record,
		String headerName, LogAccessor logger) {

	Header header = record.headers().lastHeader(headerName);
	if (header == null) {
		return null;
	}
	DeserializationException exception = byteArrayToDeserializationException(logger, header.value());
	if (exception != null) {
		// Attach the record's headers (minus the exception header itself).
		Headers headers = new RecordHeaders(record.headers().toArray());
		headers.remove(headerName);
		exception.setHeaders(headers);
	}
	return exception;
}
/**
 * Convert a byte array containing a serialized {@link DeserializationException} to the
 * {@link DeserializationException}. The first class resolved during deserialization
 * must be {@link DeserializationException} itself, guarding against arbitrary
 * payloads in the header.
 * @param logger a log accessor to log errors.
 * @param value the bytes.
 * @return the exception or null if deserialization fails.
 * @since 2.8.1
 */
@Nullable
public static DeserializationException byteArrayToDeserializationException(LogAccessor logger, byte[] value) {
	// try-with-resources: the previous implementation never closed the stream.
	try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(value)) {

			boolean first = true;

			@Override
			protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
				// Only the outermost (first) class is constrained; nested causes
				// and stack trace elements resolve normally.
				if (this.first) {
					this.first = false;
					Assert.state(desc.getName().equals(DeserializationException.class.getName()),
							"Header does not contain a DeserializationException");
				}
				return super.resolveClass(desc);
			}

		}) {

		return (DeserializationException) ois.readObject();
	}
	catch (IOException | ClassNotFoundException | ClassCastException e) {
		logger.error(e, "Failed to deserialize a deserialization exception");
		return null;
	}
}
/**
 * Set to true to only log record metadata. Stored in a thread local read by
 * {@link #recordToString(ConsumerRecord)}.
 * @param onlyMeta true to only log record metadata.
 * @since 2.2.14
 * @deprecated in favor of {@link KafkaUtils#format(ConsumerRecord)}.
 * @see #recordToString(ConsumerRecord)
 */
@Deprecated
public static void setLogOnlyMetadata(boolean onlyMeta) {
	LOG_METADATA_ONLY.set(onlyMeta);
}
/**
 * Return the {@link ConsumerRecord} as a String; either {@code toString()} or
 * {@code topic-partition@offset}, depending on the thread-local flag set by
 * {@link #setLogOnlyMetadata(boolean)}.
 * @param record the record.
 * @return the rendered record.
 * @since 2.2.14
 * @deprecated in favor of {@link KafkaUtils#format(ConsumerRecord)}.
 * @see #setLogOnlyMetadata(boolean)
 */
@Deprecated
public static String recordToString(ConsumerRecord<?, ?> record) {
	return recordToString(record, Boolean.TRUE.equals(LOG_METADATA_ONLY.get()));
}
/**
 * Return the {@link ConsumerRecord} as a String; either {@code toString()} or
 * {@code topic-partition@offset}. Delegates to {@link KafkaUtils#format}, which
 * renders full details when {@code meta} is false.
 * @param record the record.
 * @param meta true to log just the metadata.
 * @return the rendered record.
 * @since 2.5.4
 * @deprecated in favor of {@link KafkaUtils#format(ConsumerRecord)}.
 */
@Deprecated
public static String recordToString(ConsumerRecord<?, ?> record, boolean meta) {
	return KafkaUtils.format(record, !meta);
}
/**
 * Sleep according to the {@link BackOff}; when the {@link BackOffExecution} returns
 * {@link BackOffExecution#STOP} sleep for the previous backOff. Delegates to the
 * container-aware variant with a no-op container that always reports running;
 * interruption is swallowed here (the thread's interrupt flag is restored).
 * @param backOff the {@link BackOff} to create a new {@link BackOffExecution}.
 * @param executions a thread local containing the {@link BackOffExecution} for this
 * thread.
 * @param lastIntervals a thread local containing the previous {@link BackOff}
 * interval for this thread.
 * @since 2.3.12
 * @deprecated since 2.7 in favor of
 * {@link #unrecoverableBackOff(BackOff, ThreadLocal, ThreadLocal, MessageListenerContainer)}.
 */
@Deprecated
public static void unrecoverableBackOff(BackOff backOff, ThreadLocal<BackOffExecution> executions,
		ThreadLocal<Long> lastIntervals) {

	try {
		// Stand-in container that is always "running" so the sleep is never cut short.
		unrecoverableBackOff(backOff, executions, lastIntervals, new MessageListenerContainer() { // NOSONAR

			@Override
			public void stop() {
			}

			@Override
			public void start() {
			}

			@Override
			public boolean isRunning() {
				return true;
			}

			@Override
			public void setupMessageListener(Object messageListener) {
			}

			@Override
			public Map<String, Map<MetricName, ? extends Metric>> metrics() {
				return null; // NOSONAR
			}

		});
	}
	catch (InterruptedException e) {
		// Restore the interrupt flag; this deprecated variant does not propagate.
		Thread.currentThread().interrupt();
	}
}
/**
 * Sleep according to the {@link BackOff}; when the {@link BackOffExecution} returns
 * {@link BackOffExecution#STOP}, sleep for the previously recorded interval (or not
 * at all if there is none). The per-thread execution and last interval are kept in
 * the supplied thread locals.
 * @param backOff the {@link BackOff} to create a new {@link BackOffExecution}.
 * @param executions a thread local containing the {@link BackOffExecution} for this
 * thread.
 * @param lastIntervals a thread local containing the previous {@link BackOff}
 * interval for this thread.
 * @param container the container or parent container.
 * @throws InterruptedException if the thread is interrupted.
 * @since 2.7
 */
public static void unrecoverableBackOff(BackOff backOff, ThreadLocal<BackOffExecution> executions,
		ThreadLocal<Long> lastIntervals, MessageListenerContainer container) throws InterruptedException {

	BackOffExecution execution = executions.get();
	if (execution == null) {
		// First failure on this thread - start a fresh back off sequence.
		execution = backOff.start();
		executions.set(execution);
	}
	long interval = execution.nextBackOff();
	if (interval == BackOffExecution.STOP) {
		// Sequence exhausted; fall back to the last interval we actually slept for.
		Long previous = lastIntervals.get();
		interval = previous == null ? 0L : previous;
	}
	lastIntervals.set(interval);
	if (interval > 0) {
		stoppableSleep(container, interval);
	}
}
/**
 * Sleep for the desired timeout, as long as the container continues to run; the
 * sleep is performed in short naps so a container stop is noticed promptly. Note
 * that at least one nap is always taken, even for very small timeouts.
 * @param container the container.
 * @param interval the timeout.
 * @throws InterruptedException if the thread is interrupted.
 * @since 2.7
 */
public static void stoppableSleep(MessageListenerContainer container, long interval) throws InterruptedException {
	long deadline = System.currentTimeMillis() + interval;
	// Use a finer-grained nap for short intervals so we don't overshoot badly.
	long nap = interval > SMALL_INTERVAL_THRESHOLD ? DEFAULT_SLEEP_INTERVAL : SMALL_SLEEP_INTERVAL;
	while (true) {
		Thread.sleep(nap);
		if (!container.isRunning() || System.currentTimeMillis() >= deadline) {
			break;
		}
	}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/LoggingCommitCallback.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Map;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;
import org.springframework.core.log.LogAccessor;
/**
 * An {@link OffsetCommitCallback} that logs commit results: DEBUG level on success,
 * ERROR level (with the exception) on failure.
 *
 * @author Gary Russell
 * @since 2.2.4
 */
public final class LoggingCommitCallback implements OffsetCommitCallback {

	private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(LoggingCommitCallback.class));

	@Override
	public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
		if (exception == null) {
			LOGGER.debug(() -> "Commits for " + offsets + " completed");
		}
		else {
			LOGGER.error(exception, () -> "Commit failed for " + offsets);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/LoggingErrorHandler.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
/**
 * The {@link ErrorHandler} implementation for logging purposes: logs the exception at
 * ERROR level together with a rendering of the failed record (rendering is deferred
 * via the supplier, so it only happens if ERROR logging is enabled).
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 * @deprecated - use the {@link CommonLoggingErrorHandler} instead.
 */
@Deprecated
public class LoggingErrorHandler implements ErrorHandler {
private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(LoggingErrorHandler.class));
@Override
public void handle(Exception thrownException, @Nullable ConsumerRecord<?, ?> record) {
// record may be null; NOTE(review): ListenerUtils.recordToString is assumed to
// tolerate a null record here - confirm against that method's contract.
LOGGER.error(thrownException, () -> "Error while processing: " + ListenerUtils.recordToString(record));
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/MessageListener.java | /*
* Copyright 2015-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * Listener for handling individual incoming Kafka messages. This is a marker
 * specialization of {@link GenericMessageListener} fixing the payload type to a
 * {@link ConsumerRecord}; the single {@code onMessage} method is inherited.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 */
@FunctionalInterface
public interface MessageListener<K, V> extends GenericMessageListener<ConsumerRecord<K, V>> {
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/MessageListenerContainer.java | /*
* Copyright 2016-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.SmartLifecycle;
import org.springframework.lang.Nullable;
/**
* Internal abstraction used by the framework representing a message
* listener container. Not meant to be implemented externally.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Vladimir Tsanev
* @author Tomaz Fernandes
*/
public interface MessageListenerContainer extends SmartLifecycle, DisposableBean {
/**
 * Setup the message listener to use. Throws an {@link IllegalArgumentException}
 * if that message listener type is not supported.
 * @param messageListener the {@code object} to be wrapped into a {@code MessageListener}.
 */
void setupMessageListener(Object messageListener);
/**
 * Return metrics kept by this container's consumer(s), grouped by {@code client-id}.
 * @return the consumer(s) metrics grouped by {@code client-id}
 * @since 1.3
 * @see org.apache.kafka.clients.consumer.Consumer#metrics()
 */
Map<String, Map<MetricName, ? extends Metric>> metrics();
/**
 * Return the container properties for this container.
 * @return the properties.
 * @since 2.1.3
 */
default ContainerProperties getContainerProperties() {
throw new UnsupportedOperationException("This container doesn't support retrieving its properties");
}
/**
 * Return the assigned topics/partitions for this container.
 * @return the topics/partitions.
 * @since 2.1.3
 */
@Nullable
default Collection<TopicPartition> getAssignedPartitions() {
throw new UnsupportedOperationException("This container doesn't support retrieving its assigned partitions");
}
/**
 * Return the assigned topics/partitions for this container, keyed by {@code client.id}.
 * @return the topics/partitions.
 * @since 2.5
 */
@Nullable
default Map<String, Collection<TopicPartition>> getAssignmentsByClientId() {
throw new UnsupportedOperationException("This container doesn't support retrieving its assigned partitions");
}
/**
 * Pause this container before the next poll(). This is a thread-safe operation, the
 * actual pause is processed by the consumer thread.
 * @since 2.1.3
 * @see org.apache.kafka.clients.consumer.KafkaConsumer#pause(Collection)
 */
default void pause() {
throw new UnsupportedOperationException("This container doesn't support pause");
}
/**
 * Resume this container, if paused, after the next poll(). This is a thread-safe
 * operation, the actual resume is processed by the consumer thread.
 * @since 2.1.3
 * @see org.apache.kafka.clients.consumer.KafkaConsumer#resume(Collection)
 */
default void resume() {
throw new UnsupportedOperationException("This container doesn't support resume");
}
/**
 * Pause this partition before the next poll(). This is a thread-safe operation, the
 * actual pause is processed by the consumer thread.
 * @param topicPartition the topicPartition to pause.
 * @since 2.7
 */
default void pausePartition(TopicPartition topicPartition) {
throw new UnsupportedOperationException("This container doesn't support pausing a partition");
}
/**
 * Resume this partition, if paused, after the next poll(). This is a thread-safe operation, the
 * actual resume is processed by the consumer thread.
 * @param topicPartition the topicPartition to resume.
 * @since 2.7
 */
default void resumePartition(TopicPartition topicPartition) {
throw new UnsupportedOperationException("This container doesn't support resuming a partition");
}
/**
 * Whether or not a pause has been requested for this topic's partition (the actual
 * pause may not have been processed yet).
 * @param topicPartition the topic partition to check
 * @return true if pause for this TopicPartition has been requested
 * @since 2.7
 */
default boolean isPartitionPauseRequested(TopicPartition topicPartition) {
throw new UnsupportedOperationException("This container doesn't support pausing a partition");
}
/**
 * Whether or not this topic's partition is currently paused.
 * @param topicPartition the topic partition to check
 * @return true if this partition has been paused.
 * @since 2.7
 */
default boolean isPartitionPaused(TopicPartition topicPartition) {
throw new UnsupportedOperationException("This container doesn't support checking if a partition is paused");
}
/**
 * Return true if {@link #pause()} has been called; the container might not have actually
 * paused yet.
 * @return true if pause has been requested.
 * @since 2.1.5
 */
default boolean isPauseRequested() {
throw new UnsupportedOperationException("This container doesn't support pause/resume");
}
/**
 * Return true if {@link #pause()} has been called and all consumers in this container
 * have actually paused.
 * @return true if the container is paused.
 * @since 2.1.5
 */
default boolean isContainerPaused() {
throw new UnsupportedOperationException("This container doesn't support pause/resume");
}
/**
 * Set the autoStartup. The default implementation is a no-op.
 * @param autoStartup the autoStartup to set.
 * @since 2.2
 * @see SmartLifecycle
 */
default void setAutoStartup(boolean autoStartup) {
// empty
}
/**
 * Return the {@code group.id} property for this container whether specifically set on the
 * container or via a consumer property on the consumer factory.
 * @return the group id.
 * @since 2.2.5
 */
@Nullable
default String getGroupId() {
throw new UnsupportedOperationException("This container does not support retrieving the group id");
}
/**
 * The 'id' attribute of a {@code @KafkaListener} or the bean name for spring-managed
 * containers.
 * @return the id or bean name.
 * @since 2.2.5
 */
@Nullable
default String getListenerId() {
throw new UnsupportedOperationException("This container does not support retrieving the listener id");
}
/**
 * If this container has child containers, return true if at least one child is running. If there are
 * no child containers, this returns {@link #isRunning()}.
 * @return true if a child is running.
 * @since 2.7.3
 */
default boolean isChildRunning() {
return isRunning();
}
/**
 * Return true if the container is running, has never been started, or has been
 * stopped normally (i.e. it is in an "expected" lifecycle state, as opposed to having
 * been stopped via {@link #stopAbnormally(Runnable)}).
 * @return true if the state is as expected.
 * @since 2.8
 * @see #stopAbnormally(Runnable)
 */
default boolean isInExpectedState() {
return true;
}
/**
 * Stop the container after some exception so that {@link #isInExpectedState()} will
 * return false. The default implementation simply delegates to {@link #stop(Runnable)}.
 * @param callback the callback.
 * @since 2.8
 * @see #isInExpectedState()
 */
default void stopAbnormally(Runnable callback) {
stop(callback);
}
@Override
default void destroy() {
// Bean disposal simply stops the container.
stop();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/PartitionPausingBackOffManagerFactory.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Clock;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextClosedEvent;
import org.springframework.core.task.TaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.util.Assert;
/**
*
* Creates a {@link KafkaConsumerBackoffManager} instance
* with or without a {@link KafkaConsumerTimingAdjuster}.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public class PartitionPausingBackOffManagerFactory extends AbstractKafkaBackOffManagerFactory {

	private final Clock clock;

	private boolean timingAdjustmentEnabled = true;

	private KafkaConsumerTimingAdjuster timingAdjustmentManager;

	private TaskExecutor taskExecutor;

	/**
	 * Create a factory that will build its {@link KafkaConsumerBackoffManager}
	 * instances with the provided {@link KafkaConsumerTimingAdjuster}.
	 * @param timingAdjustmentManager the {@link KafkaConsumerTimingAdjuster} to be used.
	 */
	public PartitionPausingBackOffManagerFactory(KafkaConsumerTimingAdjuster timingAdjustmentManager) {
		this.clock = getDefaultClock();
		setTimingAdjustmentManager(timingAdjustmentManager);
	}

	/**
	 * Create a factory whose {@link KafkaConsumerTimingAdjuster} will use the provided
	 * {@link TaskExecutor}.
	 * @param timingAdjustmentManagerTaskExecutor the {@link TaskExecutor} to be used.
	 */
	public PartitionPausingBackOffManagerFactory(TaskExecutor timingAdjustmentManagerTaskExecutor) {
		this.clock = getDefaultClock();
		setTaskExecutor(timingAdjustmentManagerTaskExecutor);
	}

	/**
	 * Create a factory, specifying whether timing adjustment is enabled for this
	 * factory's {@link KafkaConsumerBackoffManager}.
	 * @param timingAdjustmentEnabled false to disable timing adjustment.
	 */
	public PartitionPausingBackOffManagerFactory(boolean timingAdjustmentEnabled) {
		this.clock = getDefaultClock();
		setTimingAdjustmentEnabled(timingAdjustmentEnabled);
	}

	/**
	 * Create a factory using the provided {@link ListenerContainerRegistry}.
	 * @param listenerContainerRegistry the {@link ListenerContainerRegistry} to be used.
	 */
	public PartitionPausingBackOffManagerFactory(ListenerContainerRegistry listenerContainerRegistry) {
		super(listenerContainerRegistry);
		this.clock = getDefaultClock();
	}

	/**
	 * Create a factory with default dependencies.
	 */
	public PartitionPausingBackOffManagerFactory() {
		this.clock = getDefaultClock();
	}

	/**
	 * Create a factory that will build the {@link KafkaConsumerBackoffManager} with the
	 * provided {@link Clock}.
	 * @param clock the clock instance to be used.
	 */
	public PartitionPausingBackOffManagerFactory(Clock clock) {
		this.clock = clock;
	}

	/**
	 * Set this property to false if you don't want the resulting KafkaBackOffManager
	 * to adjust the precision of the topics' consumption timing.
	 * @param timingAdjustmentEnabled set to false to disable timing adjustment.
	 */
	public final void setTimingAdjustmentEnabled(boolean timingAdjustmentEnabled) {
		this.timingAdjustmentEnabled = timingAdjustmentEnabled;
	}

	/**
	 * Set the {@link WakingKafkaConsumerTimingAdjuster} to be used with the resulting
	 * {@link KafkaConsumerBackoffManager}; only valid while timing adjustment is enabled.
	 * @param timingAdjustmentManager the adjustmentManager to be used.
	 */
	public final void setTimingAdjustmentManager(KafkaConsumerTimingAdjuster timingAdjustmentManager) {
		Assert.isTrue(this.timingAdjustmentEnabled, () -> "TimingAdjustment is disabled for this factory.");
		this.timingAdjustmentManager = timingAdjustmentManager;
	}

	/**
	 * Set the {@link TaskExecutor} to be used in the {@link KafkaConsumerTimingAdjuster};
	 * only valid while timing adjustment is enabled.
	 * @param taskExecutor the taskExecutor to be used.
	 */
	public final void setTaskExecutor(TaskExecutor taskExecutor) {
		Assert.isTrue(this.timingAdjustmentEnabled, () -> "TimingAdjustment is disabled for this factory.");
		this.taskExecutor = taskExecutor;
	}

	@Override
	protected KafkaConsumerBackoffManager doCreateManager(ListenerContainerRegistry registry) {
		PartitionPausingBackoffManager manager;
		if (this.timingAdjustmentEnabled) {
			manager = new PartitionPausingBackoffManager(registry, resolveTimingAdjuster(), this.clock);
		}
		else {
			manager = new PartitionPausingBackoffManager(registry, this.clock);
		}
		super.addApplicationListener(manager);
		return manager;
	}

	protected final Clock getDefaultClock() {
		return Clock.systemUTC();
	}

	// Return the configured adjuster, or create a default waking adjuster.
	private KafkaConsumerTimingAdjuster resolveTimingAdjuster() {
		if (this.timingAdjustmentManager != null) {
			return this.timingAdjustmentManager;
		}
		return new WakingKafkaConsumerTimingAdjuster(resolveTaskExecutor());
	}

	// Return the configured executor, or create one that shuts down on context close.
	private TaskExecutor resolveTaskExecutor() {
		if (this.taskExecutor != null) {
			return this.taskExecutor;
		}
		ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
		executor.initialize();
		super.addApplicationListener((ApplicationListener<ContextClosedEvent>) event -> executor.shutdown());
		return executor;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/PartitionPausingBackoffManager.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Clock;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.TopicPartition;
import org.springframework.context.ApplicationListener;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.event.ListenerContainerPartitionIdleEvent;
import org.springframework.lang.Nullable;
/**
*
* A manager that backs off consumption for a given topic if the timestamp provided is not
* due. Use with {@link SeekToCurrentErrorHandler} to guarantee that the message is read
* again after partition consumption is resumed (or seek it manually by other means).
* It's also necessary to set a {@link ContainerProperties#setIdlePartitionEventInterval(Long)}
* so the Manager can resume the partition consumption.
*
* Note that when a record backs off the partition consumption gets paused for
* approximately that amount of time, so you must have a fixed backoff value per partition.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
* @see SeekToCurrentErrorHandler
*/
public class PartitionPausingBackoffManager implements KafkaConsumerBackoffManager,
ApplicationListener<ListenerContainerPartitionIdleEvent> {
// NOTE(review): the logger is keyed to the KafkaConsumerBackoffManager interface,
// not to this class - confirm this is intentional before "fixing" it.
private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(KafkaConsumerBackoffManager.class));
private final ListenerContainerRegistry listenerContainerRegistry;
// Pending back-offs keyed by partition; all access is guarded by synchronized blocks
// on the map itself (see addBackoff/getBackOffContext/removeBackoff).
private final Map<TopicPartition, Context> backOffContexts;
private final Clock clock;
// Nullable; when null, no timing adjustment is performed (see applyTimingAdjustment).
private final KafkaConsumerTimingAdjuster kafkaConsumerTimingAdjuster;
/**
 * Constructs an instance with the provided {@link ListenerContainerRegistry} and
 * {@link KafkaConsumerTimingAdjuster}; uses the system UTC clock.
 *
 * The ListenerContainerRegistry is used to fetch the {@link MessageListenerContainer}
 * that will be backed off / resumed.
 *
 * The KafkaConsumerTimingAdjuster is used to make timing adjustments
 * in the message consumption so that it processes the message closer
 * to its due time rather than later.
 *
 * @param listenerContainerRegistry the listenerContainerRegistry to use.
 * @param kafkaConsumerTimingAdjuster the kafkaConsumerTimingAdjuster to use.
 */
public PartitionPausingBackoffManager(ListenerContainerRegistry listenerContainerRegistry,
KafkaConsumerTimingAdjuster kafkaConsumerTimingAdjuster) {
this.listenerContainerRegistry = listenerContainerRegistry;
this.kafkaConsumerTimingAdjuster = kafkaConsumerTimingAdjuster;
this.clock = Clock.systemUTC();
this.backOffContexts = new HashMap<>();
}
/**
 * Constructs an instance with the provided {@link ListenerContainerRegistry}
 * and with no timing adjustment capabilities; uses the system UTC clock.
 *
 * The ListenerContainerRegistry is used to fetch the {@link MessageListenerContainer}
 * that will be backed off / resumed.
 *
 * @param listenerContainerRegistry the listenerContainerRegistry to use.
 */
public PartitionPausingBackoffManager(ListenerContainerRegistry listenerContainerRegistry) {
this.listenerContainerRegistry = listenerContainerRegistry;
this.kafkaConsumerTimingAdjuster = null;
this.clock = Clock.systemUTC();
this.backOffContexts = new HashMap<>();
}
/**
 * Creates an instance with the provided {@link ListenerContainerRegistry},
 * {@link KafkaConsumerTimingAdjuster} and {@link Clock}.
 *
 * @param listenerContainerRegistry the listenerContainerRegistry to use.
 * @param kafkaConsumerTimingAdjuster the kafkaConsumerTimingAdjuster to use.
 * @param clock the clock to use.
 */
public PartitionPausingBackoffManager(ListenerContainerRegistry listenerContainerRegistry,
KafkaConsumerTimingAdjuster kafkaConsumerTimingAdjuster,
Clock clock) {
this.listenerContainerRegistry = listenerContainerRegistry;
this.clock = clock;
this.kafkaConsumerTimingAdjuster = kafkaConsumerTimingAdjuster;
this.backOffContexts = new HashMap<>();
}
/**
 * Creates an instance with the provided {@link ListenerContainerRegistry}
 * and {@link Clock}, with no timing adjustment capabilities.
 *
 * @param listenerContainerRegistry the listenerContainerRegistry to use.
 * @param clock the clock to use.
 */
public PartitionPausingBackoffManager(ListenerContainerRegistry listenerContainerRegistry, Clock clock) {
this.listenerContainerRegistry = listenerContainerRegistry;
this.clock = clock;
this.kafkaConsumerTimingAdjuster = null;
this.backOffContexts = new HashMap<>();
}
/**
 * Backs off if the current time is before the dueTimestamp provided
 * in the {@link Context} object; "backing off" means pausing the partition and
 * throwing a {@link KafkaBackoffException} (see pauseConsumptionAndThrow).
 * @param context the back off context for this execution.
 */
@Override
public void backOffIfNecessary(Context context) {
long backoffTime = context.getDueTimestamp() - getCurrentMillisFromClock();
LOGGER.debug(() -> "Back off time: " + backoffTime + " Context: " + context);
if (backoffTime > 0) {
pauseConsumptionAndThrow(context, backoffTime);
}
}
// Pause the partition, record the pending back-off, then signal the caller via an
// exception; the exception is the mechanism that aborts record processing.
private void pauseConsumptionAndThrow(Context context, Long backOffTime) throws KafkaBackoffException {
TopicPartition topicPartition = context.getTopicPartition();
getListenerContainerFromContext(context).pausePartition(topicPartition);
addBackoff(context, topicPartition);
throw new KafkaBackoffException(String.format("Partition %s from topic %s is not ready for consumption, " +
"backing off for approx. %s millis.", context.getTopicPartition().partition(),
context.getTopicPartition().topic(), backOffTime),
topicPartition, context.getListenerId(), context.getDueTimestamp());
}
@Override
public void onApplicationEvent(ListenerContainerPartitionIdleEvent partitionIdleEvent) {
LOGGER.debug(() -> String.format("partitionIdleEvent received at %s. Partition: %s",
getCurrentMillisFromClock(), partitionIdleEvent.getTopicPartition()));
Context backOffContext = getBackOffContext(partitionIdleEvent.getTopicPartition());
maybeResumeConsumption(backOffContext);
}
private long getCurrentMillisFromClock() {
return Instant.now(this.clock).toEpochMilli();
}
// Resume the partition if its due time falls within the next poll timeout, or if a
// timing adjustment was applied; otherwise leave it paused for a later idle event.
private void maybeResumeConsumption(@Nullable Context context) {
if (context == null) {
return;
}
long now = getCurrentMillisFromClock();
long timeUntilDue = context.getDueTimestamp() - now;
long pollTimeout = getListenerContainerFromContext(context)
.getContainerProperties()
.getPollTimeout();
boolean isDue = timeUntilDue <= pollTimeout;
long adjustedAmount = applyTimingAdjustment(context, timeUntilDue, pollTimeout);
if (adjustedAmount != 0L || isDue) {
resumePartition(context);
}
else {
LOGGER.debug(() -> String.format("TopicPartition %s not due. DueTimestamp: %s Now: %s ",
context.getTopicPartition(), context.getDueTimestamp(), now));
}
}
// Returns 0 when no adjustment was made (no adjuster configured, or no consumer
// available in the context); otherwise the adjuster's reported adjustment amount.
private long applyTimingAdjustment(Context context, long timeUntilDue, long pollTimeout) {
if (this.kafkaConsumerTimingAdjuster == null || context.getConsumerForTimingAdjustment() == null) {
LOGGER.debug(() -> String.format(
"Skipping timing adjustment for TopicPartition %s.", context.getTopicPartition()));
return 0L;
}
return this.kafkaConsumerTimingAdjuster.adjustTiming(
context.getConsumerForTimingAdjustment(),
context.getTopicPartition(), pollTimeout, timeUntilDue);
}
private void resumePartition(Context context) {
MessageListenerContainer container = getListenerContainerFromContext(context);
LOGGER.debug(() -> "Resuming partition at " + getCurrentMillisFromClock());
container.resumePartition(context.getTopicPartition());
removeBackoff(context.getTopicPartition());
}
private MessageListenerContainer getListenerContainerFromContext(Context context) {
return this.listenerContainerRegistry.getListenerContainer(context.getListenerId()); // NOSONAR
}
// Record a pending back-off for the partition (map is the lock).
protected void addBackoff(Context context, TopicPartition topicPartition) {
synchronized (this.backOffContexts) {
this.backOffContexts.put(topicPartition, context);
}
}
// Look up the pending back-off for the partition; null when none is pending.
protected @Nullable Context getBackOffContext(TopicPartition topicPartition) {
synchronized (this.backOffContexts) {
return this.backOffContexts.get(topicPartition);
}
}
// Clear the pending back-off after the partition has been resumed.
protected void removeBackoff(TopicPartition topicPartition) {
synchronized (this.backOffContexts) {
this.backOffContexts.remove(topicPartition);
}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/RecordInterceptor.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
* An interceptor for {@link ConsumerRecord} invoked by the listener
* container before and after invoking the listener.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.2.7
*
*/
@FunctionalInterface
public interface RecordInterceptor<K, V> extends ThreadStateProcessor {
/**
 * Perform some action on the record or return a different one. If null is returned
 * the record will be skipped. Invoked before the listener.
 * @param record the record.
 * @return the record or null.
 * @deprecated in favor of {@link #intercept(ConsumerRecord, Consumer)} which will
 * become the required method in a future release.
 */
@Deprecated
@Nullable
ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record);
/**
 * Perform some action on the record or return a different one. If null is returned
 * the record will be skipped. Invoked before the listener. The default
 * implementation delegates to the deprecated {@link #intercept(ConsumerRecord)},
 * ignoring the consumer.
 * @param record the record.
 * @param consumer the consumer.
 * @return the record or null.
 * @since 2.7
 */
@Nullable
default ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record,
@SuppressWarnings("unused") Consumer<K, V> consumer) {
return intercept(record);
}
/**
 * Called after the listener exits normally. The default implementation is a no-op.
 * @param record the record.
 * @param consumer the consumer.
 * @since 2.7
 */
default void success(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
}
/**
 * Called after the listener throws an exception. The default implementation is a no-op.
 * @param record the record.
 * @param exception the exception.
 * @param consumer the consumer.
 * @since 2.7
 */
default void failure(ConsumerRecord<K, V> record, Exception exception, Consumer<K, V> consumer) {
}
/**
 * Called when processing the record is complete, after either
 * {@link #success(ConsumerRecord, Consumer)} or
 * {@link #failure(ConsumerRecord, Exception, Consumer)}. The default implementation
 * is a no-op.
 * @param record the record.
 * @param consumer the consumer.
 * @since 2.8
 */
default void afterRecord(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/RecoveringBatchErrorHandler.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
/**
* An error handler that seeks to the current offset for each topic in a batch of records.
* Used to rewind partitions after a message failure so that the batch can be replayed. If
* the listener throws a {@link BatchListenerFailedException}, with the failed record. The
* records before the record will have their offsets committed and the partitions for the
* remaining records will be repositioned and/or the failed record can be recovered and
* skipped. If some other exception is thrown, or a valid record is not provided in the
* exception, error handling is delegated to a {@link SeekToCurrentBatchErrorHandler} with
* this handler's {@link BackOff}. If the record is recovered, its offset is committed.
*
* @author Gary Russell
* @author Myeonghyeon Lee
* @since 2.5
* @deprecated in favor of {@link DefaultErrorHandler}.
*/
@Deprecated
public class RecoveringBatchErrorHandler extends FailedBatchProcessor
        implements ContainerAwareBatchErrorHandler {

    // Whether the container should ack (commit) after this handler returns normally.
    private boolean ackAfterHandle = true;

    /**
     * Construct an instance with the default (logging) recoverer, invoked after
     * {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) delivery attempts for a
     * topic/partition/offset.
     */
    public RecoveringBatchErrorHandler() {
        this(null, SeekUtils.DEFAULT_BACK_OFF);
    }

    /**
     * Construct an instance with the default (logging) recoverer, invoked when the
     * supplied backOff returns STOP for a topic/partition/offset.
     * @param backOff the {@link BackOff}.
     */
    public RecoveringBatchErrorHandler(BackOff backOff) {
        this(null, backOff);
    }

    /**
     * Construct an instance with the supplied recoverer, invoked after
     * {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) delivery attempts for a
     * topic/partition/offset.
     * @param recoverer the recoverer.
     */
    public RecoveringBatchErrorHandler(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer) {
        this(recoverer, SeekUtils.DEFAULT_BACK_OFF);
    }

    /**
     * Construct an instance with the supplied recoverer, invoked when the backOff
     * returns STOP for a topic/partition/offset.
     * @param recoverer the recoverer; if null, the default (logging) recoverer is used.
     * @param backOff the {@link BackOff}.
     * @since 2.3
     */
    public RecoveringBatchErrorHandler(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer,
            BackOff backOff) {

        super(recoverer, backOff, createFallback(backOff));
    }

    // Build the fallback handler used when error handling cannot be delegated to the
    // record-level logic (see the class javadoc); it shares this handler's back off.
    private static CommonErrorHandler createFallback(BackOff backOff) {
        SeekToCurrentBatchErrorHandler fallbackHandler = new SeekToCurrentBatchErrorHandler();
        fallbackHandler.setBackOff(backOff);
        return new ErrorHandlerAdapter(fallbackHandler);
    }

    @Override
    public boolean isAckAfterHandle() {
        return this.ackAfterHandle;
    }

    @Override
    public void setAckAfterHandle(boolean ackAfterHandle) {
        this.ackAfterHandle = ackAfterHandle;
    }

    @Override
    public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
            MessageListenerContainer container) {

        doHandle(thrownException, data, consumer, container, () -> { }); // NOSONAR
    }
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/RecoveryStrategy.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
* Called to determine whether a record should be skipped.
*
* @author Gary Russell
* @since 2.7
*/
@FunctionalInterface
public interface RecoveryStrategy {
/**
* Return true if the record should be skipped because it was successfully
* recovered.
* @param record the record.
* @param ex the exception that caused the delivery to fail.
* @param container the container (or parent if a child container); may be null.
* @param consumer the consumer; may be null.
* @return true to skip the record.
* @throws InterruptedException if the thread is interrupted.
*/
boolean recovered(ConsumerRecord<?, ?> record, Exception ex, @Nullable MessageListenerContainer container,
@Nullable Consumer<?, ?> consumer) throws InterruptedException;
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/RemainingRecordsErrorHandler.java | /*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
* An error handler that has access to the unprocessed records from the last poll
* (including the failed record) and the consumer, for example to adjust offsets after an
* error. The records passed to the handler will not be passed to the listener
* (unless re-fetched if the handler performs seeks).
*
* @author Gary Russell
* @since 2.0.1
*
*/
@FunctionalInterface
public interface RemainingRecordsErrorHandler extends ConsumerAwareErrorHandler {
/**
* Not supported by this handler; the container passes the remaining records via
* {@link #handle(Exception, List, Consumer)} instead of this single-record variant.
* @param thrownException the exception.
* @param data the record.
* @param consumer the consumer.
*/
@Override
default void handle(Exception thrownException, @Nullable ConsumerRecord<?, ?> data, Consumer<?, ?> consumer) {
throw new UnsupportedOperationException("Container should never call this");
}
/**
* Handle the exception. The failed record is the first in the list.
* @param thrownException the exception.
* @param records the remaining records including the one that failed.
* @param consumer the consumer.
*/
void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer);
/**
* Delegates to {@link #handle(Exception, List, Consumer)}, discarding the
* container argument.
* @param thrownException the exception.
* @param records the remaining records including the one that failed.
* @param consumer the consumer.
* @param container the container.
*/
@Override
default void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records,
Consumer<?, ?> consumer, MessageListenerContainer container) {
handle(thrownException, records, consumer);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/RetryListener.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* A listener for retry activity.
*
* @author Gary Russell
* @since 2.7
*
*/
@FunctionalInterface
public interface RetryListener {
/**
* Called after a delivery attempt failed for a record.
* @param record the failed record.
* @param ex the exception causing the failure.
* @param deliveryAttempt the delivery attempt.
*/
void failedDelivery(ConsumerRecord<?, ?> record, Exception ex, int deliveryAttempt);
/**
* Called after a failing record was successfully recovered. The default
* implementation does nothing.
* @param record the record.
* @param ex the exception that made the delivery fail.
*/
default void recovered(ConsumerRecord<?, ?> record, Exception ex) {
}
/**
* Called after a recovery attempt failed. The default implementation does
* nothing.
* @param record the record.
* @param original the original exception causing the recovery attempt.
* @param failure the exception thrown by the recoverer.
*/
default void recoveryFailed(ConsumerRecord<?, ?> record, Exception original, Exception failure) {
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/RetryingBatchErrorHandler.java | /*
* Copyright 2020-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.function.BiConsumer;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.FixedBackOff;
/**
* A batch error handler that invokes the listener according to the supplied
* {@link BackOff}. The consumer is paused/polled/resumed before each retry in order to
* avoid a rebalance. If/when retries are exhausted, the provided
* {@link ConsumerRecordRecoverer} is invoked for each record in the batch. If the
* recoverer throws an exception, or the thread is interrupted while sleeping, seeks are
* performed so that the batch will be redelivered on the next poll.
*
* @author Gary Russell
* @since 2.3.7
* @deprecated in favor of {@link DefaultErrorHandler}.
*
*/
@Deprecated
public class RetryingBatchErrorHandler extends KafkaExceptionLogLevelAware
implements ListenerInvokingBatchErrorHandler {
private final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass()));
private final BackOff backOff;
private final BiConsumer<ConsumerRecords<?, ?>, Exception> recoverer;
@SuppressWarnings("deprecation")
private final CommonErrorHandler seeker = new ErrorHandlerAdapter(new SeekToCurrentBatchErrorHandler());
private boolean ackAfterHandle = true;
/**
* Construct an instance with a default {@link FixedBackOff} (unlimited attempts with
* a 5 second back off).
*/
public RetryingBatchErrorHandler() {
this(new FixedBackOff(), null);
}
/**
* Construct an instance with the provided {@link BackOff} and
* {@link ConsumerRecordRecoverer}. If the recoverer is {@code null}, the discarded
* records (topic-partition{@literal @}offset) will be logged.
* @param backOff the back off.
* @param recoverer the recoverer.
*/
public RetryingBatchErrorHandler(BackOff backOff, @Nullable ConsumerRecordRecoverer recoverer) {
this.backOff = backOff;
this.recoverer = (crs, ex) -> {
if (recoverer == null) {
this.logger.error(ex, () -> "Records discarded: " + ErrorHandlingUtils.recordsToString(crs));
}
else {
crs.spliterator().forEachRemaining(rec -> recoverer.accept(rec, ex));
}
};
}
@Override
public boolean isAckAfterHandle() {
return this.ackAfterHandle;
}
@Override
public void setAckAfterHandle(boolean ackAfterHandle) {
this.ackAfterHandle = ackAfterHandle;
}
@Override
public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> records,
Consumer<?, ?> consumer, MessageListenerContainer container, Runnable invokeListener) {
if (records == null || records.count() == 0) {
this.logger.error(thrownException, "Called with no records; consumer exception");
return;
}
ErrorHandlingUtils.retryBatch(thrownException, records, consumer, container, invokeListener, this.backOff,
this.seeker, this.recoverer, this.logger, getLogLevel());
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/SeekToCurrentBatchErrorHandler.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.LinkedHashMap;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.BackOffExecution;
/**
* An error handler that seeks to the current offset for each topic in a batch of records.
* Used to rewind partitions after a message failure so that the batch can be replayed.
*
* @author Gary Russell
* @since 2.1
* @deprecated with no replacement - use {@link DefaultErrorHandler} with an infinite
* {@link BackOff}.
*/
@Deprecated
public class SeekToCurrentBatchErrorHandler extends KafkaExceptionLogLevelAware
        implements ContainerAwareBatchErrorHandler {

    // Back off state is kept per consumer thread; intentionally not static so each
    // handler instance tracks its own containers' threads.
    private final ThreadLocal<BackOffExecution> backOffs = new ThreadLocal<>();

    private final ThreadLocal<Long> lastIntervals = new ThreadLocal<>(); // Intentionally not static

    private BackOff backOff;

    /**
     * Set a {@link BackOff} to suspend the thread after performing the seek. Since this
     * error handler can never "recover" after retries are exhausted, if the back off
     * returns STOP, then the previous interval is used.
     * @param backOff the back off.
     * @since 2.3
     */
    public void setBackOff(BackOff backOff) {
        this.backOff = backOff;
    }

    @Override
    public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
            MessageListenerContainer container) {

        if (data == null) {
            // No batch available - nothing to seek.
            return;
        }
        // Rewind each partition to the first record of the failed batch so the whole
        // batch is re-fetched on the next poll.
        data.partitions().forEach(partition -> consumer.seek(partition, data.records(partition).get(0).offset()));
        if (this.backOff != null) {
            try {
                ListenerUtils.unrecoverableBackOff(this.backOff, this.backOffs, this.lastIntervals, container);
            }
            catch (InterruptedException e) {
                // Restore the interrupt flag; the exception below still propagates.
                Thread.currentThread().interrupt();
            }
        }
        throw new KafkaException("Seek to current after exception", getLogLevel(), thrownException);
    }

    @Override
    public void clearThreadState() {
        // Drop this thread's back off state when the consumer thread is released.
        this.backOffs.remove();
        this.lastIntervals.remove();
    }
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/SeekToCurrentErrorHandler.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
/**
* An error handler that seeks to the current offset for each topic in the remaining
* records. Used to rewind partitions after a message failure so that it can be
* replayed.
*
* @author Gary Russell
* @author Artem Bilan
*
* @since 2.0.1
* @deprecated in favor of {@link DefaultErrorHandler}.
*/
@Deprecated
public class SeekToCurrentErrorHandler extends FailedRecordProcessor implements ContainerAwareErrorHandler {
// Whether the container should ack (commit) after this handler returns normally.
private boolean ackAfterHandle = true;
/**
* Construct an instance with the default recoverer which simply logs the record after
* {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) have occurred for a
* topic/partition/offset, with the default back off (9 retries, no delay).
* @since 2.2
*/
public SeekToCurrentErrorHandler() {
this(null, SeekUtils.DEFAULT_BACK_OFF);
}
/**
* Construct an instance with the default recoverer which simply logs the record after
* the backOff returns STOP for a topic/partition/offset.
* @param backOff the {@link BackOff}.
* @since 2.3
*/
public SeekToCurrentErrorHandler(BackOff backOff) {
this(null, backOff);
}
/**
* Construct an instance with the provided recoverer which will be called after
* {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) have occurred for a
* topic/partition/offset.
* @param recoverer the recoverer.
* @since 2.2
*/
public SeekToCurrentErrorHandler(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer) {
this(recoverer, SeekUtils.DEFAULT_BACK_OFF);
}
/**
* Construct an instance with the provided recoverer which will be called after
* the backOff returns STOP for a topic/partition/offset.
* @param recoverer the recoverer; if null, the default (logging) recoverer is used.
* @param backOff the {@link BackOff}.
* @since 2.3
*/
public SeekToCurrentErrorHandler(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff) {
super(recoverer, backOff);
}
/**
* {@inheritDoc}
* The container must be configured with
* {@link org.springframework.kafka.listener.ContainerProperties.AckMode#MANUAL_IMMEDIATE}.
* Whether or not the commit is sync or async depends on the container's syncCommits
* property.
* @param commitRecovered true to commit.
*/
@Override
public void setCommitRecovered(boolean commitRecovered) { // NOSONAR enhanced javadoc
super.setCommitRecovered(commitRecovered);
}
@Override
public boolean isAckAfterHandle() {
return this.ackAfterHandle;
}
@Override
public void setAckAfterHandle(boolean ackAfterHandle) {
this.ackAfterHandle = ackAfterHandle;
}
@Override
public void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records,
Consumer<?, ?> consumer, MessageListenerContainer container) {
// Seek the remaining partitions back to the failed record, or recover (skip) it
// when the recovery strategy reports it was handled.
SeekUtils.seekOrRecover(thrownException, records, consumer, container, isCommitRecovered(), // NOSONAR
getRecoveryStrategy(records, thrownException), this.logger, getLogLevel());
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/SeekUtils.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiPredicate;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.SerializationException;
import org.springframework.core.NestedRuntimeException;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.KafkaException.Level;
import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.lang.Nullable;
import org.springframework.util.ObjectUtils;
import org.springframework.util.backoff.FixedBackOff;
/**
* Seek utilities.
*
* @author Gary Russell
* @since 2.2
*
*/
public final class SeekUtils {

    /**
     * The number of times a topic/partition/offset can fail before being rejected.
     */
    public static final int DEFAULT_MAX_FAILURES = 10;

    /**
     * The default back off - a {@link FixedBackOff} with 0 interval and
     * {@link #DEFAULT_MAX_FAILURES} - 1 retry attempts.
     */
    public static final FixedBackOff DEFAULT_BACK_OFF = new FixedBackOff(0, DEFAULT_MAX_FAILURES - 1);

    private static final LoggingCommitCallback LOGGING_COMMIT_CALLBACK = new LoggingCommitCallback();

    private SeekUtils() {
    }

    /**
     * Seek records to earliest position, optionally skipping the first.
     * @param records the records.
     * @param exception the exception.
     * @param consumer the consumer.
     * @param recoverable true if skipping the first record is allowed.
     * @param skipper function to determine whether or not to skip seeking the first.
     * @param logger a {@link LogAccessor} for seek errors.
     * @return true if the failed record was skipped.
     */
    public static boolean doSeeks(List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer, Exception exception,
            boolean recoverable, BiPredicate<ConsumerRecord<?, ?>, Exception> skipper, LogAccessor logger) {

        // Adapt the simple predicate to the richer RecoveryStrategy contract.
        return doSeeks(records, consumer, exception, recoverable, (rec, ex, cont, cons) -> skipper.test(rec, ex), null,
                logger);
    }

    /**
     * Seek records to earliest position, optionally skipping the first.
     * @param records the records.
     * @param consumer the consumer.
     * @param exception the exception.
     * @param recoverable true if skipping the first record is allowed.
     * @param recovery the {@link RecoveryStrategy}.
     * @param container the container, or parent if a child.
     * @param logger a {@link LogAccessor} for seek errors.
     * @return true if the failed record was skipped.
     */
    @SuppressWarnings("deprecation")
    public static boolean doSeeks(List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer, Exception exception,
            boolean recoverable, RecoveryStrategy recovery, @Nullable MessageListenerContainer container,
            LogAccessor logger) {

        Map<TopicPartition, Long> partitions = new LinkedHashMap<>();
        AtomicBoolean first = new AtomicBoolean(true);
        AtomicBoolean skipped = new AtomicBoolean();
        records.forEach(record -> {
            if (recoverable && first.get()) {
                // Only the first record in the list is a candidate for recovery (skipping).
                try {
                    boolean test = recovery.recovered(record, exception, container, consumer);
                    skipped.set(test);
                }
                catch (Exception ex) {
                    if (ex instanceof InterruptedException) {
                        // Restore the interrupt flag so callers can still observe the interruption.
                        Thread.currentThread().interrupt();
                    }
                    if (isBackoffException(ex)) {
                        logger.debug(ex, () -> ListenerUtils.recordToString(record)
                                + " included in seeks due to retry back off");
                    }
                    else {
                        logger.error(ex, () -> "Failed to determine if this record ("
                                + ListenerUtils.recordToString(record)
                                + ") should be recovered, including in seeks");
                    }
                    skipped.set(false);
                }
                if (skipped.get()) {
                    logger.debug(() -> "Skipping seek of: " + ListenerUtils.recordToString(record));
                }
            }
            if (!recoverable || !first.get() || !skipped.get()) {
                // Remember only the first (earliest) offset seen for each partition;
                // the lambda argument is the absent TopicPartition key, not an offset.
                partitions.computeIfAbsent(new TopicPartition(record.topic(), record.partition()),
                        tp -> record.offset());
            }
            first.set(false);
        });
        seekPartitions(consumer, partitions, logger);
        return skipped.get();
    }

    /**
     * Perform seek operations on each partition; a failed seek is logged and does not
     * prevent the remaining seeks.
     * @param consumer the consumer.
     * @param partitions the partitions mapped to the offsets to seek to.
     * @param logger the logger.
     * @since 2.5
     */
    public static void seekPartitions(Consumer<?, ?> consumer, Map<TopicPartition, Long> partitions,
            LogAccessor logger) {

        partitions.forEach((topicPartition, offset) -> {
            try {
                logger.trace(() -> "Seeking: " + topicPartition + " to: " + offset);
                consumer.seek(topicPartition, offset);
            }
            catch (Exception e) {
                logger.error(e, () -> "Failed to seek " + topicPartition + " to " + offset);
            }
        });
    }

    /**
     * Seek the remaining records, optionally recovering the first.
     * @param thrownException the exception.
     * @param records the remaining records.
     * @param consumer the consumer.
     * @param container the container.
     * @param commitRecovered true to commit the recovered record offset.
     * @param skipPredicate the skip predicate.
     * @param logger the logger.
     * @param level the log level for the thrown exception after handling.
     * @since 2.5
     */
    public static void seekOrRecover(Exception thrownException, List<ConsumerRecord<?, ?>> records,
            Consumer<?, ?> consumer, MessageListenerContainer container, boolean commitRecovered,
            BiPredicate<ConsumerRecord<?, ?>, Exception> skipPredicate, LogAccessor logger, Level level) {

        seekOrRecover(thrownException, records, consumer, container, commitRecovered,
                (rec, ex, cont, cons) -> skipPredicate.test(rec, ex), logger, level);
    }

    /**
     * Seek the remaining records, optionally recovering the first.
     * @param thrownException the exception.
     * @param records the remaining records.
     * @param consumer the consumer.
     * @param container the container.
     * @param commitRecovered true to commit the recovered record offset.
     * @param recovery the {@link RecoveryStrategy}.
     * @param logger the logger.
     * @param level the log level for the thrown exception after handling.
     * @since 2.7
     */
    public static void seekOrRecover(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records,
            Consumer<?, ?> consumer, MessageListenerContainer container, boolean commitRecovered,
            RecoveryStrategy recovery, LogAccessor logger, Level level) {

        if (ObjectUtils.isEmpty(records)) {
            if (thrownException instanceof SerializationException) {
                throw new IllegalStateException("This error handler cannot process 'SerializationException's directly; "
                        + "please consider configuring an 'ErrorHandlingDeserializer' in the value and/or key "
                        + "deserializer", thrownException);
            }
            else {
                throw new IllegalStateException("This error handler cannot process '"
                        + thrownException.getClass().getName()
                        + "'s; no record information is available", thrownException);
            }
        }
        if (records == null || !doSeeks(records, consumer, thrownException, true, recovery, container, logger)) { // NOSONAR
            // Not recovered - rethrow so the container re-delivers after the seeks.
            throw new KafkaException("Seek to current after exception", level, thrownException);
        }
        if (commitRecovered) {
            if (container.getContainerProperties().getAckMode().equals(AckMode.MANUAL_IMMEDIATE)) {
                ConsumerRecord<?, ?> record = records.get(0);
                Map<TopicPartition, OffsetAndMetadata> offsetToCommit = Collections.singletonMap(
                        new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
                if (container.getContainerProperties().isSyncCommits()) {
                    consumer.commitSync(offsetToCommit, container.getContainerProperties().getSyncCommitTimeout());
                }
                else {
                    OffsetCommitCallback commitCallback = container.getContainerProperties().getCommitCallback();
                    if (commitCallback == null) {
                        commitCallback = LOGGING_COMMIT_CALLBACK;
                    }
                    consumer.commitAsync(offsetToCommit, commitCallback);
                }
            }
            else {
                logger.debug(() -> "'commitRecovered' ignored, container AckMode must be MANUAL_IMMEDIATE, not "
                        + container.getContainerProperties().getAckMode());
            }
        }
    }

    /**
     * Return true if the exception is a {@link KafkaBackoffException}.
     * @param exception the exception.
     * @return true if it's a back off.
     * @since 2.7
     */
    public static boolean isBackoffException(Exception exception) {
        return NestedRuntimeException.class.isAssignableFrom(exception.getClass()) // NOSONAR - unchecked cast
                && ((NestedRuntimeException) exception).contains(KafkaBackoffException.class);
    }
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ThreadStateProcessor.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
/**
* A general interface for managing thread-bound resources when a {@link Consumer} is
* available.
*
* @author Karol Dowbecki
* @author Gary Russell
* @since 2.8
*
*/
public interface ThreadStateProcessor {
/**
* Call to set up thread-bound resources which will be available for the
* entire duration of enclosed operation involving a {@link Consumer}. The
* default implementation does nothing.
*
* @param consumer the consumer.
*/
default void setupThreadState(Consumer<?, ?> consumer) {
}
/**
* Call to clear thread-bound resources which were set up in
* {@link #setupThreadState(Consumer)}. The default implementation does nothing.
*
* @param consumer the consumer.
*/
default void clearThreadState(Consumer<?, ?> consumer) {
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/TimestampedException.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Clock;
import java.time.Instant;
import org.springframework.kafka.KafkaException;
/**
* A {@link KafkaException} that records the timestamp
* of when it was thrown.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public class TimestampedException extends KafkaException {

    private static final long serialVersionUID = -2544217643924234282L;

    // Epoch millis captured when this exception was constructed.
    private final long timestamp;

    /**
     * Construct an instance wrapping the exception and recording the current time
     * from the given {@link Clock}.
     * @param ex the exception to wrap.
     * @param clock the clock used to capture the timestamp.
     */
    public TimestampedException(Exception ex, Clock clock) {
        super(ex.getMessage(), ex);
        this.timestamp = Instant.now(clock).toEpochMilli();
    }

    /**
     * Construct an instance wrapping the exception and recording the current time
     * from the system default zone clock.
     * @param ex the exception to wrap.
     */
    public TimestampedException(Exception ex) {
        this(ex, Clock.systemDefaultZone()); // delegate - keeps the two constructors consistent
    }

    /**
     * Return the time (epoch millis) at which this exception was created.
     * @return the timestamp.
     */
    public long getTimestamp() {
        return this.timestamp;
    }
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/WakingKafkaConsumerTimingAdjuster.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Duration;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.springframework.core.log.LogAccessor;
import org.springframework.core.task.TaskExecutor;
import org.springframework.retry.backoff.Sleeper;
import org.springframework.util.Assert;
/**
*
* Adjusts timing by creating a thread that will
* wakeup the consumer from polling, considering that, if consumption is paused,
* it will check for consumption resuming in increments of 'pollTimeout'. This works best
* if the consumer is handling a single partition.
*
* @author Tomaz Fernandes
* @since 2.7
* @see KafkaConsumerBackoffManager
*/
public class WakingKafkaConsumerTimingAdjuster implements KafkaConsumerTimingAdjuster {

	private static final LogAccessor LOGGER =
			new LogAccessor(LogFactory.getLog(WakingKafkaConsumerTimingAdjuster.class));

	private static final long HUNDRED = 100L;

	private static final Duration DEFAULT_TIMING_ADJUSTMENT_THRESHOLD = Duration.ofMillis(HUNDRED);

	private static final int DEFAULT_POLL_TIMEOUTS_FOR_ADJUSTMENT_WINDOW = 2;

	private Duration timingAdjustmentThreshold = DEFAULT_TIMING_ADJUSTMENT_THRESHOLD;

	private int pollTimeoutsForAdjustmentWindow = DEFAULT_POLL_TIMEOUTS_FOR_ADJUSTMENT_WINDOW;

	private final TaskExecutor timingAdjustmentTaskExecutor;

	private final Sleeper sleeper;

	/**
	 * Construct an instance with the provided executor and sleeper.
	 * @param timingAdjustmentTaskExecutor the executor that runs the sleep-then-wakeup task.
	 * @param sleeper the {@link Sleeper} used to pause for the adjustment amount.
	 */
	public WakingKafkaConsumerTimingAdjuster(TaskExecutor timingAdjustmentTaskExecutor, Sleeper sleeper) {
		Assert.notNull(timingAdjustmentTaskExecutor, "Task executor cannot be null.");
		Assert.notNull(sleeper, "Sleeper cannot be null.");
		this.timingAdjustmentTaskExecutor = timingAdjustmentTaskExecutor;
		this.sleeper = sleeper;
	}

	/**
	 * Construct an instance with the provided executor and a {@link Thread#sleep(long)}
	 * based sleeper.
	 * @param timingAdjustmentTaskExecutor the executor that runs the sleep-then-wakeup task.
	 */
	public WakingKafkaConsumerTimingAdjuster(TaskExecutor timingAdjustmentTaskExecutor) {
		// Delegate to the two-arg constructor so validation/assignment is not duplicated.
		this(timingAdjustmentTaskExecutor, Thread::sleep);
	}

	/**
	 *
	 * Sets how many pollTimeouts prior to the dueTimeout the adjustment will take place.
	 * Default is 2.
	 *
	 * @param pollTimeoutsForAdjustmentWindow the amount of pollTimeouts in the adjustment window.
	 */
	public void setPollTimeoutsForAdjustmentWindow(int pollTimeoutsForAdjustmentWindow) {
		this.pollTimeoutsForAdjustmentWindow = pollTimeoutsForAdjustmentWindow;
	}

	/**
	 *
	 * Sets the threshold for the timing adjustment to take place. If the time difference between
	 * the probable instant the message will be consumed and the instant it should is lower than
	 * this value, no adjustment will be applied.
	 * Default is 100ms.
	 *
	 * @param timingAdjustmentThreshold the threshold to be set.
	 */
	public void setTimingAdjustmentThreshold(Duration timingAdjustmentThreshold) {
		this.timingAdjustmentThreshold = timingAdjustmentThreshold;
	}

	/**
	 * Adjusts the timing with the provided parameters.
	 *
	 * @param consumerToAdjust the {@link Consumer} that will be adjusted
	 * @param topicPartition the {@link TopicPartition} that will be adjusted
	 * @param pollTimeout the pollConfiguration for the consumer's container
	 * @param timeUntilDue the amount of time until the message is due for consumption
	 * @return the adjusted amount in milliseconds, or 0 if no adjustment was scheduled
	 */
	public long adjustTiming(Consumer<?, ?> consumerToAdjust, TopicPartition topicPartition,
			long pollTimeout, long timeUntilDue) {
		// Only adjust when due time falls within the configured window of poll timeouts.
		boolean isInAdjustmentWindow = timeUntilDue > pollTimeout && timeUntilDue <=
				pollTimeout * this.pollTimeoutsForAdjustmentWindow;
		// The residue modulo pollTimeout is the drift we want to absorb.
		long adjustmentAmount = timeUntilDue % pollTimeout;
		if (isInAdjustmentWindow && adjustmentAmount > this.timingAdjustmentThreshold.toMillis()) {
			// Sleep-then-wakeup happens on the executor so the caller is not blocked.
			this.timingAdjustmentTaskExecutor.execute(() ->
					doApplyTimingAdjustment(consumerToAdjust, topicPartition, adjustmentAmount));
			return adjustmentAmount;
		}
		return 0L;
	}

	/**
	 * Sleep for the adjustment amount, then wake the consumer out of its poll.
	 * Interruption is restored and reported; any other wakeup failure is only logged.
	 */
	private void doApplyTimingAdjustment(Consumer<?, ?> consumerForTimingAdjustment,
			TopicPartition topicPartition, long adjustmentAmount) {
		try {
			LOGGER.debug(() -> String.format("Applying timing adjustment of %s millis for TopicPartition %s",
					adjustmentAmount, topicPartition));
			this.sleeper.sleep(adjustmentAmount);
			LOGGER.debug(() -> "Waking up consumer for partition topic: " + topicPartition);
			consumerForTimingAdjustment.wakeup();
		}
		catch (InterruptedException e) {
			// Restore the interrupt flag before propagating, per standard practice.
			Thread.currentThread().interrupt();
			throw new IllegalStateException("Interrupted waking up consumer while applying timing adjustment " +
					"for TopicPartition " + topicPartition, e);
		}
		catch (Exception e) { // NOSONAR
			LOGGER.error(e, () -> "Error waking up consumer while applying timing adjustment " +
					"for TopicPartition " + topicPartition);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/package-info.java | /**
* Package for kafka listeners
*/
@org.springframework.lang.NonNullApi
package org.springframework.kafka.listener;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/AbstractDelegatingMessageListenerAdapter.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.TopicPartition;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.listener.ConsumerSeekAware;
import org.springframework.kafka.listener.DelegatingMessageListener;
import org.springframework.kafka.listener.ListenerType;
import org.springframework.kafka.listener.ListenerUtils;
/**
* Top level class for all listener adapters.
*
* @param <T> the delegate type.
*
* @author Gary Russell
* @since 1.1
*
*/
public abstract class AbstractDelegatingMessageListenerAdapter<T>
		implements ConsumerSeekAware, DelegatingMessageListener<T> {

	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(this.getClass())); // NOSONAR

	protected final T delegate; //NOSONAR

	protected final ListenerType delegateType; // NOSONAR

	// Non-null only when the delegate itself is seek-aware; all ConsumerSeekAware
	// callbacks below are forwarded through this reference.
	private final ConsumerSeekAware seekAware;

	public AbstractDelegatingMessageListenerAdapter(T delegate) {
		this.delegate = delegate;
		this.delegateType = ListenerUtils.determineListenerType(delegate);
		this.seekAware = delegate instanceof ConsumerSeekAware
				? (ConsumerSeekAware) delegate
				: null;
	}

	@Override
	public T getDelegate() {
		return this.delegate;
	}

	@Override
	public void registerSeekCallback(ConsumerSeekCallback callback) {
		if (this.seekAware == null) {
			return;
		}
		this.seekAware.registerSeekCallback(callback);
	}

	@Override
	public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		if (this.seekAware == null) {
			return;
		}
		this.seekAware.onPartitionsAssigned(assignments, callback);
	}

	@Override
	public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
		if (this.seekAware == null) {
			return;
		}
		this.seekAware.onPartitionsRevoked(partitions);
	}

	@Override
	public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		if (this.seekAware == null) {
			return;
		}
		this.seekAware.onIdleContainer(assignments, callback);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/AbstractFilteringMessageListener.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.util.Assert;
/**
* An abstract message listener adapter that implements record filter logic
* via a {@link RecordFilterStrategy}.
*
* @param <K> the key type.
* @param <V> the value type.
* @param <T> the delegate type.
*
* @author Gary Russell
*
*/
public abstract class AbstractFilteringMessageListener<K, V, T>
		extends AbstractDelegatingMessageListenerAdapter<T> {

	// Strategy consulted for every record; never null (enforced in the constructor).
	private final RecordFilterStrategy<K, V> recordFilterStrategy;

	protected AbstractFilteringMessageListener(T delegate, RecordFilterStrategy<K, V> recordFilterStrategy) {
		super(delegate);
		Assert.notNull(recordFilterStrategy, "'recordFilterStrategy' cannot be null");
		this.recordFilterStrategy = recordFilterStrategy;
	}

	protected RecordFilterStrategy<K, V> getRecordFilterStrategy() {
		return this.recordFilterStrategy;
	}

	/**
	 * Ask the configured strategy whether this record should be filtered out.
	 * @param consumerRecord the record to evaluate.
	 * @return true if the record is to be discarded.
	 */
	protected boolean filter(ConsumerRecord<K, V> consumerRecord) {
		return getRecordFilterStrategy().filter(consumerRecord);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/AbstractRetryingMessageListenerAdapter.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.springframework.lang.Nullable;
import org.springframework.retry.RecoveryCallback;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
/**
* Base class for retrying message listener adapters.
*
* @param <K> the key type.
* @param <V> the value type.
* @param <T> the delegate type.
*
* @author Gary Russell
*
*/
public abstract class AbstractRetryingMessageListenerAdapter<K, V, T>
		extends AbstractDelegatingMessageListenerAdapter<T> {

	// Template governing retries around delegate invocation; never null.
	private final RetryTemplate retryTemplate;

	// Optional: when null, exhausted retries propagate the exception to the container.
	private final RecoveryCallback<? extends Object> recoveryCallback;

	/**
	 * Construct an instance with the supplied retry template and no recovery
	 * callback; the exception will be thrown to the container after retries are
	 * exhausted.
	 * @param delegate the delegate listener.
	 * @param retryTemplate the template.
	 */
	public AbstractRetryingMessageListenerAdapter(T delegate, RetryTemplate retryTemplate) {
		this(delegate, retryTemplate, null);
	}

	/**
	 * Construct an instance with the supplied template and callback.
	 * @param delegate the delegate listener.
	 * @param retryTemplate the template.
	 * @param recoveryCallback the recovery callback; if null, the exception will be
	 * thrown to the container after retries are exhausted.
	 */
	public AbstractRetryingMessageListenerAdapter(T delegate, RetryTemplate retryTemplate,
			@Nullable RecoveryCallback<? extends Object> recoveryCallback) {

		super(delegate);
		Assert.notNull(retryTemplate, "'retryTemplate' cannot be null");
		this.retryTemplate = retryTemplate;
		this.recoveryCallback = recoveryCallback;
	}

	public RetryTemplate getRetryTemplate() {
		return this.retryTemplate;
	}

	public RecoveryCallback<? extends Object> getRecoveryCallback() {
		return this.recoveryCallback;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/AdapterUtils.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.expression.ParserContext;
import org.springframework.expression.common.TemplateParserContext;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.lang.Nullable;
/**
* Utilities for listener adapters.
*
* @author Gary Russell
* @since 2.5
*
*/
public final class AdapterUtils {

	/**
	 * Parser context for runtime SpEL using ! as the template prefix.
	 * @since 2.2.15
	 */
	public static final ParserContext PARSER_CONTEXT = new TemplateParserContext("!{", "}");

	private AdapterUtils() {
	}

	/**
	 * Build a {@link ConsumerRecordMetadata} from the first {@link ConsumerRecord} in data, if any.
	 * @param data the data array.
	 * @return the metadata or null if data does not contain a {@link ConsumerRecord}.
	 */
	@Nullable
	public static Object buildConsumerRecordMetadataFromArray(Object... data) {
		for (Object candidate : data) {
			ConsumerRecordMetadata meta = buildConsumerRecordMetadata(candidate);
			if (meta != null) {
				return meta;
			}
		}
		return null;
	}

	/**
	 * Build a {@link ConsumerRecordMetadata} from data which must be a
	 * {@link ConsumerRecord}.
	 * @param data the record.
	 * @return the metadata or null if data is not a {@link ConsumerRecord}.
	 */
	@Nullable
	public static ConsumerRecordMetadata buildConsumerRecordMetadata(Object data) {
		if (data instanceof ConsumerRecord) {
			ConsumerRecord<?, ?> record = (ConsumerRecord<?, ?>) data;
			TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
			RecordMetadata recordMetadata = new RecordMetadata(topicPartition, record.offset(), 0,
					record.timestamp(), record.serializedKeySize(), record.serializedValueSize());
			return new ConsumerRecordMetadata(recordMetadata, record.timestampType());
		}
		return null;
	}

	/**
	 * Return the default expression when no SendTo value is present.
	 * @return the expression.
	 * @since 2.2.15
	 */
	public static String getDefaultReplyTopicExpression() {
		return PARSER_CONTEXT.getExpressionPrefix() + "source.headers['"
				+ KafkaHeaders.REPLY_TOPIC + "']" + PARSER_CONTEXT.getExpressionSuffix();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/BatchMessagingMessageListenerAdapter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.listener.BatchAcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.KafkaListenerErrorHandler;
import org.springframework.kafka.listener.ListenerExecutionFailedException;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.converter.BatchMessageConverter;
import org.springframework.kafka.support.converter.BatchMessagingMessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
/**
* A {@link org.springframework.kafka.listener.MessageListener MessageListener}
* adapter that invokes a configurable {@link HandlerAdapter}; used when the factory is
* configured for the listener to receive batches of messages.
*
* <p>Wraps the incoming Kafka Message to Spring's {@link Message} abstraction.
*
* <p>The original {@code List<ConsumerRecord>} and
* the {@link Acknowledgment} are provided as additional arguments so that these can
* be injected as method arguments if necessary.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
* @author Venil Noronha
* @since 1.1
*/
public class BatchMessagingMessageListenerAdapter<K, V> extends MessagingMessageListenerAdapter<K, V>
		implements BatchAcknowledgingConsumerAwareMessageListener<K, V> {

	// Converts the whole batch to a single Spring Message; replaceable via setter.
	private BatchMessageConverter batchMessageConverter = new BatchMessagingMessageConverter();

	// Optional; when null, ListenerExecutionFailedExceptions propagate to the container.
	private KafkaListenerErrorHandler errorHandler;

	// Optional; when set, each record in the batch is adapted and invoked individually.
	private BatchToRecordAdapter<K, V> batchToRecordAdapter;

	/**
	 * Create an instance with the provided parameters.
	 * @param bean the listener bean.
	 * @param method the listener method.
	 */
	public BatchMessagingMessageListenerAdapter(Object bean, Method method) {
		this(bean, method, null);
	}

	/**
	 * Create an instance with the provided parameters.
	 * @param bean the listener bean.
	 * @param method the listener method.
	 * @param errorHandler the error handler.
	 */
	public BatchMessagingMessageListenerAdapter(Object bean, Method method,
			@Nullable KafkaListenerErrorHandler errorHandler) {
		super(bean, method);
		this.errorHandler = errorHandler;
	}

	/**
	 * Set the BatchMessageConverter. If the converter carries a record converter,
	 * it is also installed on the superclass so per-record conversion stays in sync.
	 * @param messageConverter the converter.
	 */
	public void setBatchMessageConverter(BatchMessageConverter messageConverter) {
		Assert.notNull(messageConverter, "'messageConverter' cannot be null");
		this.batchMessageConverter = messageConverter;
		RecordMessageConverter recordMessageConverter = messageConverter.getRecordMessageConverter();
		if (recordMessageConverter != null) {
			setMessageConverter(recordMessageConverter);
		}
	}

	/**
	 * Set a {@link BatchToRecordAdapter}.
	 * @param batchToRecordAdapter the adapter.
	 * @since 2.4.2
	 */
	public void setBatchToRecordAdapter(BatchToRecordAdapter<K, V> batchToRecordAdapter) {
		this.batchToRecordAdapter = batchToRecordAdapter;
	}

	/**
	 * Return the {@link BatchMessagingMessageConverter} for this listener,
	 * being able to convert {@link org.springframework.messaging.Message}.
	 * @return the {@link BatchMessagingMessageConverter} for this listener,
	 * being able to convert {@link org.springframework.messaging.Message}.
	 */
	protected final BatchMessageConverter getBatchMessageConverter() {
		return this.batchMessageConverter;
	}

	@Override
	public boolean wantsPollResult() {
		// True when the listener method takes a raw ConsumerRecords parameter.
		return isConsumerRecords();
	}

	@Override
	public void onMessage(ConsumerRecords<K, V> records, @Nullable Acknowledgment acknowledgment,
			Consumer<K, V> consumer) {
		// Raw ConsumerRecords path: no Message conversion needed.
		invoke(records, acknowledgment, consumer, NULL_MESSAGE);
	}

	/**
	 * Kafka {@link org.springframework.kafka.listener.MessageListener} entry point.
	 * <p>
	 * Delegate the message to the target listener method, with appropriate conversion of
	 * the message argument.
	 * @param records the incoming list of Kafka {@link ConsumerRecord}.
	 * @param acknowledgment the acknowledgment.
	 * @param consumer the consumer.
	 */
	@Override
	public void onMessage(List<ConsumerRecord<K, V>> records, @Nullable Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {
		Message<?> message;
		if (!isConsumerRecordList()) {
			if (isMessageList() || this.batchToRecordAdapter != null) {
				// Convert each record individually.
				List<Message<?>> messages = new ArrayList<>(records.size());
				for (ConsumerRecord<K, V> record : records) {
					messages.add(toMessagingMessage(record, acknowledgment, consumer));
				}
				if (this.batchToRecordAdapter == null) {
					message = MessageBuilder.withPayload(messages).build();
				}
				else {
					// Adapter invokes the handler once per record; nothing more to do here.
					logger.debug(() -> "Processing " + messages);
					this.batchToRecordAdapter.adapt(messages, records, acknowledgment, consumer, this::invoke);
					return;
				}
			}
			else {
				// Convert the whole batch to one Message.
				message = toMessagingMessage(records, acknowledgment, consumer);
			}
		}
		else {
			message = NULL_MESSAGE; // optimization since we won't need any conversion to invoke
		}
		logger.debug(() -> "Processing [" + message + "]");
		invoke(records, acknowledgment, consumer, message);
	}

	/**
	 * Invoke the handler; on {@link ListenerExecutionFailedException}, route to the
	 * configured error handler (which may produce a reply) or rethrow when none is set.
	 */
	protected void invoke(Object records, @Nullable Acknowledgment acknowledgment, Consumer<?, ?> consumer,
			final Message<?> messageArg) {

		Message<?> message = messageArg;
		try {
			Object result = invokeHandler(records, acknowledgment, message, consumer);
			if (result != null) {
				handleResult(result, records, message);
			}
		}
		catch (ListenerExecutionFailedException e) { // NOSONAR ex flow control
			if (this.errorHandler != null) {
				try {
					// Give the error handler a real message even on the no-conversion path.
					if (message.equals(NULL_MESSAGE)) {
						message = new GenericMessage<>(records);
					}
					Object result = this.errorHandler.handleError(message, e, consumer);
					if (result != null) {
						handleResult(result, records, message);
					}
				}
				catch (Exception ex) {
					throw new ListenerExecutionFailedException(createMessagingErrorMessage(// NOSONAR stack trace loss
							"Listener error handler threw an exception for the incoming message",
							message.getPayload()), ex);
				}
			}
			else {
				throw e;
			}
		}
	}

	@SuppressWarnings({ "unchecked", "rawtypes" })
	protected Message<?> toMessagingMessage(List records, @Nullable Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {
		return getBatchMessageConverter().toMessage(records, acknowledgment, consumer, getType());
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/BatchToRecordAdapter.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.messaging.Message;
/**
* An adapter that adapts a batch listener to a record listener method. Use this, for
* example, if you want a batch to be processed in a single transaction but wish to invoke
* the listener with each message individually.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.4.2
*
*/
@FunctionalInterface
public interface BatchToRecordAdapter<K, V> {
/**
* Adapt the list and invoke the callback for each message.
* @param messages the messages.
* @param records the records.
* @param ack the acknowledgment.
* @param consumer the consumer.
* @param callback the callback.
*/
void adapt(List<Message<?>> messages, List<ConsumerRecord<K, V>> records, Acknowledgment ack,
Consumer<?, ?> consumer, Callback<K, V> callback);
/**
* A callback for each message.
*
* @param <K> the key type.
* @param <V> the value type.
*/
@FunctionalInterface
interface Callback<K, V> {
/**
* Handle each message.
* @param record the record.
* @param ack the acknowledgment.
* @param consumer the consumer.
* @param message the message.
*/
void invoke(ConsumerRecord<K, V> record, Acknowledgment ack, Consumer<?, ?> consumer,
Message<?> message);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/ConsumerRecordMetadata.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.record.TimestampType;
/**
* Used to provide a listener method argument when the user supplies such a parameter.
* Delegates to {@link RecordMetadata} (which is final, hence no subclass) for all except
* timestamp type.
*
* @author Gary Russell
* @since 2.5
*
*/
public class ConsumerRecordMetadata {

	// All metadata except the timestamp type is served by this delegate
	// (RecordMetadata is final, so delegation is used instead of subclassing).
	private final RecordMetadata delegate;

	private final TimestampType timestampType;

	public ConsumerRecordMetadata(RecordMetadata delegate, TimestampType timestampType) {
		this.delegate = delegate;
		this.timestampType = timestampType;
	}

	public boolean hasOffset() {
		return this.delegate.hasOffset();
	}

	public long offset() {
		return this.delegate.offset();
	}

	public boolean hasTimestamp() {
		return this.delegate.hasTimestamp();
	}

	public long timestamp() {
		return this.delegate.timestamp();
	}

	public int serializedKeySize() {
		return this.delegate.serializedKeySize();
	}

	public int serializedValueSize() {
		return this.delegate.serializedValueSize();
	}

	public String topic() {
		return this.delegate.topic();
	}

	public int partition() {
		return this.delegate.partition();
	}

	public TimestampType timestampType() {
		return this.timestampType;
	}

	@Override
	public int hashCode() {
		// Combines the delegate's hash with the enum name's hash; consistent with
		// equals() since equal enum constants share the same name.
		return this.delegate.hashCode() + this.timestampType.name().hashCode();
	}

	@Override
	public boolean equals(Object obj) {
		if (obj instanceof ConsumerRecordMetadata) {
			ConsumerRecordMetadata other = (ConsumerRecordMetadata) obj;
			return this.delegate.equals(other.delegate)
					&& this.timestampType.equals(other.timestampType());
		}
		return false;
	}

	@Override
	public String toString() {
		return this.delegate.toString();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/DefaultBatchToRecordAdapter.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.listener.ConsumerRecordRecoverer;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.messaging.Message;
import org.springframework.util.Assert;
/**
* The default {@link BatchToRecordAdapter} implementation; if the supplied recoverer
* throws an exception, the batch will be aborted; otherwise the next record will be
* processed.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.4.2
*/
public class DefaultBatchToRecordAdapter<K, V> implements BatchToRecordAdapter<K, V> {

	private static final LogAccessor LOGGER = new LogAccessor(DefaultBatchToRecordAdapter.class);

	// Invoked for a record whose callback threw; a throwing recoverer aborts the batch.
	private final ConsumerRecordRecoverer recoverer;

	/**
	 * Construct an instance with the default recoverer which simply logs the failed
	 * record.
	 */
	public DefaultBatchToRecordAdapter() {
		this((record, ex) -> LOGGER.error(ex, () -> "Failed to process " + record));
	}

	/**
	 * Construct an instance with the provided recoverer.
	 * @param recoverer the recoverer.
	 */
	public DefaultBatchToRecordAdapter(ConsumerRecordRecoverer recoverer) {
		Assert.notNull(recoverer, "'recoverer' cannot be null");
		this.recoverer = recoverer;
	}

	@Override
	public void adapt(List<Message<?>> messages, List<ConsumerRecord<K, V>> records, Acknowledgment ack,
			Consumer<?, ?> consumer, Callback<K, V> callback) {

		// messages and records are parallel lists; walk them in lock step.
		int index = 0;
		for (Message<?> message : messages) {
			ConsumerRecord<K, V> record = records.get(index++);
			try {
				callback.invoke(record, ack, consumer, message);
			}
			catch (Exception e) {
				// Recover this record; an exception from the recoverer aborts the batch.
				this.recoverer.accept(record, e);
			}
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/DelegatingInvocableHandler.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.config.BeanExpressionContext;
import org.springframework.beans.factory.config.BeanExpressionResolver;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.core.MethodParameter;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.expression.Expression;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.handler.HandlerMethod;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.messaging.handler.annotation.support.PayloadMethodArgumentResolver;
import org.springframework.messaging.handler.invocation.InvocableHandlerMethod;
import org.springframework.util.Assert;
import org.springframework.validation.Validator;
/**
* Delegates to an {@link InvocableHandlerMethod} based on the message payload type.
* Matches a single, non-annotated parameter or one that is annotated with
* {@link org.springframework.messaging.handler.annotation.Payload}. Matches must be
* unambiguous.
*
* @author Gary Russell
*
*/
public class DelegatingInvocableHandler {

	private static final SpelExpressionParser PARSER = new SpelExpressionParser();

	// All candidate handler methods (wrapped when they take ConsumerRecordMetadata).
	private final List<InvocableHandlerMethod> handlers;

	// Payload type -> resolved handler; populated lazily on first dispatch.
	private final ConcurrentMap<Class<?>, InvocableHandlerMethod> cachedHandlers = new ConcurrentHashMap<>();

	// Handler -> the parameter that receives the payload; only populated when a
	// validator is configured.
	private final ConcurrentMap<InvocableHandlerMethod, MethodParameter> payloadMethodParameters =
			new ConcurrentHashMap<>();

	// Handler used when no other handler matches the payload type; may be null.
	private final InvocableHandlerMethod defaultHandler;

	// Handler -> parsed @SendTo reply destination expression.
	private final Map<InvocableHandlerMethod, Expression> handlerSendTo = new ConcurrentHashMap<>();

	// Handler -> whether its return type is a Message (or a collection of Messages).
	private final Map<InvocableHandlerMethod, Boolean> handlerReturnsMessage = new ConcurrentHashMap<>();

	private final Object bean;

	private final BeanExpressionResolver resolver;

	private final BeanExpressionContext beanExpressionContext;

	private final ConfigurableListableBeanFactory beanFactory;

	// Wraps the user-provided Validator; null when no validation was configured.
	private final PayloadValidator validator;

	/**
	 * Construct an instance with the supplied handlers for the bean.
	 * @param handlers the handlers.
	 * @param bean the bean.
	 * @param beanExpressionResolver the expression resolver.
	 * @param beanExpressionContext the expression context.
	 * @deprecated in favor of
	 * {@link #DelegatingInvocableHandler(List, InvocableHandlerMethod, Object, BeanExpressionResolver, BeanExpressionContext, BeanFactory, Validator)}
	 */
	@Deprecated
	public DelegatingInvocableHandler(List<InvocableHandlerMethod> handlers, Object bean,
			BeanExpressionResolver beanExpressionResolver, BeanExpressionContext beanExpressionContext) {

		this(handlers, null, bean, beanExpressionResolver, beanExpressionContext, null, null);
	}

	/**
	 * Construct an instance with the supplied handlers for the bean.
	 * @param handlers the handlers.
	 * @param defaultHandler the default handler.
	 * @param bean the bean.
	 * @param beanExpressionResolver the resolver.
	 * @param beanExpressionContext the context.
	 * @since 2.1.3
	 * @deprecated in favor of
	 * {@link #DelegatingInvocableHandler(List, InvocableHandlerMethod, Object, BeanExpressionResolver, BeanExpressionContext, BeanFactory, Validator)}
	 */
	@Deprecated
	public DelegatingInvocableHandler(List<InvocableHandlerMethod> handlers,
			@Nullable InvocableHandlerMethod defaultHandler,
			Object bean, BeanExpressionResolver beanExpressionResolver, BeanExpressionContext beanExpressionContext) {

		this(handlers, defaultHandler, bean, beanExpressionResolver, beanExpressionContext, null, null);
	}

	/**
	 * Construct an instance with the supplied handlers for the bean.
	 * @param handlers the handlers.
	 * @param defaultHandler the default handler.
	 * @param bean the bean.
	 * @param beanExpressionResolver the resolver.
	 * @param beanExpressionContext the context.
	 * @param beanFactory the bean factory.
	 * @param validator the validator.
	 * @since 2.5.11
	 */
	public DelegatingInvocableHandler(List<InvocableHandlerMethod> handlers,
			@Nullable InvocableHandlerMethod defaultHandler, Object bean,
			@Nullable BeanExpressionResolver beanExpressionResolver,
			@Nullable BeanExpressionContext beanExpressionContext,
			@Nullable BeanFactory beanFactory, @Nullable Validator validator) {

		this.handlers = new ArrayList<>();
		for (InvocableHandlerMethod handler : handlers) {
			this.handlers.add(wrapIfNecessary(handler));
		}
		this.defaultHandler = wrapIfNecessary(defaultHandler);
		this.bean = bean;
		this.resolver = beanExpressionResolver;
		this.beanExpressionContext = beanExpressionContext;
		// Embedded-value resolution in @SendTo is only possible with a configurable factory.
		this.beanFactory = beanFactory instanceof ConfigurableListableBeanFactory
				? (ConfigurableListableBeanFactory) beanFactory
				: null;
		this.validator = validator == null ? null : new PayloadValidator(validator);
	}

	/**
	 * Wrap the handler in a {@link MetadataAwareInvocableHandlerMethod} when any of its
	 * parameters is {@link ConsumerRecordMetadata}, so that {@link #invoke} knows to
	 * prepend the metadata argument.
	 * @param handler the handler to inspect; may be {@code null}.
	 * @return the (possibly wrapped) handler, or {@code null}.
	 */
	@Nullable
	private InvocableHandlerMethod wrapIfNecessary(@Nullable InvocableHandlerMethod handler) {
		if (handler == null) {
			return null;
		}
		Parameter[] parameters = handler.getMethod().getParameters();
		for (Parameter parameter : parameters) {
			if (parameter.getType().equals(ConsumerRecordMetadata.class)) {
				return new DelegatingInvocableHandler.MetadataAwareInvocableHandlerMethod(handler);
			}
		}
		return handler;
	}

	/**
	 * Return the bean for this handler.
	 * @return the bean.
	 */
	public Object getBean() {
		return this.bean;
	}

	/**
	 * Invoke the method with the given message.
	 * @param message the message.
	 * @param providedArgs additional arguments.
	 * @return the result of the invocation.
	 * @throws Exception raised if no suitable argument resolver can be found,
	 * or the method raised an exception.
	 */
	public Object invoke(Message<?> message, Object... providedArgs) throws Exception { //NOSONAR
		Class<? extends Object> payloadClass = message.getPayload().getClass();
		InvocableHandlerMethod handler = getHandlerForPayload(payloadClass);
		// NOTE(review): explicit validation is performed here only when a default
		// handler exists — presumably validation happens during argument resolution
		// otherwise; confirm against the payload argument resolver configuration.
		if (this.validator != null && this.defaultHandler != null) {
			MethodParameter parameter = this.payloadMethodParameters.get(handler);
			if (parameter != null) {
				this.validator.validate(message, parameter, message.getPayload());
			}
		}
		Object result;
		if (handler instanceof MetadataAwareInvocableHandlerMethod) {
			// Prepend the record metadata so the ConsumerRecordMetadata parameter resolves.
			Object[] args = new Object[providedArgs.length + 1];
			args[0] = AdapterUtils.buildConsumerRecordMetadataFromArray(providedArgs);
			System.arraycopy(providedArgs, 0, args, 1, providedArgs.length);
			result = handler.invoke(message, args);
		}
		else {
			result = handler.invoke(message, providedArgs);
		}
		Expression replyTo = this.handlerSendTo.get(handler);
		return new InvocationResult(result, replyTo, this.handlerReturnsMessage.get(handler));
	}

	/**
	 * Determine the {@link InvocableHandlerMethod} for the provided type.
	 * @param payloadClass the payload class.
	 * @return the handler.
	 */
	protected InvocableHandlerMethod getHandlerForPayload(Class<? extends Object> payloadClass) {
		InvocableHandlerMethod handler = this.cachedHandlers.get(payloadClass);
		if (handler == null) {
			handler = findHandlerForPayload(payloadClass);
			if (handler == null) {
				throw new KafkaException("No method found for " + payloadClass);
			}
			this.cachedHandlers.putIfAbsent(payloadClass, handler); //NOSONAR
			setupReplyTo(handler);
		}
		return handler;
	}

	// Record the handler's @SendTo destination (if any) and whether it returns a Message.
	private void setupReplyTo(InvocableHandlerMethod handler) {
		String replyTo = null;
		Method method = handler.getMethod();
		SendTo ann = null;
		if (method != null) {
			// Method-level @SendTo takes precedence over the class-level annotation.
			ann = AnnotationUtils.getAnnotation(method, SendTo.class);
			replyTo = extractSendTo(method.toString(), ann);
		}
		if (ann == null) {
			Class<?> beanType = handler.getBeanType();
			ann = AnnotationUtils.getAnnotation(beanType, SendTo.class);
			replyTo = extractSendTo(beanType.getSimpleName(), ann);
		}
		if (ann != null && replyTo == null) {
			// An empty @SendTo means "use the default reply topic expression".
			replyTo = AdapterUtils.getDefaultReplyTopicExpression();
		}
		if (replyTo != null) {
			this.handlerSendTo.put(handler, PARSER.parseExpression(replyTo, AdapterUtils.PARSER_CONTEXT));
		}
		this.handlerReturnsMessage.put(handler, KafkaUtils.returnTypeMessageOrCollectionOf(method));
	}

	// Extract the single @SendTo destination, resolving placeholders and expressions;
	// more than one destination is invalid.
	@Nullable
	private String extractSendTo(String element, @Nullable SendTo ann) {
		String replyTo = null;
		if (ann != null) {
			String[] destinations = ann.value();
			if (destinations.length > 1) {
				throw new IllegalStateException("Invalid @" + SendTo.class.getSimpleName() + " annotation on '"
						+ element + "' one destination must be set (got " + Arrays.toString(destinations) + ")");
			}
			replyTo = destinations.length == 1 ? destinations[0] : null;
			if (replyTo != null && this.beanFactory != null) {
				replyTo = this.beanFactory.resolveEmbeddedValue(replyTo);
				if (replyTo != null) {
					replyTo = resolve(replyTo);
				}
			}
		}
		return replyTo;
	}

	// Evaluate a bean expression (e.g. SpEL) in the destination; must yield a String.
	private String resolve(String value) {
		if (this.resolver != null && this.beanExpressionContext != null) {
			Object newValue = this.resolver.evaluate(value, this.beanExpressionContext);
			Assert.isInstanceOf(String.class, newValue, "Invalid @SendTo expression");
			return (String) newValue;
		}
		else {
			return value;
		}
	}

	/**
	 * Find the handler matching the payload type; the default handler never makes a
	 * match ambiguous, but two non-default matches do.
	 * @param payloadClass the payload class.
	 * @return the matching handler, the default handler, or {@code null}.
	 */
	@Nullable
	protected InvocableHandlerMethod findHandlerForPayload(Class<? extends Object> payloadClass) {
		InvocableHandlerMethod result = null;
		for (InvocableHandlerMethod handler : this.handlers) {
			if (matchHandlerMethod(payloadClass, handler)) {
				if (result != null) {
					boolean resultIsDefault = result.equals(this.defaultHandler);
					// Two non-default matches is ambiguous; a match plus the default is not.
					if (!handler.equals(this.defaultHandler) && !resultIsDefault) {
						throw new KafkaException("Ambiguous methods for payload type: " + payloadClass + ": " +
								result.getMethod().getName() + " and " + handler.getMethod().getName());
					}
					if (!resultIsDefault) {
						continue; // otherwise replace the result with the actual match
					}
				}
				result = handler;
			}
		}
		return result != null ? result : this.defaultHandler;
	}

	// A handler matches when it has a non-@Header parameter assignable from the payload
	// type; the single-parameter case is handled first as a fast path.
	protected boolean matchHandlerMethod(Class<? extends Object> payloadClass, InvocableHandlerMethod handler) {
		Method method = handler.getMethod();
		Annotation[][] parameterAnnotations = method.getParameterAnnotations();
		// Single param; no annotation or not @Header
		if (parameterAnnotations.length == 1) {
			MethodParameter methodParameter = new MethodParameter(method, 0);
			if ((methodParameter.getParameterAnnotations().length == 0
					|| !methodParameter.hasParameterAnnotation(Header.class))
					&& methodParameter.getParameterType().isAssignableFrom(payloadClass)) {
				if (this.validator != null) {
					this.payloadMethodParameters.put(handler, methodParameter);
				}
				return true;
			}
		}
		MethodParameter foundCandidate = findCandidate(payloadClass, method, parameterAnnotations);
		if (foundCandidate != null && this.validator != null) {
			this.payloadMethodParameters.put(handler, foundCandidate);
		}
		return foundCandidate != null;
	}

	// Locate the single payload-compatible, non-@Header parameter; two such parameters
	// on the same method is ambiguous.
	private MethodParameter findCandidate(Class<? extends Object> payloadClass, Method method,
			Annotation[][] parameterAnnotations) {
		MethodParameter foundCandidate = null;
		for (int i = 0; i < parameterAnnotations.length; i++) {
			MethodParameter methodParameter = new MethodParameter(method, i);
			if ((methodParameter.getParameterAnnotations().length == 0
					|| !methodParameter.hasParameterAnnotation(Header.class))
					&& methodParameter.getParameterType().isAssignableFrom(payloadClass)) {
				if (foundCandidate != null) {
					throw new KafkaException("Ambiguous payload parameter for " + method.toGenericString());
				}
				foundCandidate = methodParameter;
			}
		}
		return foundCandidate;
	}

	/**
	 * Return a string representation of the method that will be invoked for this payload.
	 * @param payload the payload.
	 * @return the method name.
	 */
	public String getMethodNameFor(Object payload) {
		InvocableHandlerMethod handlerForPayload = getHandlerForPayload(payload.getClass());
		return handlerForPayload == null ? "no match" : handlerForPayload.getMethod().toGenericString(); //NOSONAR
	}

	/**
	 * Return true when a default handler was provided.
	 * @return true if there is a default handler.
	 */
	public boolean hasDefaultHandler() {
		return this.defaultHandler != null;
	}

	/**
	 * A handler method that is aware of {@link ConsumerRecordMetadata}; marker subclass
	 * checked via {@code instanceof} in {@link #invoke}.
	 *
	 * @since 2.5
	 */
	private static final class MetadataAwareInvocableHandlerMethod extends InvocableHandlerMethod {

		MetadataAwareInvocableHandlerMethod(HandlerMethod handlerMethod) {
			super(handlerMethod);
		}

	}

	// Adapter exposing PayloadMethodArgumentResolver's protected validate() publicly;
	// the MessageConverter is required by the super constructor but never used.
	private static final class PayloadValidator extends PayloadMethodArgumentResolver {

		PayloadValidator(Validator validator) {
			super(new MessageConverter() { // Required but never used

				@Override
				@Nullable
				public Message<?> toMessage(Object payload, @Nullable
						MessageHeaders headers) {
					return null;
				}

				@Override
				@Nullable
				public Object fromMessage(Message<?> message, Class<?> targetClass) {
					return null;
				}

			}, validator);
		}

		@Override
		public void validate(Message<?> message, MethodParameter parameter, Object target) { // NOSONAR - public
			super.validate(message, parameter, target);
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/FilteringBatchMessageListenerAdapter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.BatchAcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.BatchMessageListener;
import org.springframework.kafka.listener.ListenerType;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* A {@link BatchMessageListener} adapter that implements filter logic
* via a {@link RecordFilterStrategy}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
*
*/
public class FilteringBatchMessageListenerAdapter<K, V>
		extends AbstractFilteringMessageListener<K, V, BatchMessageListener<K, V>>
		implements BatchAcknowledgingConsumerAwareMessageListener<K, V> {

	// Whether a fully-filtered batch should be ack'd here instead of delegated.
	private final boolean ackDiscarded;

	/**
	 * Create an instance with the supplied strategy and delegate listener; discarded
	 * batches are not acknowledged by this adapter.
	 * @param delegate the delegate.
	 * @param recordFilterStrategy the filter.
	 */
	public FilteringBatchMessageListenerAdapter(BatchMessageListener<K, V> delegate,
			RecordFilterStrategy<K, V> recordFilterStrategy) {

		this(delegate, recordFilterStrategy, false);
	}

	/**
	 * Create an instance with the supplied strategy and delegate listener.
	 * When 'ackDiscarded' is false, and all messages are filtered, an empty list
	 * is passed to the delegate (so it can decide whether or not to ack); when true, a
	 * completely filtered batch is ack'd by this class, and no call is made to the delegate.
	 * @param delegate the delegate.
	 * @param recordFilterStrategy the filter.
	 * @param ackDiscarded true to ack (commit offset for) discarded messages when the
	 * listener is configured for manual acks.
	 */
	public FilteringBatchMessageListenerAdapter(BatchMessageListener<K, V> delegate,
			RecordFilterStrategy<K, V> recordFilterStrategy, boolean ackDiscarded) {

		super(delegate, recordFilterStrategy);
		this.ackDiscarded = ackDiscarded;
	}

	@Override
	public void onMessage(List<ConsumerRecord<K, V>> records, @Nullable Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {

		List<ConsumerRecord<K, V>> filtered = getRecordFilterStrategy().filterBatch(records);
		Assert.state(filtered != null, "filter returned null from filterBatch");
		boolean consumerAware = this.delegateType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE)
				|| this.delegateType.equals(ListenerType.CONSUMER_AWARE);
		// An empty batch still reaches an acknowledging delegate when ackDiscarded is
		// false, so the delegate can decide whether or not to acknowledge it.
		boolean deliverEmptyForAck = !this.ackDiscarded && this.delegateType.equals(ListenerType.ACKNOWLEDGING);
		if (!filtered.isEmpty() || consumerAware || deliverEmptyForAck) {
			invokeDelegate(filtered, acknowledgment, consumer);
		}
		else if (this.ackDiscarded && acknowledgment != null) {
			// Everything was filtered; commit on the delegate's behalf.
			acknowledgment.acknowledge();
		}
	}

	// Dispatch to the delegate method matching its declared listener type.
	private void invokeDelegate(List<ConsumerRecord<K, V>> filtered, Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {

		if (this.delegateType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE)) {
			this.delegate.onMessage(filtered, acknowledgment, consumer);
		}
		else if (this.delegateType.equals(ListenerType.ACKNOWLEDGING)) {
			this.delegate.onMessage(filtered, acknowledgment);
		}
		else if (this.delegateType.equals(ListenerType.CONSUMER_AWARE)) {
			this.delegate.onMessage(filtered, consumer);
		}
		else if (this.delegateType.equals(ListenerType.SIMPLE)) {
			this.delegate.onMessage(filtered);
		}
	}

	/*
	 * Since the container uses the delegate's type to determine which method to call, we
	 * must implement them all.
	 */
	@Override
	public void onMessage(List<ConsumerRecord<K, V>> data) {
		onMessage(data, null, null); // NOSONAR
	}

	@Override
	public void onMessage(List<ConsumerRecord<K, V>> data, Acknowledgment acknowledgment) {
		onMessage(data, acknowledgment, null); // NOSONAR
	}

	@Override
	public void onMessage(List<ConsumerRecord<K, V>> data, Consumer<?, ?> consumer) {
		onMessage(data, null, consumer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/FilteringMessageListenerAdapter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.AcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.support.Acknowledgment;
/**
* A {@link MessageListener} adapter that implements filter logic
* via a {@link RecordFilterStrategy}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
*
*/
public class FilteringMessageListenerAdapter<K, V>
		extends AbstractFilteringMessageListener<K, V, MessageListener<K, V>>
		implements AcknowledgingConsumerAwareMessageListener<K, V> {

	// Whether a filtered (discarded) record should be acknowledged by this adapter.
	private final boolean ackDiscarded;

	/**
	 * Create an instance with the supplied strategy and delegate listener; discarded
	 * records are not acknowledged by this adapter.
	 * @param delegate the delegate.
	 * @param recordFilterStrategy the filter.
	 */
	public FilteringMessageListenerAdapter(MessageListener<K, V> delegate,
			RecordFilterStrategy<K, V> recordFilterStrategy) {

		this(delegate, recordFilterStrategy, false);
	}

	/**
	 * Create an instance with the supplied strategy and delegate listener.
	 * @param delegate the delegate.
	 * @param recordFilterStrategy the filter.
	 * @param ackDiscarded true to ack (commit offset for) discarded messages when the
	 * listener is configured for manual acks.
	 */
	public FilteringMessageListenerAdapter(MessageListener<K, V> delegate,
			RecordFilterStrategy<K, V> recordFilterStrategy, boolean ackDiscarded) {

		super(delegate, recordFilterStrategy);
		this.ackDiscarded = ackDiscarded;
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> consumerRecord, Acknowledgment acknowledgment, Consumer<?, ?> consumer) {
		// Guard clause: a filtered record never reaches the delegate.
		if (filter(consumerRecord)) {
			ackFilteredIfNecessary(acknowledgment);
			return;
		}
		// Dispatch to the delegate method matching its declared listener type.
		if (this.delegateType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE)) {
			this.delegate.onMessage(consumerRecord, acknowledgment, consumer);
		}
		else if (this.delegateType.equals(ListenerType.ACKNOWLEDGING)) {
			this.delegate.onMessage(consumerRecord, acknowledgment);
		}
		else if (this.delegateType.equals(ListenerType.CONSUMER_AWARE)) {
			this.delegate.onMessage(consumerRecord, consumer);
		}
		else if (this.delegateType.equals(ListenerType.SIMPLE)) {
			this.delegate.onMessage(consumerRecord);
		}
	}

	// Acknowledge a discarded record, but only when the delegate acknowledges manually
	// and this adapter was configured to ack discards.
	private void ackFilteredIfNecessary(Acknowledgment acknowledgment) {
		boolean manualAckDelegate = this.delegateType.equals(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE)
				|| this.delegateType.equals(ListenerType.ACKNOWLEDGING);
		if (manualAckDelegate && this.ackDiscarded && acknowledgment != null) {
			acknowledgment.acknowledge();
		}
	}

	/*
	 * Since the container uses the delegate's type to determine which method to call, we
	 * must implement them all.
	 */
	@Override
	public void onMessage(ConsumerRecord<K, V> data) {
		onMessage(data, null, null); // NOSONAR
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment) {
		onMessage(data, acknowledgment, null); // NOSONAR
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> data, Consumer<?, ?> consumer) {
		onMessage(data, null, consumer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/HandlerAdapter.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.springframework.messaging.Message;
import org.springframework.messaging.handler.invocation.InvocableHandlerMethod;
/**
* A wrapper for either an {@link InvocableHandlerMethod} or
* {@link DelegatingInvocableHandler}. All methods delegate to the
* underlying handler.
*
* @author Gary Russell
*
*/
public class HandlerAdapter {

	// Exactly one of these two is non-null, fixed at construction time.
	private final InvocableHandlerMethod invokerHandlerMethod;

	private final DelegatingInvocableHandler delegatingHandler;

	/**
	 * Construct an instance that delegates to a single handler method.
	 * @param invokerHandlerMethod the method.
	 */
	public HandlerAdapter(InvocableHandlerMethod invokerHandlerMethod) {
		this.invokerHandlerMethod = invokerHandlerMethod;
		this.delegatingHandler = null;
	}

	/**
	 * Construct an instance that delegates to a payload-type-dispatching handler.
	 * @param delegatingHandler the handler.
	 */
	public HandlerAdapter(DelegatingInvocableHandler delegatingHandler) {
		this.invokerHandlerMethod = null;
		this.delegatingHandler = delegatingHandler;
	}

	/**
	 * Invoke the underlying handler with the message and any additional arguments.
	 * @param message the message.
	 * @param providedArgs additional arguments.
	 * @return the invocation result.
	 * @throws Exception if the handler raises one or no argument resolver matches.
	 */
	public Object invoke(Message<?> message, Object... providedArgs) throws Exception { //NOSONAR
		if (this.invokerHandlerMethod != null) {
			return this.invokerHandlerMethod.invoke(message, providedArgs); // NOSONAR
		}
		if (!this.delegatingHandler.hasDefaultHandler()) {
			return this.delegatingHandler.invoke(message, providedArgs);
		}
		// Needed to avoid returning raw Message which matches Object
		Object[] argsWithPayload = new Object[providedArgs.length + 1];
		argsWithPayload[0] = message.getPayload();
		System.arraycopy(providedArgs, 0, argsWithPayload, 1, providedArgs.length);
		return this.delegatingHandler.invoke(message, argsWithPayload);
	}

	/**
	 * Return a string representation of the method that handles this payload.
	 * @param payload the payload.
	 * @return the method description.
	 */
	public String getMethodAsString(Object payload) {
		return this.invokerHandlerMethod != null
				? this.invokerHandlerMethod.getMethod().toGenericString()
				: this.delegatingHandler.getMethodNameFor(payload);
	}

	/**
	 * Return the bean the handler method(s) are declared on.
	 * @return the bean.
	 */
	public Object getBean() {
		return this.invokerHandlerMethod != null
				? this.invokerHandlerMethod.getBean()
				: this.delegatingHandler.getBean();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/InvocationResult.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.springframework.expression.Expression;
import org.springframework.lang.Nullable;
/**
* The result of a method invocation.
*
* @author Gary Russell
* @since 2.2
*/
public final class InvocationResult {

	// The raw value returned by the listener method; may be null.
	@Nullable
	private final Object result;

	// The @SendTo reply destination expression, if any.
	@Nullable
	private final Expression sendTo;

	// Whether the handler's declared return type is a Message (or collection thereof).
	private final boolean messageReturnType;

	public InvocationResult(@Nullable Object result, @Nullable Expression sendTo, boolean messageReturnType) {
		this.result = result;
		this.sendTo = sendTo;
		this.messageReturnType = messageReturnType;
	}

	@Nullable
	public Object getResult() {
		return this.result;
	}

	@Nullable
	public Expression getSendTo() {
		return this.sendTo;
	}

	public boolean isMessageReturnType() {
		return this.messageReturnType;
	}

	@Override
	public String toString() {
		StringBuilder description = new StringBuilder("InvocationResult [result=");
		description.append(this.result)
				.append(", sendTo=")
				.append(this.sendTo == null ? "null" : this.sendTo.getExpressionString())
				.append(", messageReturnType=")
				.append(this.messageReturnType)
				.append("]");
		return description.toString();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/KafkaBackoffAwareMessageListenerAdapter.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.math.BigInteger;
import java.time.Clock;
import java.util.Optional;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.listener.AcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.KafkaBackoffException;
import org.springframework.kafka.listener.KafkaConsumerBackoffManager;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.listener.TimestampedException;
import org.springframework.kafka.retrytopic.RetryTopicHeaders;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
/**
*
* A {@link AcknowledgingConsumerAwareMessageListener} implementation that looks for a
* backoff dueTimestamp header and invokes a {@link KafkaConsumerBackoffManager} instance
* that will back off if necessary.
*
* @param <K> the record key type.
* @param <V> the record value type.
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class KafkaBackoffAwareMessageListenerAdapter<K, V>
		extends AbstractDelegatingMessageListenerAdapter<MessageListener<K, V>>
		implements AcknowledgingConsumerAwareMessageListener<K, V> {

	private final String listenerId;

	// Name of the record header carrying the backoff due timestamp.
	private final String backoffTimestampHeader;

	// Clock used to timestamp exceptions thrown by the delegate.
	private final Clock clock;

	private final KafkaConsumerBackoffManager kafkaConsumerBackoffManager;

	/**
	 * The configuration for this listener adapter.
	 *
	 * @param delegate the MessageListener instance that will handle the messages.
	 * @param kafkaConsumerBackoffManager the manager that will handle the back off.
	 * @param listenerId the id of the listener container associated to this adapter.
	 * @param backoffTimestampHeader the header name that will be looked up in the
	 * incoming record to acquire the timestamp.
	 * @param clock the clock instance that will be used to timestamp the
	 * exception throwing.
	 * @since 2.7
	 */
	public KafkaBackoffAwareMessageListenerAdapter(MessageListener<K, V> delegate,
			KafkaConsumerBackoffManager kafkaConsumerBackoffManager,
			String listenerId,
			String backoffTimestampHeader,
			Clock clock) {

		super(delegate);
		this.listenerId = listenerId;
		this.kafkaConsumerBackoffManager = kafkaConsumerBackoffManager;
		this.backoffTimestampHeader = backoffTimestampHeader;
		this.clock = clock;
	}

	/**
	 * Construct an instance using the default backoff timestamp header,
	 * {@link RetryTopicHeaders#DEFAULT_HEADER_BACKOFF_TIMESTAMP}.
	 * <p>Note: the previous {@code throws KafkaBackoffException} declaration was
	 * removed; that exception is unchecked and is never thrown by this constructor.
	 * @param adapter the MessageListener instance that will handle the messages.
	 * @param kafkaConsumerBackoffManager the manager that will handle the back off.
	 * @param listenerId the id of the listener container associated to this adapter.
	 * @param clock the clock instance that will be used to timestamp the exception throwing.
	 * @since 2.7
	 */
	public KafkaBackoffAwareMessageListenerAdapter(MessageListener<K, V> adapter,
			KafkaConsumerBackoffManager kafkaConsumerBackoffManager, String listenerId, Clock clock) {

		this(adapter, kafkaConsumerBackoffManager, listenerId, RetryTopicHeaders.DEFAULT_HEADER_BACKOFF_TIMESTAMP,
				clock);
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> consumerRecord, @Nullable Acknowledgment acknowledgment,
			@Nullable Consumer<?, ?> consumer) {

		// If the record carries a due timestamp header, let the backoff manager decide
		// whether to back off first; any exception it throws propagates un-wrapped
		// because this call is deliberately outside the try block below.
		maybeGetBackoffTimestamp(consumerRecord)
				.ifPresent(nextExecutionTimestamp -> this.kafkaConsumerBackoffManager
						.backOffIfNecessary(createContext(consumerRecord, nextExecutionTimestamp, consumer)));
		try {
			invokeDelegateOnMessage(consumerRecord, acknowledgment, consumer);
		}
		catch (Exception ex) {
			// Record the failure time so downstream retry logic can schedule the next attempt.
			throw new TimestampedException(ex, this.clock);
		}
	}

	// Dispatch to the delegate method matching its declared listener type.
	private void invokeDelegateOnMessage(ConsumerRecord<K, V> consumerRecord, Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {

		switch (this.delegateType) {
			case ACKNOWLEDGING_CONSUMER_AWARE:
				this.delegate.onMessage(consumerRecord, acknowledgment, consumer);
				break;
			case ACKNOWLEDGING:
				this.delegate.onMessage(consumerRecord, acknowledgment);
				break;
			case CONSUMER_AWARE:
				this.delegate.onMessage(consumerRecord, consumer);
				break;
			case SIMPLE:
				this.delegate.onMessage(consumerRecord);
		}
	}

	// Build the backoff context for this record's topic-partition.
	private KafkaConsumerBackoffManager.Context createContext(ConsumerRecord<K, V> data, long nextExecutionTimestamp,
			Consumer<?, ?> consumer) {

		return this.kafkaConsumerBackoffManager.createContext(nextExecutionTimestamp, this.listenerId,
				new TopicPartition(data.topic(), data.partition()), consumer);
	}

	// The header value is the big-endian byte encoding of the timestamp, hence BigInteger.
	private Optional<Long> maybeGetBackoffTimestamp(ConsumerRecord<K, V> data) {
		return Optional
				.ofNullable(data.headers().lastHeader(this.backoffTimestampHeader))
				.map(timestampHeader -> new BigInteger(timestampHeader.value()).longValue());
	}

	/*
	 * Since the container uses the delegate's type to determine which method to call, we
	 * must implement them all.
	 */
	@Override
	public void onMessage(ConsumerRecord<K, V> data) {
		onMessage(data, null, null); // NOSONAR
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment) {
		onMessage(data, acknowledgment, null); // NOSONAR
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> data, Consumer<?, ?> consumer) {
		onMessage(data, null, consumer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/MessagingMessageListenerAdapter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.WildcardType;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.springframework.context.expression.MapAccessor;
import org.springframework.core.MethodParameter;
import org.springframework.core.log.LogAccessor;
import org.springframework.expression.BeanResolver;
import org.springframework.expression.Expression;
import org.springframework.expression.common.LiteralExpression;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.expression.spel.support.StandardEvaluationContext;
import org.springframework.expression.spel.support.StandardTypeConverter;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ConsumerSeekAware;
import org.springframework.kafka.listener.ListenerExecutionFailedException;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaNull;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.converter.MessageConversionException;
import org.springframework.messaging.converter.SmartMessageConverter;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.messaging.handler.annotation.support.MethodArgumentNotValidException;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
/**
* An abstract {@link org.springframework.kafka.listener.MessageListener} adapter
* providing the necessary infrastructure to extract the payload of a
* {@link org.springframework.messaging.Message}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
* @author Venil Noronha
*/
public abstract class MessagingMessageListenerAdapter<K, V> implements ConsumerSeekAware {

	private static final SpelExpressionParser PARSER = new SpelExpressionParser();

	/**
	 * Message used when no conversion is needed.
	 */
	protected static final Message<KafkaNull> NULL_MESSAGE = new GenericMessage<>(KafkaNull.INSTANCE); // NOSONAR

	// The listener bean whose method is invoked for each record.
	private final Object bean;

	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); //NOSONAR

	// Payload type inferred from the listener method's parameters (may be null).
	private final Type inferredType;

	private final StandardEvaluationContext evaluationContext = new StandardEvaluationContext();

	private HandlerAdapter handlerMethod;

	// Flags describing the shape of the listener method's payload parameter,
	// populated by determineInferredType(Method).
	private boolean isConsumerRecordList;

	private boolean isConsumerRecords;

	private boolean isMessageList;

	private boolean conversionNeeded = true;

	private RecordMessageConverter messageConverter = new MessagingMessageConverter();

	// True once a custom converter has been set; guards setMessagingConverter().
	private boolean converterSet;

	private Type fallbackType = Object.class;

	private Expression replyTopicExpression;

	@SuppressWarnings("rawtypes")
	private KafkaTemplate replyTemplate;

	private boolean hasAckParameter;

	private boolean hasMetadataParameter;

	private boolean messageReturnType;

	private ReplyHeadersConfigurer replyHeadersConfigurer;

	private boolean splitIterables = true;

	/**
	 * Create an instance for the provided bean and method; the method's parameters are
	 * inspected to infer the payload type for conversion.
	 * @param bean the listener bean.
	 * @param method the listener method.
	 */
	public MessagingMessageListenerAdapter(Object bean, Method method) {
		this.bean = bean;
		this.inferredType = determineInferredType(method); // NOSONAR = intentionally not final
	}

	/**
	 * Set the MessageConverter.
	 * @param messageConverter the converter.
	 */
	public void setMessageConverter(RecordMessageConverter messageConverter) {
		this.messageConverter = messageConverter;
		this.converterSet = true;
	}

	/**
	 * Return the {@link MessagingMessageConverter} for this listener,
	 * being able to convert {@link org.springframework.messaging.Message}.
	 * @return the {@link MessagingMessageConverter} for this listener,
	 * being able to convert {@link org.springframework.messaging.Message}.
	 */
	protected final RecordMessageConverter getMessageConverter() {
		return this.messageConverter;
	}

	/**
	 * Set the {@link SmartMessageConverter} to use with the default
	 * {@link MessagingMessageConverter}. Not allowed when a custom
	 * {@link #setMessageConverter(RecordMessageConverter) messageConverter} is provided.
	 * @param messageConverter the converter.
	 * @since 2.7.1
	 */
	public void setMessagingConverter(SmartMessageConverter messageConverter) {
		Assert.isTrue(!this.converterSet, "Cannot set the SmartMessageConverter when setting the messageConverter, "
				+ "add the SmartConverter to the message converter instead");
		((MessagingMessageConverter) this.messageConverter).setMessagingConverter(messageConverter);
	}

	/**
	 * Returns the inferred type for conversion or, if null, the
	 * {@link #setFallbackType(Class) fallbackType}.
	 * @return the type.
	 */
	protected Type getType() {
		return this.inferredType == null ? this.fallbackType : this.inferredType;
	}

	/**
	 * Set a fallback type to use when using a type-aware message converter and this
	 * adapter cannot determine the inferred type from the method. An example of a
	 * type-aware message converter is the {@code StringJsonMessageConverter}. Defaults to
	 * {@link Object}.
	 * @param fallbackType the type.
	 */
	public void setFallbackType(Class<?> fallbackType) {
		this.fallbackType = fallbackType;
	}

	/**
	 * Set the {@link HandlerAdapter} to use to invoke the method
	 * processing an incoming {@link ConsumerRecord}.
	 * @param handlerMethod {@link HandlerAdapter} instance.
	 */
	public void setHandlerMethod(HandlerAdapter handlerMethod) {
		this.handlerMethod = handlerMethod;
	}

	// True when the method takes List<ConsumerRecord<...>>.
	protected boolean isConsumerRecordList() {
		return this.isConsumerRecordList;
	}

	// True when the method takes ConsumerRecords<...>.
	public boolean isConsumerRecords() {
		return this.isConsumerRecords;
	}

	// False when every parameter is non-convertible (record/ack/consumer/metadata)
	// and the method returns void, so no payload conversion is required.
	public boolean isConversionNeeded() {
		return this.conversionNeeded;
	}

	/**
	 * Set the topic to which to send any result from the method invocation.
	 * May be a SpEL expression {@code !{...}} evaluated at runtime.
	 * @param replyTopicParam the topic or expression.
	 * @since 2.0
	 */
	public void setReplyTopic(String replyTopicParam) {
		String replyTopic = replyTopicParam;
		if (!StringUtils.hasText(replyTopic)) {
			replyTopic = AdapterUtils.getDefaultReplyTopicExpression();
		}
		if (replyTopic.contains(AdapterUtils.PARSER_CONTEXT.getExpressionPrefix())) {
			this.replyTopicExpression = PARSER.parseExpression(replyTopic, AdapterUtils.PARSER_CONTEXT);
		}
		else {
			this.replyTopicExpression = new LiteralExpression(replyTopic);
		}
	}

	/**
	 * Set the template to use to send any result from the method invocation.
	 * @param replyTemplate the template.
	 * @since 2.0
	 */
	public void setReplyTemplate(KafkaTemplate<?, ?> replyTemplate) {
		this.replyTemplate = replyTemplate;
	}

	/**
	 * Set a bean resolver for runtime SpEL expressions. Also configures the evaluation
	 * context with a standard type converter and map accessor.
	 * @param beanResolver the resolver.
	 * @since 2.0
	 */
	public void setBeanResolver(BeanResolver beanResolver) {
		this.evaluationContext.setBeanResolver(beanResolver);
		this.evaluationContext.setTypeConverter(new StandardTypeConverter());
		this.evaluationContext.addPropertyAccessor(new MapAccessor());
	}

	// True when the method takes List<Message<...>>.
	protected boolean isMessageList() {
		return this.isMessageList;
	}

	/**
	 * Return the reply configurer.
	 * @return the configurer.
	 * @since 2.2
	 * @see #setReplyHeadersConfigurer(ReplyHeadersConfigurer)
	 */
	protected ReplyHeadersConfigurer getReplyHeadersConfigurer() {
		return this.replyHeadersConfigurer;
	}

	/**
	 * Set a configurer which will be invoked when creating a reply message.
	 * @param replyHeadersConfigurer the configurer.
	 * @since 2.2
	 */
	public void setReplyHeadersConfigurer(ReplyHeadersConfigurer replyHeadersConfigurer) {
		this.replyHeadersConfigurer = replyHeadersConfigurer;
	}

	/**
	 * When true, {@link Iterable} return results will be split into discrete records.
	 * @return true to split.
	 * @since 2.3.5
	 */
	protected boolean isSplitIterables() {
		return this.splitIterables;
	}

	/**
	 * Set to false to disable splitting {@link Iterable} reply values into separate
	 * records.
	 * @param splitIterables false to disable; default true.
	 * @since 2.3.5
	 */
	public void setSplitIterables(boolean splitIterables) {
		this.splitIterables = splitIterables;
	}

	// The four methods below propagate seek-related callbacks to the listener bean
	// when it implements ConsumerSeekAware itself.

	@Override
	public void registerSeekCallback(ConsumerSeekCallback callback) {
		if (this.bean instanceof ConsumerSeekAware) {
			((ConsumerSeekAware) this.bean).registerSeekCallback(callback);
		}
	}

	@Override
	public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		if (this.bean instanceof ConsumerSeekAware) {
			((ConsumerSeekAware) this.bean).onPartitionsAssigned(assignments, callback);
		}
	}

	@Override
	public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
		if (this.bean instanceof ConsumerSeekAware) {
			((ConsumerSeekAware) this.bean).onPartitionsRevoked(partitions);
		}
	}

	@Override
	public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		if (this.bean instanceof ConsumerSeekAware) {
			((ConsumerSeekAware) this.bean).onIdleContainer(assignments, callback);
		}
	}

	/**
	 * Convert the incoming record to a spring-messaging {@link Message} using the
	 * configured converter and the inferred (or fallback) payload type.
	 * @param record the record.
	 * @param acknowledgment the acknowledgment (may be null).
	 * @param consumer the consumer.
	 * @return the converted message.
	 */
	protected Message<?> toMessagingMessage(ConsumerRecord<K, V> record, @Nullable Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {
		return getMessageConverter().toMessage(record, acknowledgment, consumer, getType());
	}

	/**
	 * Invoke the handler, wrapping any exception to a {@link ListenerExecutionFailedException}
	 * with a dedicated error message.
	 * @param data the data to process during invocation.
	 * @param acknowledgment the acknowledgment to use if any.
	 * @param message the message to process.
	 * @param consumer the consumer.
	 * @return the result of invocation.
	 */
	protected final Object invokeHandler(Object data, @Nullable Acknowledgment acknowledgment, Message<?> message,
			Consumer<?, ?> consumer) {
		try {
			if (data instanceof List && !this.isConsumerRecordList) {
				return this.handlerMethod.invoke(message, acknowledgment, consumer);
			}
			else {
				if (this.hasMetadataParameter) {
					return this.handlerMethod.invoke(message, data, acknowledgment, consumer,
							AdapterUtils.buildConsumerRecordMetadata(data));
				}
				else {
					return this.handlerMethod.invoke(message, data, acknowledgment, consumer);
				}
			}
		}
		catch (org.springframework.messaging.converter.MessageConversionException ex) {
			throw checkAckArg(acknowledgment, message, new MessageConversionException("Cannot handle message", ex));
		}
		catch (MethodArgumentNotValidException ex) {
			throw checkAckArg(acknowledgment, message, ex);
		}
		catch (MessagingException ex) {
			throw new ListenerExecutionFailedException(createMessagingErrorMessage("Listener method could not " +
					"be invoked with the incoming message", message.getPayload()), ex);
		}
		catch (Exception ex) {
			throw new ListenerExecutionFailedException("Listener method '" +
					this.handlerMethod.getMethodAsString(message.getPayload()) + "' threw exception", ex);
		}
	}

	// When the method declares an Acknowledgment parameter but none was supplied, the
	// real problem is the container's AckMode, so report that instead of the raw failure.
	private RuntimeException checkAckArg(@Nullable Acknowledgment acknowledgment, Message<?> message, Exception ex) {
		if (this.hasAckParameter && acknowledgment == null) {
			return new ListenerExecutionFailedException("invokeHandler Failed",
					new IllegalStateException("No Acknowledgment available as an argument, "
							+ "the listener container must have a MANUAL AckMode to populate the Acknowledgment.",
							ex));
		}
		return new ListenerExecutionFailedException(createMessagingErrorMessage("Listener method could not " +
				"be invoked with the incoming message", message.getPayload()), ex);
	}

	/**
	 * Handle the given result object returned from the listener method, sending a
	 * response message to the SendTo topic.
	 * @param resultArg the result object to handle (never <code>null</code>)
	 * @param request the original request message
	 * @param source the source data for the method invocation - e.g.
	 * {@code o.s.messaging.Message<?>}; may be null
	 */
	protected void handleResult(Object resultArg, Object request, Object source) {
		this.logger.debug(() -> "Listener method returned result [" + resultArg
				+ "] - generating response message for it");
		boolean isInvocationResult = resultArg instanceof InvocationResult;
		Object result = isInvocationResult ? ((InvocationResult) resultArg).getResult() : resultArg;
		String replyTopic = evaluateReplyTopic(request, source, resultArg);
		Assert.state(replyTopic == null || this.replyTemplate != null,
				"a KafkaTemplate is required to support replies");
		sendResponse(result, replyTopic, source, isInvocationResult
				? ((InvocationResult) resultArg).isMessageReturnType() : this.messageReturnType);
	}

	// Resolve the reply topic from the invocation result's @SendTo expression, if any,
	// otherwise from the adapter-level reply topic expression.
	@Nullable
	private String evaluateReplyTopic(Object request, Object source, Object result) {
		String replyTo = null;
		if (result instanceof InvocationResult) {
			replyTo = evaluateTopic(request, source, result, ((InvocationResult) result).getSendTo());
		}
		else if (this.replyTopicExpression != null) {
			replyTo = evaluateTopic(request, source, result, this.replyTopicExpression);
		}
		return replyTo;
	}

	// Evaluate the topic expression; literals short-circuit, SpEL expressions are
	// evaluated against a ReplyExpressionRoot and must yield a String or byte[].
	@Nullable
	private String evaluateTopic(Object request, Object source, Object result, @Nullable Expression sendTo) {
		if (sendTo instanceof LiteralExpression) {
			return sendTo.getValue(String.class);
		}
		else {
			Object value = sendTo == null ? null
					: sendTo.getValue(this.evaluationContext, new ReplyExpressionRoot(request, source, result));
			boolean isByteArray = value instanceof byte[];
			if (!(value == null || value instanceof String || isByteArray)) {
				throw new IllegalStateException(
						"replyTopic expression must evaluate to a String or byte[], it is: "
								+ value.getClass().getName());
			}
			if (isByteArray) {
				return new String((byte[]) value, StandardCharsets.UTF_8); // NOSONAR
			}
			return (String) value;
		}
	}

	/**
	 * Send the result to the topic.
	 *
	 * @param result the result.
	 * @param topic the topic.
	 * @param source the source (input).
	 * @param returnTypeMessage true if we are returning message(s).
	 * @since 2.1.3
	 */
	@SuppressWarnings("unchecked")
	protected void sendResponse(Object result, String topic, @Nullable Object source, boolean returnTypeMessage) {
		if (!returnTypeMessage && topic == null) {
			this.logger.debug(() -> "No replyTopic to handle the reply: " + result);
		}
		else if (result instanceof Message) {
			Message<?> reply = checkHeaders(result, topic, source);
			this.replyTemplate.send(reply);
		}
		else {
			if (result instanceof Iterable) {
				Iterator<?> iterator = ((Iterable<?>) result).iterator();
				boolean iterableOfMessages = false;
				if (iterator.hasNext()) {
					iterableOfMessages = iterator.next() instanceof Message;
				}
				if (iterableOfMessages || this.splitIterables) {
					((Iterable<V>) result).forEach(v -> {
						if (v instanceof Message) {
							this.replyTemplate.send((Message<?>) v);
						}
						else {
							this.replyTemplate.send(topic, v);
						}
					});
				}
				else {
					sendSingleResult(result, topic, source);
				}
			}
			else {
				sendSingleResult(result, topic, source);
			}
		}
	}

	// Ensure the reply Message carries topic, correlation and partition headers,
	// copying missing values from the request message where possible.
	private Message<?> checkHeaders(Object result, String topic, Object source) { // NOSONAR (complexity)
		Message<?> reply = (Message<?>) result;
		MessageHeaders headers = reply.getHeaders();
		boolean needsTopic = headers.get(KafkaHeaders.TOPIC) == null;
		boolean sourceIsMessage = source instanceof Message;
		boolean needsCorrelation = headers.get(KafkaHeaders.CORRELATION_ID) == null && sourceIsMessage;
		boolean needsPartition = headers.get(KafkaHeaders.PARTITION_ID) == null && sourceIsMessage
				&& getReplyPartition((Message<?>) source) != null;
		if (needsTopic || needsCorrelation || needsPartition) {
			MessageBuilder<?> builder = MessageBuilder.fromMessage(reply);
			if (needsTopic) {
				builder.setHeader(KafkaHeaders.TOPIC, topic);
			}
			if (needsCorrelation && sourceIsMessage) {
				builder.setHeader(KafkaHeaders.CORRELATION_ID,
						((Message<?>) source).getHeaders().get(KafkaHeaders.CORRELATION_ID));
			}
			if (needsPartition) {
				// Use the precomputed flag (previously computed but unused): only set the
				// partition when the reply does not already carry PARTITION_ID and the
				// source actually supplies a REPLY_PARTITION header, so an explicitly-set
				// reply partition is never overwritten.
				setPartition(builder, (Message<?>) source);
			}
			reply = builder.build();
		}
		return reply;
	}

	// Send a non-Message reply; when the request was a Message, propagate correlation
	// and partition information to the outgoing record.
	@SuppressWarnings("unchecked")
	private void sendSingleResult(Object result, String topic, @Nullable Object source) {
		byte[] correlationId = null;
		boolean sourceIsMessage = source instanceof Message;
		if (sourceIsMessage
				&& ((Message<?>) source).getHeaders().get(KafkaHeaders.CORRELATION_ID) != null) {
			correlationId = ((Message<?>) source).getHeaders().get(KafkaHeaders.CORRELATION_ID, byte[].class);
		}
		if (sourceIsMessage) {
			sendReplyForMessageSource(result, topic, source, correlationId);
		}
		else {
			this.replyTemplate.send(topic, result);
		}
	}

	// Build and send the reply message, applying the ReplyHeadersConfigurer (if any),
	// the correlation id, and the reply partition taken from the request headers.
	@SuppressWarnings("unchecked")
	private void sendReplyForMessageSource(Object result, String topic, Object source, byte[] correlationId) {
		MessageBuilder<Object> builder = MessageBuilder.withPayload(result)
				.setHeader(KafkaHeaders.TOPIC, topic);
		if (this.replyHeadersConfigurer != null) {
			Map<String, Object> headersToCopy = ((Message<?>) source).getHeaders().entrySet().stream()
					.filter(e -> {
						String key = e.getKey();
						// Never copy framework-managed headers.
						return !key.equals(MessageHeaders.ID) && !key.equals(MessageHeaders.TIMESTAMP)
								&& !key.equals(KafkaHeaders.CORRELATION_ID)
								&& !key.startsWith(KafkaHeaders.RECEIVED);
					})
					.filter(e -> this.replyHeadersConfigurer.shouldCopy(e.getKey(), e.getValue()))
					.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
			if (!headersToCopy.isEmpty()) {
				builder.copyHeaders(headersToCopy);
			}
			headersToCopy = this.replyHeadersConfigurer.additionalHeaders();
			if (!ObjectUtils.isEmpty(headersToCopy)) {
				builder.copyHeaders(headersToCopy);
			}
		}
		if (correlationId != null) {
			builder.setHeader(KafkaHeaders.CORRELATION_ID, correlationId);
		}
		setPartition(builder, ((Message<?>) source));
		this.replyTemplate.send(builder.build());
	}

	// Copy the request's REPLY_PARTITION header (big-endian int bytes) to the reply's
	// PARTITION_ID header, when present.
	private void setPartition(MessageBuilder<?> builder, Message<?> source) {
		byte[] partitionBytes = getReplyPartition(source);
		if (partitionBytes != null) {
			builder.setHeader(KafkaHeaders.PARTITION_ID, ByteBuffer.wrap(partitionBytes).getInt());
		}
	}

	// The raw REPLY_PARTITION header bytes from the request, or null if absent.
	@Nullable
	private byte[] getReplyPartition(Message<?> source) {
		return source.getHeaders().get(KafkaHeaders.REPLY_PARTITION, byte[].class);
	}

	// Compose a diagnostic message that identifies the handler method and bean.
	protected final String createMessagingErrorMessage(String description, Object payload) {
		return description + "\n"
				+ "Endpoint handler details:\n"
				+ "Method [" + this.handlerMethod.getMethodAsString(payload) + "]\n"
				+ "Bean [" + this.handlerMethod.getBean() + "]";
	}

	/**
	 * Subclasses can override this method to use a different mechanism to determine
	 * the target type of the payload conversion.
	 * @param method the method.
	 * @return the type.
	 */
	protected Type determineInferredType(Method method) { // NOSONAR complexity
		if (method == null) {
			return null;
		}
		Type genericParameterType = null;
		int allowedBatchParameters = 1;
		int notConvertibleParameters = 0;
		for (int i = 0; i < method.getParameterCount(); i++) {
			MethodParameter methodParameter = new MethodParameter(method, i);
			/*
			 * We're looking for a single non-annotated parameter, or one annotated with @Payload.
			 * We ignore parameters with type Message, Consumer, Ack, ConsumerRecord because they
			 * are not involved with conversion.
			 */
			Type parameterType = methodParameter.getGenericParameterType();
			boolean isNotConvertible = parameterIsType(parameterType, ConsumerRecord.class);
			boolean isAck = parameterIsType(parameterType, Acknowledgment.class);
			this.hasAckParameter |= isAck;
			isNotConvertible |= isAck;
			boolean isConsumer = parameterIsType(parameterType, Consumer.class);
			isNotConvertible |= isConsumer;
			boolean isMeta = parameterIsType(parameterType, ConsumerRecordMetadata.class);
			this.hasMetadataParameter |= isMeta;
			isNotConvertible |= isMeta;
			if (isNotConvertible) {
				notConvertibleParameters++;
			}
			if (!isNotConvertible && !isMessageWithNoTypeInfo(parameterType)
					&& (methodParameter.getParameterAnnotations().length == 0
					|| methodParameter.hasParameterAnnotation(Payload.class))) {
				if (genericParameterType == null) {
					genericParameterType = extractGenericParameterTypFromMethodParameter(methodParameter);
				}
				else {
					this.logger.debug(() -> "Ambiguous parameters for target payload for method " + method
							+ "; no inferred type available");
					break;
				}
			}
			else if (isAck) {
				allowedBatchParameters++;
			}
			else if (methodParameter.hasParameterAnnotation(Header.class)) {
				Header header = methodParameter.getParameterAnnotation(Header.class);
				if (header != null && KafkaHeaders.GROUP_ID.equals(header.value())) {
					allowedBatchParameters++;
				}
			}
			else {
				if (isConsumer) {
					allowedBatchParameters++;
				}
				else {
					if (parameterType instanceof ParameterizedType
							&& ((ParameterizedType) parameterType).getRawType().equals(Consumer.class)) {
						allowedBatchParameters++;
					}
				}
			}
		}
		if (notConvertibleParameters == method.getParameterCount() && method.getReturnType().equals(void.class)) {
			this.conversionNeeded = false;
		}
		boolean validParametersForBatch = method.getGenericParameterTypes().length <= allowedBatchParameters;
		if (!validParametersForBatch) {
			String stateMessage = "A parameter of type '%s' must be the only parameter "
					+ "(except for an optional 'Acknowledgment' and/or 'Consumer' "
					+ "and/or '@Header(KafkaHeaders.GROUP_ID) String groupId'";
			Assert.state(!this.isConsumerRecords,
					() -> String.format(stateMessage, "ConsumerRecords"));
			Assert.state(!this.isConsumerRecordList,
					() -> String.format(stateMessage, "List<ConsumerRecord>"));
			Assert.state(!this.isMessageList,
					() -> String.format(stateMessage, "List<Message<?>>"));
		}
		this.messageReturnType = KafkaUtils.returnTypeMessageOrCollectionOf(method);
		return genericParameterType;
	}

	// Unwrap Message<T> / List<...> parameter types to find the payload type, setting
	// the isConsumerRecordList / isMessageList / isConsumerRecords flags as a side effect.
	private Type extractGenericParameterTypFromMethodParameter(MethodParameter methodParameter) {
		Type genericParameterType = methodParameter.getGenericParameterType();
		if (genericParameterType instanceof ParameterizedType) {
			ParameterizedType parameterizedType = (ParameterizedType) genericParameterType;
			if (parameterizedType.getRawType().equals(Message.class)) {
				genericParameterType = ((ParameterizedType) genericParameterType).getActualTypeArguments()[0];
			}
			else if (parameterizedType.getRawType().equals(List.class)
					&& parameterizedType.getActualTypeArguments().length == 1) {
				Type paramType = parameterizedType.getActualTypeArguments()[0];
				this.isConsumerRecordList = paramType.equals(ConsumerRecord.class)
						|| (isSimpleListOfConsumerRecord(paramType)
						|| isListOfConsumerRecordUpperBounded(paramType));
				boolean messageHasGeneric = paramType instanceof ParameterizedType
						&& ((ParameterizedType) paramType).getRawType().equals(Message.class);
				this.isMessageList = paramType.equals(Message.class) || messageHasGeneric;
				if (messageHasGeneric) {
					genericParameterType = ((ParameterizedType) paramType).getActualTypeArguments()[0];
				}
			}
			else {
				this.isConsumerRecords = parameterizedType.getRawType().equals(ConsumerRecords.class);
			}
		}
		return genericParameterType;
	}

	// e.g. List<ConsumerRecord<K, V>>
	private boolean isSimpleListOfConsumerRecord(Type paramType) {
		return paramType instanceof ParameterizedType
				&& ((ParameterizedType) paramType).getRawType().equals(ConsumerRecord.class);
	}

	// e.g. List<? extends ConsumerRecord<K, V>>
	private boolean isListOfConsumerRecordUpperBounded(Type paramType) {
		return isWildCardWithUpperBound(paramType)
				&& ((WildcardType) paramType).getUpperBounds()[0] instanceof ParameterizedType
				&& ((ParameterizedType) ((WildcardType) paramType).getUpperBounds()[0])
						.getRawType().equals(ConsumerRecord.class);
	}

	private boolean isWildCardWithUpperBound(Type paramType) {
		return paramType instanceof WildcardType
				&& ((WildcardType) paramType).getUpperBounds() != null
				&& ((WildcardType) paramType).getUpperBounds().length > 0;
	}

	// Message with no usable payload type info: raw Message, or Message<?>.
	private boolean isMessageWithNoTypeInfo(Type parameterType) {
		if (parameterType instanceof ParameterizedType) {
			ParameterizedType parameterizedType = (ParameterizedType) parameterType;
			Type rawType = parameterizedType.getRawType();
			if (rawType.equals(Message.class)) {
				return parameterizedType.getActualTypeArguments()[0] instanceof WildcardType;
			}
		}
		return parameterType.equals(Message.class); // could be Message without a generic type
	}

	// True when the parameter is exactly the given type, raw or parameterized.
	private boolean parameterIsType(Type parameterType, Type type) {
		if (parameterType instanceof ParameterizedType) {
			ParameterizedType parameterizedType = (ParameterizedType) parameterType;
			Type rawType = parameterizedType.getRawType();
			if (rawType.equals(type)) {
				return true;
			}
		}
		return parameterType.equals(type);
	}

	/**
	 * Root object for reply expression evaluation.
	 * @since 2.0
	 */
	public static final class ReplyExpressionRoot {

		private final Object request;

		private final Object source;

		private final Object result;

		public ReplyExpressionRoot(Object request, Object source, Object result) {
			this.request = request;
			this.source = source;
			this.result = result;
		}

		public Object getRequest() {
			return this.request;
		}

		public Object getSource() {
			return this.source;
		}

		public Object getResult() {
			return this.result;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/RecordFilterStrategy.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* Implementations of this interface can signal that a record about
* to be delivered to a message listener should be discarded instead
* of being delivered.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
*
*/
public interface RecordFilterStrategy<K, V> {

	/**
	 * Return true if the record should be discarded.
	 * @param consumerRecord the record.
	 * @return true to discard.
	 */
	boolean filter(ConsumerRecord<K, V> consumerRecord);

	/**
	 * Filter an entire batch of records; to filter all records, return an empty list, not
	 * null.
	 * <p>The default implementation removes matching records from the supplied (mutable)
	 * list in place and returns it.
	 * @param records the records.
	 * @return the filtered records.
	 * @since 2.8
	 */
	default List<ConsumerRecord<K, V>> filterBatch(List<ConsumerRecord<K, V>> records) {
		// removeIf is the idiomatic equivalent of the manual iterator-remove loop.
		records.removeIf(this::filter);
		return records;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/RecordMessagingMessageListenerAdapter.java | /*
* Copyright 2002-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.lang.reflect.Method;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.AcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.KafkaListenerErrorHandler;
import org.springframework.kafka.listener.ListenerExecutionFailedException;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.converter.ProjectingMessageConverter;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.GenericMessage;
/**
* A {@link org.springframework.kafka.listener.MessageListener MessageListener}
* adapter that invokes a configurable {@link HandlerAdapter}; used when the factory is
* configured for the listener to receive individual messages.
*
* <p>Wraps the incoming Kafka Message to Spring's {@link Message} abstraction.
*
* <p>The original {@link ConsumerRecord} and
* the {@link Acknowledgment} are provided as additional arguments so that these can
* be injected as method arguments if necessary.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
* @author Venil Noronha
*/
public class RecordMessagingMessageListenerAdapter<K, V> extends MessagingMessageListenerAdapter<K, V>
		implements AcknowledgingConsumerAwareMessageListener<K, V> {

	// Optional handler for ListenerExecutionFailedException; final - set only at construction.
	private final KafkaListenerErrorHandler errorHandler;

	public RecordMessagingMessageListenerAdapter(Object bean, Method method) {
		this(bean, method, null);
	}

	public RecordMessagingMessageListenerAdapter(Object bean, Method method,
			@Nullable KafkaListenerErrorHandler errorHandler) {

		super(bean, method);
		this.errorHandler = errorHandler;
	}

	/**
	 * Kafka {@link AcknowledgingConsumerAwareMessageListener} entry point.
	 * <p> Delegate the message to the target listener method,
	 * with appropriate conversion of the message argument.
	 * @param record the incoming Kafka {@link ConsumerRecord}.
	 * @param acknowledgment the acknowledgment.
	 * @param consumer the consumer.
	 */
	@Override
	public void onMessage(ConsumerRecord<K, V> record, @Nullable Acknowledgment acknowledgment,
			Consumer<?, ?> consumer) {

		Message<?> message;
		if (isConversionNeeded()) {
			message = toMessagingMessage(record, acknowledgment, consumer);
		}
		else {
			// Listener only takes non-convertible parameters; no payload conversion needed.
			message = NULL_MESSAGE;
		}
		if (this.logger.isDebugEnabled() && !(getMessageConverter() instanceof ProjectingMessageConverter)) {
			// Plain (non-supplier) call: 'message' is reassigned below so a lambda cannot capture it.
			this.logger.debug("Processing [" + message + "]");
		}
		try {
			Object result = invokeHandler(record, acknowledgment, message, consumer);
			if (result != null) {
				handleResult(result, record, message);
			}
		}
		catch (ListenerExecutionFailedException e) { // NOSONAR ex flow control
			if (this.errorHandler != null) {
				try {
					if (message.equals(NULL_MESSAGE)) {
						// Give the error handler the raw record when no conversion was done.
						message = new GenericMessage<>(record);
					}
					Object result = this.errorHandler.handleError(message, e, consumer);
					if (result != null) {
						handleResult(result, record, message);
					}
				}
				catch (Exception ex) {
					throw new ListenerExecutionFailedException(createMessagingErrorMessage(// NOSONAR stack trace loss
							"Listener error handler threw an exception for the incoming message",
							message.getPayload()), ex);
				}
			}
			else {
				throw e;
			}
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/ReplyHeadersConfigurer.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import java.util.Map;
import org.springframework.lang.Nullable;
/**
* A strategy for configuring which headers, if any, should be set in a reply message.
*
* @author Gary Russell
* @since 2.2
*
*/
@FunctionalInterface
public interface ReplyHeadersConfigurer {

	/**
	 * Decide whether a header from the inbound message should be propagated to the
	 * reply message. This strategy is never consulted for
	 * {@link org.springframework.kafka.support.KafkaHeaders#CORRELATION_ID} (it is
	 * always copied). {@link org.springframework.messaging.MessageHeaders#ID},
	 * {@link org.springframework.messaging.MessageHeaders#TIMESTAMP} and
	 * {@code KafkaHeaders.RECEIVED*} headers are never copied, regardless of the
	 * value returned here.
	 * @param headerName the name of the candidate header.
	 * @param headerValue the value of the candidate header.
	 * @return true to propagate the header to the reply.
	 */
	boolean shouldCopy(String headerName, Object headerValue);

	/**
	 * Supply extra headers to set on the reply message.
	 * IMPORTANT: entries returned here overwrite any existing headers with the
	 * same names.
	 * @return the extra headers, or null for none (the default).
	 */
	@Nullable
	default Map<String, Object> additionalHeaders() {
		return null;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/RetryingMessageListenerAdapter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener.adapter;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.AcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
import org.springframework.retry.RecoveryCallback;
import org.springframework.retry.RetryState;
import org.springframework.retry.support.DefaultRetryState;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
/**
* A retrying message listener adapter for {@link MessageListener}s.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @deprecated since 2.8 - use a suitably configured error handler instead.
*
*/
@Deprecated
public class RetryingMessageListenerAdapter<K, V>
		extends AbstractRetryingMessageListenerAdapter<K, V, MessageListener<K, V>>
		implements AcknowledgingConsumerAwareMessageListener<K, V> {

	/**
	 * {@link org.springframework.retry.RetryContext} attribute key for an acknowledgment
	 * if the listener is capable of acknowledging.
	 */
	public static final String CONTEXT_ACKNOWLEDGMENT = "acknowledgment";

	/**
	 * {@link org.springframework.retry.RetryContext} attribute key for the consumer if
	 * the listener is consumer-aware.
	 */
	public static final String CONTEXT_CONSUMER = "consumer";

	/**
	 * {@link org.springframework.retry.RetryContext} attribute key for the record.
	 */
	public static final String CONTEXT_RECORD = "record";

	// Whether stateful retry is used; made final - only assigned in the constructor.
	private final boolean stateful;

	/**
	 * Construct an instance with the provided template and delegate. The exception will
	 * be thrown to the container after retries are exhausted.
	 * @param messageListener the delegate listener.
	 * @param retryTemplate the template.
	 */
	public RetryingMessageListenerAdapter(MessageListener<K, V> messageListener, RetryTemplate retryTemplate) {
		this(messageListener, retryTemplate, null);
	}

	/**
	 * Construct an instance with the provided template, callback and delegate.
	 * @param messageListener the delegate listener.
	 * @param retryTemplate the template.
	 * @param recoveryCallback the recovery callback; if null, the exception will be
	 * thrown to the container after retries are exhausted.
	 */
	public RetryingMessageListenerAdapter(MessageListener<K, V> messageListener, RetryTemplate retryTemplate,
			@Nullable RecoveryCallback<? extends Object> recoveryCallback) {
		this(messageListener, retryTemplate, recoveryCallback, false);
	}

	/**
	 * Construct an instance with the provided template, callback and delegate. When using
	 * stateful retry, the retry context key is a concatenated String
	 * {@code topic-partition-offset}. A
	 * {@link org.springframework.kafka.listener.SeekToCurrentErrorHandler} is required in
	 * the listener container because stateful retry will throw the exception to the
	 * container for each delivery attempt.
	 * @param messageListener the delegate listener.
	 * @param retryTemplate the template.
	 * @param recoveryCallback the recovery callback; if null, the exception will be
	 * thrown to the container after retries are exhausted.
	 * @param stateful true for stateful retry.
	 * @since 2.1.3
	 */
	public RetryingMessageListenerAdapter(MessageListener<K, V> messageListener, RetryTemplate retryTemplate,
			@Nullable RecoveryCallback<? extends Object> recoveryCallback, boolean stateful) {
		super(messageListener, retryTemplate, recoveryCallback);
		Assert.notNull(messageListener, "'messageListener' cannot be null");
		this.stateful = stateful;
	}

	@Override
	public void onMessage(final ConsumerRecord<K, V> record, @Nullable final Acknowledgment acknowledgment,
			final Consumer<?, ?> consumer) {
		RetryState retryState = null;
		if (this.stateful) {
			// The key uniquely identifies this delivery so retry state survives redelivery.
			retryState = new DefaultRetryState(record.topic() + "-" + record.partition() + "-" + record.offset());
		}
		getRetryTemplate().execute(context -> {
			context.setAttribute(CONTEXT_RECORD, record);
			// Call the delegate method matching its declared type; expose the
			// acknowledgment/consumer via context attributes where applicable.
			switch (RetryingMessageListenerAdapter.this.delegateType) {
				case ACKNOWLEDGING_CONSUMER_AWARE:
					context.setAttribute(CONTEXT_ACKNOWLEDGMENT, acknowledgment);
					context.setAttribute(CONTEXT_CONSUMER, consumer);
					RetryingMessageListenerAdapter.this.delegate.onMessage(record, acknowledgment, consumer);
					break;
				case ACKNOWLEDGING:
					context.setAttribute(CONTEXT_ACKNOWLEDGMENT, acknowledgment);
					RetryingMessageListenerAdapter.this.delegate.onMessage(record, acknowledgment);
					break;
				case CONSUMER_AWARE:
					context.setAttribute(CONTEXT_CONSUMER, consumer);
					RetryingMessageListenerAdapter.this.delegate.onMessage(record, consumer);
					break;
				case SIMPLE:
					RetryingMessageListenerAdapter.this.delegate.onMessage(record);
					break;
			}
			return null;
		},
		getRecoveryCallback(), retryState);
	}

	/*
	 * Since the container uses the delegate's type to determine which method to call, we
	 * must implement them all.
	 */

	@Override
	public void onMessage(ConsumerRecord<K, V> data) {
		onMessage(data, null, null); // NOSONAR
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment) {
		onMessage(data, acknowledgment, null); // NOSONAR
	}

	@Override
	public void onMessage(ConsumerRecord<K, V> data, Consumer<?, ?> consumer) {
		onMessage(data, null, consumer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/adapter/package-info.java | /**
* Provides classes for adapting listeners.
*/
@org.springframework.lang.NonNullApi
package org.springframework.kafka.listener.adapter;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/AggregatingReplyingKafkaTemplate.java | /*
* Copyright 2019-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiPredicate;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.BatchConsumerAwareMessageListener;
import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.kafka.listener.GenericMessageListenerContainer;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.util.Assert;
/**
* A replying template that aggregates multiple replies with the same correlation id.
*
* @param <K> the key type.
* @param <V> the outbound data type.
* @param <R> the reply data type.
*
* @author Gary Russell
* @since 2.3
*
*/
public class AggregatingReplyingKafkaTemplate<K, V, R>
		extends ReplyingKafkaTemplate<K, V, Collection<ConsumerRecord<K, R>>>
		implements BatchConsumerAwareMessageListener<K, Collection<ConsumerRecord<K, R>>> {

	/**
	 * Pseudo topic name for the "outer" {@link ConsumerRecord} that has the aggregated
	 * results in its value after a normal release by the release strategy.
	 */
	public static final String AGGREGATED_RESULTS_TOPIC = "aggregatedResults";

	/**
	 * Pseudo topic name for the "outer" {@link ConsumerRecord} that has the aggregated
	 * results in its value after a timeout.
	 */
	public static final String PARTIAL_RESULTS_AFTER_TIMEOUT_TOPIC = "partialResultsAfterTimeout";

	private static final int DEFAULT_COMMIT_TIMEOUT = 30;

	// Replies collected so far, keyed by correlation id; guarded by 'this'.
	private final Map<CorrelationKey, Set<RecordHolder<K, R>>> pending = new HashMap<>();

	// Offsets to commit once nothing is pending; guarded by 'this'.
	private final Map<TopicPartition, Long> offsets = new HashMap<>();

	private final BiPredicate<List<ConsumerRecord<K, R>>, Boolean> releaseStrategy;

	private Duration commitTimeout = Duration.ofSeconds(DEFAULT_COMMIT_TIMEOUT);

	private boolean returnPartialOnTimeout;

	/**
	 * Construct an instance using the provided parameter arguments. The releaseStrategy
	 * is consulted to determine when a collection is "complete".
	 * @param producerFactory the producer factory.
	 * @param replyContainer the reply container.
	 * @param releaseStrategy the release strategy which is a {@link BiPredicate} which is
	 * passed the current list and a boolean to indicate if this is for a normal delivery
	 * or a timeout (when {@link #setReturnPartialOnTimeout(boolean)} is true. The
	 * predicate may modify the list of records.
	 * @since 2.3.5
	 */
	public AggregatingReplyingKafkaTemplate(ProducerFactory<K, V> producerFactory,
			GenericMessageListenerContainer<K, Collection<ConsumerRecord<K, R>>> replyContainer,
			BiPredicate<List<ConsumerRecord<K, R>>, Boolean> releaseStrategy) {
		super(producerFactory, replyContainer);
		Assert.notNull(releaseStrategy, "'releaseStrategy' cannot be null");
		// Manual ack modes are required because offsets are committed here, only after
		// all pending aggregations have been released.
		AckMode ackMode = replyContainer.getContainerProperties().getAckMode();
		Assert.isTrue(ackMode.equals(AckMode.MANUAL) || ackMode.equals(AckMode.MANUAL_IMMEDIATE),
				"The reply container must have a MANUAL or MANUAL_IMMEDIATE AckMode");
		this.releaseStrategy = releaseStrategy;
	}

	/**
	 * Set the timeout to use when committing offsets.
	 * @param commitTimeout the timeout.
	 */
	public void setCommitTimeout(Duration commitTimeout) {
		Assert.notNull(commitTimeout, "'commitTimeout' cannot be null");
		this.commitTimeout = commitTimeout;
	}

	/**
	 * Set to true to return a partial result when a request times out.
	 * @param returnPartialOnTimeout true to return a partial result.
	 */
	public synchronized void setReturnPartialOnTimeout(boolean returnPartialOnTimeout) {
		this.returnPartialOnTimeout = returnPartialOnTimeout;
	}

	@Override
	public void onMessage(List<ConsumerRecord<K, Collection<ConsumerRecord<K, R>>>> data, Consumer<?, ?> consumer) {
		List<ConsumerRecord<K, Collection<ConsumerRecord<K, R>>>> completed = new ArrayList<>();
		data.forEach(record -> {
			Header correlation = record.headers().lastHeader(KafkaHeaders.CORRELATION_ID);
			if (correlation == null) {
				this.logger.error(() -> "No correlationId found in reply: " + KafkaUtils.format(record)
						+ " - to use request/reply semantics, the responding server must return the correlation id "
						+ " in the '" + KafkaHeaders.CORRELATION_ID + "' header");
			}
			else {
				CorrelationKey correlationId = new CorrelationKey(correlation.value());
				synchronized (this) {
					if (isPending(correlationId)) {
						List<ConsumerRecord<K, R>> list = addToCollection(record, correlationId).stream()
								.map(RecordHolder::getRecord)
								.collect(Collectors.toList());
						if (this.releaseStrategy.test(list, false)) {
							// Wrap the aggregated replies in a pseudo-record on the
							// AGGREGATED_RESULTS_TOPIC and release it to the superclass.
							ConsumerRecord<K, Collection<ConsumerRecord<K, R>>> done =
									new ConsumerRecord<>(AGGREGATED_RESULTS_TOPIC, 0, 0L, null, list);
							done.headers()
									.add(new RecordHeader(KafkaHeaders.CORRELATION_ID, correlationId
											.getCorrelationId()));
							this.pending.remove(correlationId);
							checkOffsetsAndCommitIfNecessary(list, consumer);
							completed.add(done);
						}
					}
					else {
						logLateArrival(record, correlationId);
					}
				}
			}
		});
		if (!completed.isEmpty()) {
			super.onMessage(completed);
		}
	}

	@Override
	protected synchronized boolean handleTimeout(CorrelationKey correlationId,
			RequestReplyFuture<K, V, Collection<ConsumerRecord<K, R>>> future) {
		Set<RecordHolder<K, R>> removed = this.pending.remove(correlationId);
		if (removed != null && this.returnPartialOnTimeout) {
			List<ConsumerRecord<K, R>> list = removed.stream()
					.map(RecordHolder::getRecord)
					.collect(Collectors.toList());
			if (this.releaseStrategy.test(list, true)) {
				future.set(new ConsumerRecord<>(PARTIAL_RESULTS_AFTER_TIMEOUT_TOPIC, 0, 0L, null, list));
				return true;
			}
		}
		return false;
	}

	// Track the highest (offset + 1) per partition; commit and clear once no
	// aggregations remain pending, so released records are never re-delivered.
	private void checkOffsetsAndCommitIfNecessary(List<ConsumerRecord<K, R>> list, Consumer<?, ?> consumer) {
		list.forEach(record -> this.offsets.compute(
				new TopicPartition(record.topic(), record.partition()),
				(k, v) -> v == null ? record.offset() + 1 : Math.max(v, record.offset() + 1)));
		if (this.pending.isEmpty() && !this.offsets.isEmpty()) {
			consumer.commitSync(this.offsets.entrySet().stream()
					.collect(Collectors.toMap(Map.Entry::getKey,
							entry -> new OffsetAndMetadata(entry.getValue()))),
					this.commitTimeout);
			this.offsets.clear();
		}
	}

	@SuppressWarnings({ "rawtypes", "unchecked" })
	private Set<RecordHolder<K, R>> addToCollection(ConsumerRecord record, CorrelationKey correlationId) {
		Set<RecordHolder<K, R>> set = this.pending.computeIfAbsent(correlationId, id -> new LinkedHashSet<>());
		set.add(new RecordHolder<>(record));
		return set;
	}

	/**
	 * Wraps a {@link ConsumerRecord} with equality based on topic/partition/offset
	 * (the record class itself does not override equals/hashCode).
	 */
	private static final class RecordHolder<K, R> {

		private final ConsumerRecord<K, R> record;

		RecordHolder(ConsumerRecord<K, R> record) {
			this.record = record;
		}

		ConsumerRecord<K, R> getRecord() {
			return this.record;
		}

		@Override
		public int hashCode() {
			final int prime = 31;
			int result = 1;
			result = prime * result
					+ this.record.topic().hashCode()
					+ this.record.partition()
					+ (int) this.record.offset();
			return result;
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			if (obj == null || getClass() != obj.getClass()) {
				return false;
			}
			@SuppressWarnings("rawtypes")
			RecordHolder other = (RecordHolder) obj;
			if (this.record == null) {
				// Fixed: two holders with null records are now equal (the previous
				// implementation fell through to 'return false' in that case,
				// violating the equals contract).
				return other.record == null;
			}
			if (other.record == null) {
				return false;
			}
			return this.record.topic().equals(other.record.topic())
					&& this.record.partition() == other.record.partition()
					&& this.record.offset() == other.record.offset();
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/CorrelationKey.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import java.math.BigInteger;
import java.util.Arrays;
import org.springframework.util.Assert;
/**
* Wrapper for byte[] that can be used as a hash key. We could have used BigInteger
* instead but this wrapper is much less expensive. We do use a BigInteger in
* {@link #toString()} though.
*
* @author Gary Russell
* @since 2.1.3
*/
public final class CorrelationKey {

	private final byte[] correlationId;

	// Lazily-computed hash; volatile so a value computed by one thread is visible
	// to others. Recomputing the same value twice under a race is harmless.
	private volatile Integer hashCode;

	public CorrelationKey(byte[] correlationId) { // NOSONAR array reference
		Assert.notNull(correlationId, "'correlationId' cannot be null");
		this.correlationId = correlationId; // NOSONAR array reference
	}

	public byte[] getCorrelationId() {
		return this.correlationId; // NOSONAR
	}

	@Override
	public int hashCode() {
		// Read the volatile field once to avoid a benign null-then-unbox race.
		Integer hash = this.hashCode;
		if (hash == null) {
			// Same value as the previous 31 * 1 + Arrays.hashCode(...) computation.
			hash = 31 + Arrays.hashCode(this.correlationId);
			this.hashCode = hash;
		}
		return hash;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		return Arrays.equals(this.correlationId, ((CorrelationKey) obj).correlationId);
	}

	@Override
	public String toString() {
		// BigInteger(byte[]) throws NumberFormatException for a zero-length array,
		// so guard explicitly rather than letting toString() fail.
		return this.correlationId.length == 0
				? "[]"
				: "[" + new BigInteger(this.correlationId) + "]";
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/KafkaReplyTimeoutException.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import org.springframework.kafka.KafkaException;
/**
* Exception when a reply is not received within a timeout.
*
* @author Gary Russell
* @since 2.3
*
*/
public class KafkaReplyTimeoutException extends KafkaException {
// Required for serialization; KafkaException is ultimately a RuntimeException.
private static final long serialVersionUID = 1L;
/**
 * Construct an instance with the provided diagnostic message.
 * @param message the message.
 */
public KafkaReplyTimeoutException(String message) {
super(message);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/ReplyingKafkaOperations.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import java.time.Duration;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
/**
* Request/reply operations.
*
* @param <K> the key type.
* @param <V> the outbound data type.
* @param <R> the reply data type.
*
* @author Gary Russell
* @since 2.1.3
*
*/
public interface ReplyingKafkaOperations<K, V, R> {

	/**
	 * Send a request message and receive a reply message with the default timeout.
	 * @param message the message to send.
	 * @return a RequestReplyMessageFuture.
	 * @since 2.7
	 */
	default RequestReplyMessageFuture<K, V> sendAndReceive(Message<?> message) {
		throw new UnsupportedOperationException();
	}

	/**
	 * Send a request message and receive a reply message.
	 * @param message the message to send.
	 * @param replyTimeout the reply timeout; if null, the default will be used.
	 * @return a RequestReplyMessageFuture.
	 * @since 2.7
	 */
	default RequestReplyMessageFuture<K, V> sendAndReceive(Message<?> message, @Nullable Duration replyTimeout) {
		throw new UnsupportedOperationException();
	}

	/**
	 * Send a request message and receive a reply message.
	 * @param message the message to send.
	 * @param returnType a hint to the message converter for the reply payload type.
	 * @param <P> the reply payload type.
	 * @return a RequestReplyMessageFuture.
	 * @since 2.7
	 */
	default <P> RequestReplyTypedMessageFuture<K, V, P> sendAndReceive(Message<?> message,
			ParameterizedTypeReference<P> returnType) {
		throw new UnsupportedOperationException();
	}

	/**
	 * Send a request message and receive a reply message.
	 * @param message the message to send.
	 * @param replyTimeout the reply timeout; if null, the default will be used.
	 * @param returnType a hint to the message converter for the reply payload type.
	 * @param <P> the reply payload type.
	 * @return a RequestReplyMessageFuture.
	 * @since 2.7
	 */
	// @Nullable added for consistency with the untyped overload above; the Javadoc
	// already documented null as acceptable.
	default <P> RequestReplyTypedMessageFuture<K, V, P> sendAndReceive(Message<?> message,
			@Nullable Duration replyTimeout, ParameterizedTypeReference<P> returnType) {
		throw new UnsupportedOperationException();
	}

	/**
	 * Send a request and receive a reply with the default timeout.
	 * @param record the record to send.
	 * @return a RequestReplyFuture.
	 */
	RequestReplyFuture<K, V, R> sendAndReceive(ProducerRecord<K, V> record);

	/**
	 * Send a request and receive a reply.
	 * @param record the record to send.
	 * @param replyTimeout the reply timeout; if null, the default will be used.
	 * @return a RequestReplyFuture.
	 * @since 2.3
	 */
	RequestReplyFuture<K, V, R> sendAndReceive(ProducerRecord<K, V> record, @Nullable Duration replyTimeout);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/ReplyingKafkaTemplate.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.time.Instant;
import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.SmartLifecycle;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.BatchMessageListener;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.GenericMessageListenerContainer;
import org.springframework.kafka.listener.ListenerUtils;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.serializer.DeserializationException;
import org.springframework.kafka.support.serializer.SerializationUtils;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.util.Assert;
/**
* A KafkaTemplate that implements request/reply semantics.
*
* @param <K> the key type.
* @param <V> the outbound data type.
* @param <R> the reply data type.
*
* @author Gary Russell
* @author Artem Bilan
*
* @since 2.1.3
*
*/
public class ReplyingKafkaTemplate<K, V, R> extends KafkaTemplate<K, V> implements BatchMessageListener<K, R>,
InitializingBean, SmartLifecycle, DisposableBean, ReplyingKafkaOperations<K, V, R> {
private static final String WITH_CORRELATION_ID = " with correlationId: ";
private static final int FIVE = 5;
private static final Duration DEFAULT_REPLY_TIMEOUT = Duration.ofSeconds(FIVE);
private final GenericMessageListenerContainer<K, R> replyContainer;
private final ConcurrentMap<CorrelationKey, RequestReplyFuture<K, V, R>> futures = new ConcurrentHashMap<>();
private final byte[] replyTopic;
private final byte[] replyPartition;
private TaskScheduler scheduler = new ThreadPoolTaskScheduler();
private int phase;
private boolean autoStartup = true;
private Duration defaultReplyTimeout = DEFAULT_REPLY_TIMEOUT;
private boolean schedulerSet;
private boolean sharedReplyTopic;
private Function<ProducerRecord<K, V>, CorrelationKey> correlationStrategy = ReplyingKafkaTemplate::defaultCorrelationIdStrategy;
private String correlationHeaderName = KafkaHeaders.CORRELATION_ID;
private String replyTopicHeaderName = KafkaHeaders.REPLY_TOPIC;
private String replyPartitionHeaderName = KafkaHeaders.REPLY_PARTITION;
private Function<ConsumerRecord<?, ?>, Exception> replyErrorChecker = rec -> null;
private volatile boolean running;
private volatile boolean schedulerInitialized;
/**
 * Construct an instance with the provided producer factory and reply container,
 * with producer auto-flush disabled.
 * @param producerFactory the producer factory.
 * @param replyContainer the reply container; this template is installed as its listener.
 */
public ReplyingKafkaTemplate(ProducerFactory<K, V> producerFactory,
GenericMessageListenerContainer<K, R> replyContainer) {
this(producerFactory, replyContainer, false);
}
/**
 * Construct an instance with the provided producer factory, reply container and
 * flush setting. The reply topic (and, for a single explicit topic/partition
 * assignment, the reply partition) is captured from the container's properties.
 * @param producerFactory the producer factory.
 * @param replyContainer the reply container; this template is installed as its listener.
 * @param autoFlush true to flush the producer after each send.
 */
public ReplyingKafkaTemplate(ProducerFactory<K, V> producerFactory,
GenericMessageListenerContainer<K, R> replyContainer, boolean autoFlush) {
super(producerFactory, autoFlush);
Assert.notNull(replyContainer, "'replyContainer' cannot be null");
this.replyContainer = replyContainer;
this.replyContainer.setupMessageListener(this);
ContainerProperties properties = this.replyContainer.getContainerProperties();
String tempReplyTopic = null;
byte[] tempReplyPartition = null;
TopicPartitionOffset[] topicPartitionsToAssign = properties.getTopicPartitions();
String[] topics = properties.getTopics();
// Exactly one subscribed topic: replies arrive there; partition is unknown.
if (topics != null && topics.length == 1) {
tempReplyTopic = topics[0];
}
// Exactly one explicit topic/partition assignment: both topic and partition known.
else if (topicPartitionsToAssign != null && topicPartitionsToAssign.length == 1) {
TopicPartitionOffset topicPartitionOffset = topicPartitionsToAssign[0];
Assert.notNull(topicPartitionOffset, "'topicPartitionsToAssign' must not be null");
tempReplyTopic = topicPartitionOffset.getTopic();
// Encode the partition number as 4 bytes (big-endian, the ByteBuffer default).
ByteBuffer buffer = ByteBuffer.allocate(4); // NOSONAR magic #
buffer.putInt(topicPartitionOffset.getPartition());
tempReplyPartition = buffer.array();
}
if (tempReplyTopic == null) {
// Nothing determinable; senders must supply the reply headers themselves.
this.replyTopic = null;
this.replyPartition = null;
this.logger.debug(() -> "Could not determine container's reply topic/partition; senders must populate "
+ "at least the " + KafkaHeaders.REPLY_TOPIC + " header, and optionally the "
+ KafkaHeaders.REPLY_PARTITION + " header");
}
else {
this.replyTopic = tempReplyTopic.getBytes(StandardCharsets.UTF_8);
this.replyPartition = tempReplyPartition;
}
}
/**
 * Set the {@link TaskScheduler} to use, replacing the default
 * {@link ThreadPoolTaskScheduler}.
 * NOTE(review): scheduler usage is outside this view; presumably it expires
 * pending reply futures on timeout - confirm against the rest of the class.
 * @param scheduler the scheduler; must not be null.
 */
public void setTaskScheduler(TaskScheduler scheduler) {
Assert.notNull(scheduler, "'scheduler' cannot be null");
this.scheduler = scheduler;
this.schedulerSet = true;
}
/**
 * Return the reply timeout used if no replyTimeout is provided in the
 * {@link #sendAndReceive(ProducerRecord, Duration)} call.
 * @return the timeout.
 * @since 2.3
 */
protected Duration getDefaultReplyTimeout() {
	return this.defaultReplyTimeout;
}

/**
 * Set the reply timeout used if no replyTimeout is provided in the
 * {@link #sendAndReceive(ProducerRecord, Duration)} call.
 * @param defaultReplyTimeout the timeout; must not be null or negative.
 * @since 2.3
 */
public void setDefaultReplyTimeout(Duration defaultReplyTimeout) {
	Assert.notNull(defaultReplyTimeout, "'defaultReplyTimeout' cannot be null");
	// Fixed: the assertion message previously referred to 'replyTimeout',
	// which is not this method's parameter name.
	Assert.isTrue(defaultReplyTimeout.toMillis() >= 0, "'defaultReplyTimeout' must be >= 0");
	this.defaultReplyTimeout = defaultReplyTimeout;
}
@Override
public boolean isRunning() {
// SmartLifecycle state: true between start() and stop().
return this.running;
}
/**
 * Return the {@code SmartLifecycle} phase for this component.
 * @return the phase.
 */
@Override
public int getPhase() {
return this.phase;
}
/**
 * Set the {@code SmartLifecycle} phase controlling start/stop ordering.
 * @param phase the phase.
 */
public void setPhase(int phase) {
this.phase = phase;
}
@Override
public boolean isAutoStartup() {
return this.autoStartup;
}
/**
 * Set whether this template starts automatically with the application context.
 * @param autoStartup true to auto start.
 */
public void setAutoStartup(boolean autoStartup) {
this.autoStartup = autoStartup;
}
/**
 * Return the topics/partitions assigned to the replying listener container.
 * Delegates directly to the reply container; may be null before assignment.
 * @return the topics/partitions.
 */
public Collection<TopicPartition> getAssignedReplyTopicPartitions() {
return this.replyContainer.getAssignedPartitions();
}
/**
 * Set to true when multiple templates are using the same topic for replies. This
 * simply changes logs for unexpected replies to debug instead of error (see
 * {@code logLateArrival}); it does not change routing behavior.
 * @param sharedReplyTopic true if using a shared topic.
 * @since 2.2
 */
public void setSharedReplyTopic(boolean sharedReplyTopic) {
this.sharedReplyTopic = sharedReplyTopic;
}
/**
 * Set a function to be called to establish a unique correlation key for each request
 * record. The function must never return null (enforced at send time).
 * @param correlationStrategy the function.
 * @since 2.3
 */
public void setCorrelationIdStrategy(Function<ProducerRecord<K, V>, CorrelationKey> correlationStrategy) {
Assert.notNull(correlationStrategy, "'correlationStrategy' cannot be null");
this.correlationStrategy = correlationStrategy;
}
/**
 * Set a custom header name for the correlation id. Default
 * {@link KafkaHeaders#CORRELATION_ID}. Used both when sending requests and when
 * matching replies in {@code onMessage}.
 * @param correlationHeaderName the header name.
 * @since 2.3
 */
public void setCorrelationHeaderName(String correlationHeaderName) {
Assert.notNull(correlationHeaderName, "'correlationHeaderName' cannot be null");
this.correlationHeaderName = correlationHeaderName;
}
/**
 * Set a custom header name for the reply topic. Default
 * {@link KafkaHeaders#REPLY_TOPIC}. This is the header added to outbound requests
 * when the container's reply topic could be determined.
 * @param replyTopicHeaderName the header name.
 * @since 2.3
 */
public void setReplyTopicHeaderName(String replyTopicHeaderName) {
Assert.notNull(replyTopicHeaderName, "'replyTopicHeaderName' cannot be null");
this.replyTopicHeaderName = replyTopicHeaderName;
}
/**
 * Set a custom header name for the reply partition. Default
 * {@link KafkaHeaders#REPLY_PARTITION}. Only added when the reply container is
 * assigned a single explicit partition.
 * @param replyPartitionHeaderName the reply partition header name.
 * @since 2.3
 */
public void setReplyPartitionHeaderName(String replyPartitionHeaderName) {
Assert.notNull(replyPartitionHeaderName, "'replyPartitionHeaderName' cannot be null");
this.replyPartitionHeaderName = replyPartitionHeaderName;
}
/**
 * Set a function to examine replies for an error returned by the server. A non-null
 * result from the function completes the reply future exceptionally instead of
 * normally (see {@code checkForErrors}).
 * @param replyErrorChecker the error checker function.
 * @since 2.6.7
 */
public void setReplyErrorChecker(Function<ConsumerRecord<?, ?>, Exception> replyErrorChecker) {
Assert.notNull(replyErrorChecker, "'replyErrorChecker' cannot be null");
this.replyErrorChecker = replyErrorChecker;
}
@Override
public void afterPropertiesSet() {
// Only initialize the default internal scheduler; an externally-provided scheduler
// (schedulerSet == true) is assumed to be managed by the caller. The initialized
// flag prevents double initialization if called again from start().
if (!this.schedulerSet && !this.schedulerInitialized) {
((ThreadPoolTaskScheduler) this.scheduler).initialize();
this.schedulerInitialized = true;
}
}
@Override
public synchronized void start() {
if (!this.running) {
try {
// Ensure the internal scheduler is initialized even if the template was not
// created by a Spring container.
afterPropertiesSet();
}
catch (Exception e) {
throw new KafkaException("Failed to initialize", e);
}
this.replyContainer.start();
this.running = true;
}
}
@Override
public synchronized void stop() {
if (this.running) {
this.running = false;
this.replyContainer.stop();
// NOTE(review): pending futures are discarded without being completed; callers
// blocked on get() rely on their own timeout - confirm this is intended.
this.futures.clear();
}
}
@Override
public void stop(Runnable callback) {
// SmartLifecycle async-stop variant: stop synchronously, then signal completion.
stop();
callback.run();
}
@Override
public RequestReplyMessageFuture<K, V> sendAndReceive(Message<?> message) {
// Delegate with the default timeout and no expected reply type.
return sendAndReceive(message, this.defaultReplyTimeout, null);
}
@Override
public RequestReplyMessageFuture<K, V> sendAndReceive(Message<?> message, Duration replyTimeout) {
// Delegate with no expected reply type.
return sendAndReceive(message, replyTimeout, null);
}
@Override
public <P> RequestReplyTypedMessageFuture<K, V, P> sendAndReceive(Message<?> message,
@Nullable ParameterizedTypeReference<P> returnType) {
// Delegate with the default timeout.
return sendAndReceive(message, this.defaultReplyTimeout, returnType);
}
@SuppressWarnings("unchecked")
@Override
public <P> RequestReplyTypedMessageFuture<K, V, P> sendAndReceive(Message<?> message,
@Nullable Duration replyTimeout,
@Nullable ParameterizedTypeReference<P> returnType) {
// Convert the Message to a ProducerRecord and reuse the record-based request/reply path.
RequestReplyFuture<K, V, R> future = sendAndReceive((ProducerRecord<K, V>) getMessageConverter()
.fromMessage(message, getDefaultTopic()), replyTimeout);
// Adapt the record future to a Message future; the send future is shared.
RequestReplyTypedMessageFuture<K, V, P> replyFuture =
new RequestReplyTypedMessageFuture<>(future.getSendFuture());
future.addCallback(
result -> {
try {
// Convert the reply record to a Message, using the requested payload type if given.
replyFuture.set(getMessageConverter()
.toMessage(result, null, null, returnType == null ? null : returnType.getType()));
}
catch (Exception ex) { // NOSONAR
// Conversion failures complete the message future exceptionally.
replyFuture.setException(ex);
}
},
ex -> replyFuture.setException(ex));
return replyFuture;
}
@Override
public RequestReplyFuture<K, V, R> sendAndReceive(ProducerRecord<K, V> record) {
// Delegate with the default reply timeout.
return sendAndReceive(record, this.defaultReplyTimeout);
}
/**
 * Send a request record and return a future that is completed when the reply arrives,
 * or completed exceptionally with a {@code KafkaReplyTimeoutException} on timeout.
 * Adds the correlation header, and the reply topic/partition headers when not already
 * supplied by the sender.
 * @param record the record to send.
 * @param replyTimeout the reply timeout; falls back to the default timeout when null.
 * @return the future.
 * @throws KafkaException if the send itself fails.
 */
@Override
public RequestReplyFuture<K, V, R> sendAndReceive(ProducerRecord<K, V> record, @Nullable Duration replyTimeout) {
	Assert.state(this.running, "Template has not been start()ed"); // NOSONAR (sync)
	Duration timeout = replyTimeout;
	if (timeout == null) {
		timeout = this.defaultReplyTimeout;
	}
	CorrelationKey correlationId = this.correlationStrategy.apply(record);
	Assert.notNull(correlationId, "the created 'correlationId' cannot be null");
	Headers headers = record.headers();
	// Check the configured (possibly custom) reply topic header name, not just the
	// default KafkaHeaders.REPLY_TOPIC; otherwise a sender-populated custom header
	// would be ignored and a duplicate reply topic header added.
	boolean hasReplyTopic = headers.lastHeader(this.replyTopicHeaderName) != null;
	if (!hasReplyTopic && this.replyTopic != null) {
		headers.add(new RecordHeader(this.replyTopicHeaderName, this.replyTopic));
		if (this.replyPartition != null) {
			headers.add(new RecordHeader(this.replyPartitionHeaderName, this.replyPartition));
		}
	}
	headers.add(new RecordHeader(this.correlationHeaderName, correlationId.getCorrelationId()));
	this.logger.debug(() -> "Sending: " + KafkaUtils.format(record) + WITH_CORRELATION_ID + correlationId);
	RequestReplyFuture<K, V, R> future = new RequestReplyFuture<>();
	// Register before sending so a fast reply cannot race the registration.
	this.futures.put(correlationId, future);
	try {
		future.setSendFuture(send(record));
	}
	catch (Exception e) {
		// Nothing will ever complete the future if the send failed; deregister it.
		this.futures.remove(correlationId);
		throw new KafkaException("Send failed", e);
	}
	scheduleTimeout(record, correlationId, timeout);
	return future;
}
/**
 * Schedule a one-shot task that expires the pending future after the reply timeout.
 * If the future was already removed (reply arrived), the task is a no-op.
 * @param record the original request record (for logging).
 * @param correlationId the correlation id keying the pending future.
 * @param replyTimeout how long to wait for the reply.
 */
private void scheduleTimeout(ProducerRecord<K, V> record, CorrelationKey correlationId, Duration replyTimeout) {
this.scheduler.schedule(() -> {
// remove() is atomic; a null result means the reply won the race and completed the future.
RequestReplyFuture<K, V, R> removed = this.futures.remove(correlationId);
if (removed != null) {
this.logger.warn(() -> "Reply timed out for: " + KafkaUtils.format(record)
+ WITH_CORRELATION_ID + correlationId);
// Give subclasses a chance to complete the future themselves.
if (!handleTimeout(correlationId, removed)) {
removed.setException(new KafkaReplyTimeoutException("Reply timed out"));
}
}
}, Instant.now().plus(replyTimeout));
}
/**
 * Used to inform subclasses that a request has timed out so they can clean up state
 * and, optionally, complete the future. The default implementation does nothing and
 * lets the caller complete the future with a {@code KafkaReplyTimeoutException}.
 * @param correlationId the correlation id.
 * @param future the future.
 * @return true to indicate the future has been completed.
 * @since 2.3
 */
protected boolean handleTimeout(@SuppressWarnings("unused") CorrelationKey correlationId,
@SuppressWarnings("unused") RequestReplyFuture<K, V, R> future) {
return false;
}
/**
 * Return true if this correlation id is still active (a reply has neither arrived
 * nor timed out yet).
 * @param correlationId the correlation id.
 * @return true if pending.
 * @since 2.3
 */
protected boolean isPending(CorrelationKey correlationId) {
return this.futures.containsKey(correlationId);
}
@Override
public void destroy() {
// Only destroy the internally-created scheduler; an externally-provided one
// is owned by the caller.
if (!this.schedulerSet) {
((ThreadPoolTaskScheduler) this.scheduler).destroy();
}
}
/**
 * Default correlation strategy: a random (type 4) UUID rendered as 16 big-endian
 * bytes (most significant long first). The record itself is not consulted.
 * @param record the outbound record (unused).
 * @return the correlation key.
 */
private static <K, V> CorrelationKey defaultCorrelationIdStrategy(
		@SuppressWarnings("unused") ProducerRecord<K, V> record) {
	UUID id = UUID.randomUUID();
	ByteBuffer buffer = ByteBuffer.allocate(16); // NOSONAR magic #
	buffer.putLong(id.getMostSignificantBits());
	buffer.putLong(id.getLeastSignificantBits());
	return new CorrelationKey(buffer.array());
}
/**
 * Listener callback for reply records: match each record's correlation header to a
 * pending future and complete it (normally, or exceptionally if the reply carries a
 * server-side or deserialization error).
 */
@Override
public void onMessage(List<ConsumerRecord<K, R>> data) {
data.forEach(record -> {
Header correlationHeader = record.headers().lastHeader(this.correlationHeaderName);
CorrelationKey correlationId = null;
if (correlationHeader != null) {
correlationId = new CorrelationKey(correlationHeader.value());
}
if (correlationId == null) {
// Cannot correlate without the header; log and drop the record.
this.logger.error(() -> "No correlationId found in reply: " + KafkaUtils.format(record)
+ " - to use request/reply semantics, the responding server must return the correlation id "
+ " in the '" + this.correlationHeaderName + "' header");
}
else {
// Atomically deregister; a null result means the request timed out (or is unknown).
RequestReplyFuture<K, V, R> future = this.futures.remove(correlationId);
// Effectively-final copy for use in the debug lambda below.
CorrelationKey correlationKey = correlationId;
if (future == null) {
logLateArrival(record, correlationId);
}
else {
boolean ok = true;
// A non-null exception (error header or replyErrorChecker) fails the future.
Exception exception = checkForErrors(record);
if (exception != null) {
ok = false;
future.setException(exception);
}
if (ok) {
this.logger.debug(() -> "Received: " + KafkaUtils.format(record)
+ WITH_CORRELATION_ID + correlationKey);
future.set(record);
}
}
}
});
}
/**
 * Check for errors in a reply. The default implementation checks for {@link DeserializationException}s
 * and invokes the {@link #setReplyErrorChecker(Function) replyErrorChecker} function.
 * @param record the record.
 * @return the exception, or null if none.
 * @since 2.6.7
 */
@Nullable
protected Exception checkForErrors(ConsumerRecord<K, R> record) {
// A null key or value may indicate a deserialization failure (the error deserializer
// passes the exception in a header); only then is the header check worthwhile.
if (record.value() == null || record.key() == null) {
DeserializationException de = checkDeserialization(record, this.logger);
if (de != null) {
return de;
}
}
// Fall through to the user-supplied checker (defaults to a no-op returning null).
return this.replyErrorChecker.apply(record);
}
/**
 * Return a {@link DeserializationException} if either the key or value failed
 * deserialization; null otherwise. The value header is checked first, then the key
 * header. If you need to determine whether it was the key or
 * value, call
 * {@link ListenerUtils#getExceptionFromHeader(ConsumerRecord, String, LogAccessor)}
 * with {@link SerializationUtils#KEY_DESERIALIZER_EXCEPTION_HEADER} and
 * {@link SerializationUtils#VALUE_DESERIALIZER_EXCEPTION_HEADER} instead.
 * @param record the record.
 * @param logger a {@link LogAccessor}.
 * @return the {@link DeserializationException} or {@code null}.
 * @since 2.2.15
 */
@Nullable
public static DeserializationException checkDeserialization(ConsumerRecord<?, ?> record, LogAccessor logger) {
DeserializationException exception = ListenerUtils.getExceptionFromHeader(record,
SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, logger);
if (exception != null) {
logger.error(exception, () -> "Reply value deserialization failed for " + record.topic() + "-"
+ record.partition() + "@" + record.offset());
return exception;
}
exception = ListenerUtils.getExceptionFromHeader(record,
SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, logger);
if (exception != null) {
logger.error(exception, () -> "Reply key deserialization failed for " + record.topic() + "-"
+ record.partition() + "@" + record.offset());
return exception;
}
return null;
}
/**
 * Log a reply that has no pending request. On a shared reply topic such records are
 * expected (they belong to other templates), so they are only logged at debug level;
 * otherwise they are logged as errors.
 * @param record the unmatched reply record.
 * @param correlationId its correlation id.
 */
protected void logLateArrival(ConsumerRecord<K, R> record, CorrelationKey correlationId) {
	if (!this.sharedReplyTopic) {
		this.logger.error(() -> missingCorrelationLogMessage(record, correlationId));
	}
	else {
		this.logger.debug(() -> missingCorrelationLogMessage(record, correlationId));
	}
}
/**
 * Build the log message for a reply whose correlation id has no pending request.
 * @param record the reply record.
 * @param correlationId its correlation id.
 * @return the formatted message.
 */
private String missingCorrelationLogMessage(ConsumerRecord<K, R> record, CorrelationKey correlationId) {
	StringBuilder message = new StringBuilder("No pending reply: ");
	message.append(KafkaUtils.format(record))
			.append(WITH_CORRELATION_ID)
			.append(correlationId)
			.append(", perhaps timed out, or using a shared reply topic");
	return message.toString();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/RequestReplyFuture.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SettableListenableFuture;
/**
* A listenable future for requests/replies.
*
* @param <K> the key type.
* @param <V> the outbound data type.
* @param <R> the reply data type.
*
* @author Gary Russell
* @since 2.1.3
*
*/
public class RequestReplyFuture<K, V, R> extends SettableListenableFuture<ConsumerRecord<K, R>> {
// volatile: set by the sending thread, read by arbitrary callers of getSendFuture().
private volatile ListenableFuture<SendResult<K, V>> sendFuture;
/**
 * Set the future for the send result; package/subclass use only.
 * @param sendFuture the send future.
 */
protected void setSendFuture(ListenableFuture<SendResult<K, V>> sendFuture) {
this.sendFuture = sendFuture;
}
/**
 * Return the future for the send result.
 * @return the send future.
 */
public ListenableFuture<SendResult<K, V>> getSendFuture() {
return this.sendFuture;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/RequestReplyMessageFuture.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import org.springframework.kafka.support.SendResult;
import org.springframework.messaging.Message;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SettableListenableFuture;
/**
* A listenable future for {@link Message} replies.
*
* @param <K> the key type.
* @param <V> the outbound data type.
*
* @author Gary Russell
* @since 2.7
*
*/
public class RequestReplyMessageFuture<K, V> extends SettableListenableFuture<Message<?>> {
// The future for the outbound send result; fixed at construction.
private final ListenableFuture<SendResult<K, V>> sendFuture;
/**
 * Construct an instance with the provided send future.
 * @param sendFuture the send future.
 */
RequestReplyMessageFuture(ListenableFuture<SendResult<K, V>> sendFuture) {
this.sendFuture = sendFuture;
}
/**
 * Return the send future.
 * @return the send future.
 */
public ListenableFuture<SendResult<K, V>> getSendFuture() {
return this.sendFuture;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/RequestReplyTypedMessageFuture.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.requestreply;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.springframework.kafka.support.SendResult;
import org.springframework.messaging.Message;
import org.springframework.util.concurrent.ListenableFuture;
/**
* A listenable future for {@link Message} replies with a specific payload type.
*
* @param <K> the key type.
* @param <V> the outbound data type.
* @param <P> the reply payload type.
*
* @author Gary Russell
* @since 2.7
*
*/
public class RequestReplyTypedMessageFuture<K, V, P> extends RequestReplyMessageFuture<K, V> {
/**
 * Construct an instance with the provided send future.
 * @param sendFuture the send future.
 */
RequestReplyTypedMessageFuture(ListenableFuture<SendResult<K, V>> sendFuture) {
super(sendFuture);
}
// The casts below narrow Message<?> to Message<P>; the payload type is established
// by the message converter configured on the template.
@SuppressWarnings("unchecked")
@Override
public Message<P> get() throws InterruptedException, ExecutionException {
return (Message<P>) super.get();
}
@SuppressWarnings("unchecked")
@Override
public Message<P> get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return (Message<P>) super.get(timeout, unit);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/requestreply/package-info.java | /**
* Provides classes for request/reply semantics.
*/
package org.springframework.kafka.requestreply;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/BackOffValuesGenerator.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import org.springframework.retry.backoff.BackOffContext;
import org.springframework.retry.backoff.BackOffPolicy;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.backoff.Sleeper;
import org.springframework.retry.backoff.SleepingBackOffPolicy;
import org.springframework.retry.backoff.UniformRandomBackOffPolicy;
import org.springframework.retry.support.RetrySynchronizationManager;
/**
*
* Generates the backoff values from the provided maxAttempts value and
* {@link BackOffPolicy}.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class BackOffValuesGenerator {

	private static final BackOffPolicy DEFAULT_BACKOFF_POLICY = new FixedBackOffPolicy();

	// Renamed from 'numberOfvaluesToCreate' (casing typo); private, no external impact.
	private final int numberOfValuesToCreate;

	private final BackOffPolicy backOffPolicy;

	/**
	 * Construct an instance for the given attempts and policy.
	 * @param providedMaxAttempts the max attempts; {@code RetryTopicConstants.NOT_SET}
	 * selects the default.
	 * @param providedBackOffPolicy the policy; null selects a {@link FixedBackOffPolicy}.
	 * @throws IllegalArgumentException if the policy is neither a
	 * {@code SleepingBackOffPolicy} nor a {@code NoBackOffPolicy}.
	 */
	public BackOffValuesGenerator(int providedMaxAttempts, BackOffPolicy providedBackOffPolicy) {
		// One fewer backoff value than attempts: the first attempt has no preceding delay.
		this.numberOfValuesToCreate = getMaxAttemps(providedMaxAttempts) - 1;
		BackOffPolicy policy = providedBackOffPolicy != null ? providedBackOffPolicy : DEFAULT_BACKOFF_POLICY;
		checkBackOffPolicyType(policy);
		this.backOffPolicy = policy;
	}

	/**
	 * Resolve the effective max attempts, substituting the default when not set.
	 * (Method name kept as-is - including its original spelling - for backwards
	 * compatibility, since it is public.)
	 * @param providedMaxAttempts the provided value, possibly NOT_SET.
	 * @return the effective max attempts.
	 */
	public int getMaxAttemps(int providedMaxAttempts) {
		return providedMaxAttempts != RetryTopicConstants.NOT_SET
				? providedMaxAttempts
				: RetryTopicConstants.DEFAULT_MAX_ATTEMPTS;
	}

	/**
	 * Generate the list of backoff values (milliseconds) for the retry topics.
	 * @return one value per retry attempt.
	 */
	public List<Long> generateValues() {
		return NoBackOffPolicy.class.isAssignableFrom(this.backOffPolicy.getClass())
				? generateFromNoBackOffPolicy(this.numberOfValuesToCreate)
				: generateFromSleepingBackOffPolicy(this.numberOfValuesToCreate, this.backOffPolicy);
	}

	// Renamed from 'checkBackOffPolicyTipe' (typo); private, no external impact.
	private void checkBackOffPolicyType(BackOffPolicy providedBackOffPolicy) {
		if (!(SleepingBackOffPolicy.class.isAssignableFrom(providedBackOffPolicy.getClass())
				|| NoBackOffPolicy.class.isAssignableFrom(providedBackOffPolicy.getClass()))) {
			throw new IllegalArgumentException("Either a SleepingBackOffPolicy or a NoBackOffPolicy must be provided. " +
					"Provided BackOffPolicy: " + providedBackOffPolicy.getClass().getSimpleName());
		}
	}

	private List<Long> generateFromSleepingBackOffPolicy(int maxAttempts, BackOffPolicy providedBackOffPolicy) {
		// Inject a sleeper that records, rather than performs, each sleep, so that
		// driving the policy yields the backoff values without actually waiting.
		BackoffRetainerSleeper sleeper = new BackoffRetainerSleeper();
		SleepingBackOffPolicy<?> retainingBackOffPolicy = ((SleepingBackOffPolicy<?>) providedBackOffPolicy).withSleeper(sleeper);
		// UniformRandomBackOffPolicy loses the max value when a sleeper is set.
		if (providedBackOffPolicy instanceof UniformRandomBackOffPolicy) {
			((UniformRandomBackOffPolicy) retainingBackOffPolicy)
					.setMaxBackOffPeriod(((UniformRandomBackOffPolicy) providedBackOffPolicy).getMaxBackOffPeriod());
		}
		BackOffContext backOffContext = retainingBackOffPolicy.start(RetrySynchronizationManager.getContext());
		IntStream.range(0, maxAttempts)
				.forEach(index -> retainingBackOffPolicy.backOff(backOffContext));
		return sleeper.getBackoffValues();
	}

	private List<Long> generateFromNoBackOffPolicy(int maxAttempts) {
		// No backoff: every attempt gets a zero delay.
		return LongStream
				.range(0, maxAttempts)
				.mapToObj(index -> 0L)
				.collect(Collectors.toList());
	}

	/**
	 * This class is injected in the backoff policy to gather and hold the generated backoff values.
	 */
	private static class BackoffRetainerSleeper implements Sleeper {

		private static final long serialVersionUID = 1L;

		private final List<Long> backoffValues = new ArrayList<>();

		@Override
		public void sleep(long backOffPeriod) throws InterruptedException {
			// Record the requested sleep instead of sleeping.
			this.backoffValues.add(backOffPeriod);
		}

		public List<Long> getBackoffValues() {
			return this.backoffValues;
		}
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.springframework.core.NestedRuntimeException;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.SeekUtils;
import org.springframework.kafka.listener.TimestampedException;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.util.Assert;
/**
*
* Creates and configures the {@link DeadLetterPublishingRecoverer} that will be used to
* forward the messages using the {@link DestinationTopicResolver}.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
*/
public class DeadLetterPublishingRecovererFactory {
private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(DeadLetterPublishingRecovererFactory.class));
private final DestinationTopicResolver destinationTopicResolver;
private final Set<Class<? extends Exception>> fatalExceptions = new LinkedHashSet<>();
private final Set<Class<? extends Exception>> nonFatalExceptions = new HashSet<>();
private Consumer<DeadLetterPublishingRecoverer> recovererCustomizer = recoverer -> { };
private BiFunction<ConsumerRecord<?, ?>, Exception, Headers> headersFunction;
public DeadLetterPublishingRecovererFactory(DestinationTopicResolver destinationTopicResolver) {
this.destinationTopicResolver = destinationTopicResolver;
}
/**
* Set a function that creates additional headers for the output record, in addition to the standard
* retry headers added by this factory.
* @param headersFunction the function.
* @since 2.8.4
*/
public void setHeadersFunction(BiFunction<ConsumerRecord<?, ?>, Exception, Headers> headersFunction) {
this.headersFunction = headersFunction;
}
/**
* Add exception type to the default list. By default, the following exceptions will
* not be retried:
* <ul>
* <li>{@link org.springframework.kafka.support.serializer.DeserializationException}</li>
* <li>{@link org.springframework.messaging.converter.MessageConversionException}</li>
* <li>{@link org.springframework.kafka.support.converter.ConversionException}</li>
* <li>{@link org.springframework.messaging.handler.invocation.MethodArgumentResolutionException}</li>
* <li>{@link NoSuchMethodException}</li>
* <li>{@link ClassCastException}</li>
* </ul>
* All others will be retried.
* @param exceptionType the exception type.
* @since 2.8
* @see #removeNotRetryableException(Class)
*/
public final void addNotRetryableException(Class<? extends Exception> exceptionType) {
Assert.notNull(exceptionType, "'exceptionType' cannot be null");
this.fatalExceptions.add(exceptionType);
}
/**
* Remove an exception type from the configured list. By default, the following
* exceptions will not be retried:
* <ul>
* <li>{@link org.springframework.kafka.support.serializer.DeserializationException}</li>
* <li>{@link org.springframework.messaging.converter.MessageConversionException}</li>
* <li>{@link org.springframework.kafka.support.converter.ConversionException}</li>
* <li>{@link org.springframework.messaging.handler.invocation.MethodArgumentResolutionException}</li>
* <li>{@link NoSuchMethodException}</li>
* <li>{@link ClassCastException}</li>
* </ul>
* All others will be retried.
* @param exceptionType the exception type.
* @return true if the removal was successful.
* @see #addNotRetryableException(Class)
*/
public boolean removeNotRetryableException(Class<? extends Exception> exceptionType) {
return this.nonFatalExceptions.add(exceptionType);
}
@SuppressWarnings("unchecked")
public DeadLetterPublishingRecoverer create() {
DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(// NOSONAR anon. class size
this::resolveTemplate,
false, (this::resolveDestination)) {
@Override
protected DeadLetterPublishingRecoverer.HeaderNames getHeaderNames() {
return DeadLetterPublishingRecoverer.HeaderNames.Builder
.original()
.offsetHeader(KafkaHeaders.ORIGINAL_OFFSET)
.timestampHeader(KafkaHeaders.ORIGINAL_TIMESTAMP)
.timestampTypeHeader(KafkaHeaders.ORIGINAL_TIMESTAMP_TYPE)
.topicHeader(KafkaHeaders.ORIGINAL_TOPIC)
.partitionHeader(KafkaHeaders.ORIGINAL_PARTITION)
.consumerGroupHeader(KafkaHeaders.ORIGINAL_CONSUMER_GROUP)
.exception()
.keyExceptionFqcn(KafkaHeaders.KEY_EXCEPTION_FQCN)
.exceptionFqcn(KafkaHeaders.EXCEPTION_FQCN)
.exceptionCauseFqcn(KafkaHeaders.EXCEPTION_CAUSE_FQCN)
.keyExceptionMessage(KafkaHeaders.KEY_EXCEPTION_MESSAGE)
.exceptionMessage(KafkaHeaders.EXCEPTION_MESSAGE)
.keyExceptionStacktrace(KafkaHeaders.KEY_EXCEPTION_STACKTRACE)
.exceptionStacktrace(KafkaHeaders.EXCEPTION_STACKTRACE)
.build();
}
};
recoverer.setHeadersFunction((consumerRecord, e) -> addHeaders(consumerRecord, e, getAttempts(consumerRecord)));
if (this.headersFunction != null) {
recoverer.addHeadersFunction(this.headersFunction);
}
recoverer.setFailIfSendResultIsError(true);
recoverer.setAppendOriginalHeaders(false);
recoverer.setThrowIfNoDestinationReturned(false);
recoverer.setSkipSameTopicFatalExceptions(false);
this.recovererCustomizer.accept(recoverer);
this.fatalExceptions.forEach(recoverer::addNotRetryableExceptions);
this.nonFatalExceptions.forEach(recoverer::removeClassification);
return recoverer;
}
private KafkaOperations<?, ?> resolveTemplate(ProducerRecord<?, ?> outRecord) {
return this.destinationTopicResolver
.getDestinationTopicByName(outRecord.topic())
.getKafkaOperations();
}
public void setDeadLetterPublishingRecovererCustomizer(Consumer<DeadLetterPublishingRecoverer> customizer) {
this.recovererCustomizer = customizer;
}
private TopicPartition resolveDestination(ConsumerRecord<?, ?> cr, Exception e) {
if (SeekUtils.isBackoffException(e)) {
throw (NestedRuntimeException) e; // Necessary to not commit the offset and seek to current again
}
DestinationTopic nextDestination = this.destinationTopicResolver.resolveDestinationTopic(
cr.topic(), getAttempts(cr), e, getOriginalTimestampHeaderLong(cr));
LOGGER.debug(() -> "Resolved topic: " + (nextDestination.isNoOpsTopic()
? "none"
: nextDestination.getDestinationName()));
return nextDestination.isNoOpsTopic()
? null
: resolveTopicPartition(cr, nextDestination);
}
/**
* Creates and returns the {@link TopicPartition}, where the original record should be forwarded.
* By default, it will use the partition same as original record's partition, in the next destination topic.
*
* <p>{@link DeadLetterPublishingRecoverer#checkPartition} has logic to check whether that partition exists,
* and if it doesn't it sets -1, to allow the Producer itself to assign a partition to the record.</p>
*
* <p>Subclasses can inherit from this method to override the implementation, if necessary.</p>
*
* @param cr The original {@link ConsumerRecord}, which is to be forwarded to DLT
* @param nextDestination The next {@link DestinationTopic}, where the consumerRecord is to be forwarded
* @return An instance of {@link TopicPartition}, specifying the topic and partition, where the cr is to be sent
*/
protected TopicPartition resolveTopicPartition(final ConsumerRecord<?, ?> cr, final DestinationTopic nextDestination) {
return new TopicPartition(nextDestination.getDestinationName(), cr.partition());
}
private int getAttempts(ConsumerRecord<?, ?> consumerRecord) {
Header header = consumerRecord.headers().lastHeader(RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS);
if (header != null) {
byte[] value = header.value();
if (value.length == Byte.BYTES) { // backwards compatibility
return value[0];
}
else if (value.length == Integer.BYTES) {
return ByteBuffer.wrap(value).getInt();
}
else {
LOGGER.debug(() -> "Unexected size for " + RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS + " header: "
+ value.length);
}
}
return 1;
}
private Headers addHeaders(ConsumerRecord<?, ?> consumerRecord, Exception e, int attempts) {
Headers headers = new RecordHeaders();
byte[] originalTimestampHeader = getOriginalTimestampHeaderBytes(consumerRecord);
headers.add(RetryTopicHeaders.DEFAULT_HEADER_ORIGINAL_TIMESTAMP, originalTimestampHeader);
headers.add(RetryTopicHeaders.DEFAULT_HEADER_ATTEMPTS,
ByteBuffer.wrap(new byte[Integer.BYTES]).putInt(attempts + 1).array());
headers.add(RetryTopicHeaders.DEFAULT_HEADER_BACKOFF_TIMESTAMP,
BigInteger.valueOf(getNextExecutionTimestamp(consumerRecord, e, originalTimestampHeader))
.toByteArray());
return headers;
}
private long getNextExecutionTimestamp(ConsumerRecord<?, ?> consumerRecord, Exception e,
byte[] originalTimestampHeader) {
long originalTimestamp = new BigInteger(originalTimestampHeader).longValue();
long failureTimestamp = getFailureTimestamp(e);
long nextExecutionTimestamp = failureTimestamp + this.destinationTopicResolver
.resolveDestinationTopic(consumerRecord.topic(), getAttempts(consumerRecord), e, originalTimestamp)
.getDestinationDelay();
LOGGER.debug(() -> String.format("FailureTimestamp: %s, Original timestamp: %s, nextExecutionTimestamp: %s",
failureTimestamp, originalTimestamp, nextExecutionTimestamp));
return nextExecutionTimestamp;
}
// Prefer the timestamp carried by a nested TimestampedException; otherwise fall
// back to the current time as the failure time.
private long getFailureTimestamp(Exception e) {
	if (e instanceof NestedRuntimeException
			&& ((NestedRuntimeException) e).contains(TimestampedException.class)) {
		return getTimestampedException(e).getTimestamp();
	}
	return Instant.now().toEpochMilli();
}
/**
 * Walk the cause chain looking for a {@link TimestampedException}.
 * @param e the throwable to start from (null once the chain is exhausted).
 * @return the first {@link TimestampedException} found in the chain.
 * @throws IllegalArgumentException if no such cause exists.
 */
private TimestampedException getTimestampedException(Throwable e) {
	if (e == null) {
		throw new IllegalArgumentException("Provided exception does not contain a "
				+ TimestampedException.class.getSimpleName() + " cause.");
	}
	// Fix: the previous 'e.getClass().isAssignableFrom(TimestampedException.class)'
	// check was inverted - it missed subclasses of TimestampedException and could
	// throw ClassCastException when e's class was a supertype of it. 'instanceof'
	// matches TimestampedException and any subclass, which is the intended test.
	return e instanceof TimestampedException
			? (TimestampedException) e
			: getTimestampedException(e.getCause());
}
// Return the original-timestamp header value if present; otherwise encode the
// record's own timestamp so downstream topics see a consistent "original" time.
private byte[] getOriginalTimestampHeaderBytes(ConsumerRecord<?, ?> consumerRecord) {
	Header originalTimestampHeader = getOriginaTimeStampHeader(consumerRecord);
	if (originalTimestampHeader != null) {
		return originalTimestampHeader.value();
	}
	return BigInteger.valueOf(consumerRecord.timestamp()).toByteArray();
}
// Same as getOriginalTimestampHeaderBytes, but decoded to a long.
private long getOriginalTimestampHeaderLong(ConsumerRecord<?, ?> consumerRecord) {
	Header originalTimestampHeader = getOriginaTimeStampHeader(consumerRecord);
	if (originalTimestampHeader != null) {
		return new BigInteger(originalTimestampHeader.value()).longValue();
	}
	return consumerRecord.timestamp();
}
// Return the last original-timestamp header on the record, or null if absent.
// NOTE(review): the method name has a typo ("OriginaTimeStamp"); renaming it
// would require touching every caller, so it is left as-is here.
private Header getOriginaTimeStampHeader(ConsumerRecord<?, ?> consumerRecord) {
	return consumerRecord.headers()
			.lastHeader(RetryTopicHeaders.DEFAULT_HEADER_ORIGINAL_TIMESTAMP);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DefaultDestinationTopicProcessor.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Collectors;
/**
*
* Default implementation of the {@link DestinationTopicProcessor} interface.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class DefaultDestinationTopicProcessor implements DestinationTopicProcessor {

	private final DestinationTopicResolver destinationTopicResolver;

	public DefaultDestinationTopicProcessor(DestinationTopicResolver destinationTopicResolver) {
		this.destinationTopicResolver = destinationTopicResolver;
	}

	@Override
	public void processDestinationTopicProperties(Consumer<DestinationTopic.Properties> destinationPropertiesProcessor,
			Context context) {
		// Iterate the list directly - creating a Stream just to call forEach is unnecessary.
		context.properties.forEach(destinationPropertiesProcessor);
	}

	@Override
	public void registerDestinationTopic(String mainTopicName, String destinationTopicName,
			DestinationTopic.Properties destinationTopicProperties, Context context) {
		// Group destinations by their main topic, creating the list on first use.
		List<DestinationTopic> topicDestinations = context.destinationsByTopicMap
				.computeIfAbsent(mainTopicName, newTopic -> new ArrayList<>());
		topicDestinations.add(new DestinationTopic(destinationTopicName, destinationTopicProperties));
	}

	@Override
	public void processRegisteredDestinations(Consumer<Collection<String>> topicsCallback, Context context) {
		context
				.destinationsByTopicMap
				.values()
				.forEach(this.destinationTopicResolver::addDestinationTopics);
		topicsCallback.accept(getAllTopicNames(context));
	}

	// Flatten the destination topic names registered for every main topic.
	private List<String> getAllTopicNames(Context context) {
		return context.destinationsByTopicMap
				.values()
				.stream()
				.flatMap(Collection::stream)
				.map(DestinationTopic::getDestinationName)
				.collect(Collectors.toList());
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DefaultDestinationTopicResolver.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.time.Clock;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.kafka.listener.ExceptionClassifier;
import org.springframework.kafka.listener.ListenerExecutionFailedException;
import org.springframework.kafka.listener.TimestampedException;
/**
*
* Default implementation of the DestinationTopicResolver interface.
* The container is closed when a {@link ContextRefreshedEvent} is received
* and no more destinations can be added after that.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @author Yvette Quinby
* @since 2.7
*
*/
public class DefaultDestinationTopicResolver extends ExceptionClassifier
		implements DestinationTopicResolver, ApplicationListener<ContextRefreshedEvent> {

	private static final String NO_OPS_SUFFIX = "-noOps";

	private static final List<Class<? extends Throwable>> FRAMEWORK_EXCEPTIONS =
			Arrays.asList(ListenerExecutionFailedException.class, TimestampedException.class);

	// Maps each source topic name to its holder (that topic plus the next
	// destination in the retry chain).
	private final Map<String, DestinationTopicHolder> sourceDestinationsHolderMap;

	// Maps every registered destination topic name to its DestinationTopic.
	private final Map<String, DestinationTopic> destinationsTopicMap;

	private final Clock clock;

	private final ApplicationContext applicationContext;

	// Becomes true when the owning application context is refreshed; after that
	// no more destinations can be added and map reads skip synchronization.
	// NOTE(review): written by the context-event thread and read by consumer
	// threads without volatile/synchronization - confirm visibility is ensured
	// elsewhere (e.g. a happens-before during container startup).
	private boolean contextRefreshed;

	public DefaultDestinationTopicResolver(Clock clock, ApplicationContext applicationContext) {
		this.applicationContext = applicationContext;
		this.clock = clock;
		this.sourceDestinationsHolderMap = new HashMap<>();
		this.destinationsTopicMap = new HashMap<>();
		this.contextRefreshed = false;
	}

	@Override
	public DestinationTopic resolveDestinationTopic(String topic, Integer attempt, Exception e,
			long originalTimestamp) {
		DestinationTopicHolder destinationTopicHolder = getDestinationHolderFor(topic);
		if (destinationTopicHolder.getSourceDestination().isDltTopic()) {
			// The failure happened while processing a DLT record.
			return handleDltProcessingFailure(destinationTopicHolder, e);
		}
		if (destinationTopicHolder.getSourceDestination().shouldRetryOn(attempt, maybeUnwrapException(e))
				&& isNotFatalException(e)
				&& !isPastTimeout(originalTimestamp, destinationTopicHolder)) {
			return resolveRetryDestination(destinationTopicHolder);
		}
		return resolveDltOrNoOpsDestination(topic);
	}

	// True if the configured classifier does not consider the exception fatal.
	private Boolean isNotFatalException(Exception e) {
		return getClassifier().classify(e);
	}

	// Recursively unwrap framework wrapper exceptions so classification sees the
	// user's original exception.
	private Throwable maybeUnwrapException(Throwable e) {
		return FRAMEWORK_EXCEPTIONS
				.stream()
				.filter(frameworkException -> frameworkException.isAssignableFrom(e.getClass()))
				.map(frameworkException -> maybeUnwrapException(e.getCause()))
				.findFirst()
				.orElse(e);
	}

	// Renamed from isPastTimout (typo); private, so no external callers are affected.
	private boolean isPastTimeout(long originalTimestamp, DestinationTopicHolder destinationTopicHolder) {
		long timeout = destinationTopicHolder.getNextDestination().getDestinationTimeout();
		return timeout != RetryTopicConstants.NOT_SET
				&& Instant.now(this.clock).toEpochMilli() > originalTimestamp + timeout;
	}

	// On DLT processing failure, either send back to the DLT itself (if so
	// configured and the exception is not fatal) or move to the next destination.
	private DestinationTopic handleDltProcessingFailure(DestinationTopicHolder destinationTopicHolder, Exception e) {
		return destinationTopicHolder.getSourceDestination().isAlwaysRetryOnDltFailure()
				&& isNotFatalException(e)
						? destinationTopicHolder.getSourceDestination()
						: destinationTopicHolder.getNextDestination();
	}

	// Single-topic (fixed delay) retries stay on the same topic; otherwise advance.
	private DestinationTopic resolveRetryDestination(DestinationTopicHolder destinationTopicHolder) {
		return destinationTopicHolder.getSourceDestination().isSingleTopicRetry()
				? destinationTopicHolder.getSourceDestination()
				: destinationTopicHolder.getNextDestination();
	}

	@Override
	public DestinationTopic getDestinationTopicByName(String topic) {
		return Objects.requireNonNull(this.destinationsTopicMap.get(topic),
				() -> "No topic found for " + topic);
	}

	// Walk the chain forward until a DLT or no-ops destination is found.
	private DestinationTopic resolveDltOrNoOpsDestination(String topic) {
		DestinationTopic destination = getDestinationFor(topic);
		return destination.isDltTopic() || destination.isNoOpsTopic()
				? destination
				: resolveDltOrNoOpsDestination(destination.getDestinationName());
	}

	private DestinationTopic getDestinationFor(String topic) {
		return getDestinationHolderFor(topic).getNextDestination();
	}

	private DestinationTopicHolder getDestinationHolderFor(String topic) {
		// Before the context refresh, destinations may still be added concurrently,
		// so reads must synchronize with addDestinationTopics().
		return this.contextRefreshed
				? doGetDestinationFor(topic)
				: getDestinationTopicSynchronized(topic);
	}

	private DestinationTopicHolder getDestinationTopicSynchronized(String topic) {
		synchronized (this.sourceDestinationsHolderMap) {
			return doGetDestinationFor(topic);
		}
	}

	private DestinationTopicHolder doGetDestinationFor(String topic) {
		return Objects.requireNonNull(this.sourceDestinationsHolderMap.get(topic),
				() -> "No destination found for topic: " + topic);
	}

	@Override
	public void addDestinationTopics(List<DestinationTopic> destinationsToAdd) {
		if (this.contextRefreshed) {
			throw new IllegalStateException("Cannot add new destinations, "
					+ DefaultDestinationTopicResolver.class.getSimpleName() + " is already refreshed.");
		}
		synchronized (this.sourceDestinationsHolderMap) {
			this.destinationsTopicMap.putAll(destinationsToAdd
					.stream()
					.collect(Collectors.toMap(DestinationTopic::getDestinationName, destination -> destination)));
			this.sourceDestinationsHolderMap.putAll(correlatePairSourceAndDestinationValues(destinationsToAdd));
		}
	}

	// Pair each destination with the next one in the list; the last destination
	// gets a synthetic no-ops destination as its "next".
	private Map<String, DestinationTopicHolder> correlatePairSourceAndDestinationValues(
			List<DestinationTopic> destinationList) {
		return IntStream
				.range(0, destinationList.size())
				.boxed()
				.collect(Collectors.toMap(index -> destinationList.get(index).getDestinationName(),
						index -> new DestinationTopicHolder(destinationList.get(index),
								getNextDestinationTopic(destinationList, index))));
	}

	private DestinationTopic getNextDestinationTopic(List<DestinationTopic> destinationList, int index) {
		return index != destinationList.size() - 1
				? destinationList.get(index + 1)
				: new DestinationTopic(destinationList.get(index).getDestinationName() + NO_OPS_SUFFIX,
						destinationList.get(index), NO_OPS_SUFFIX, DestinationTopic.Type.NO_OPS);
	}

	@Override
	public void onApplicationEvent(ContextRefreshedEvent event) {
		// Only react to our own context's refresh (parent/child contexts may also fire).
		if (Objects.equals(event.getApplicationContext(), this.applicationContext)) {
			this.contextRefreshed = true;
		}
	}

	/**
	 * Return true if the application context is refreshed.
	 * @return true if refreshed.
	 * @since 2.7.8
	 */
	public boolean isContextRefreshed() {
		return this.contextRefreshed;
	}

	/**
	 * Holder for a destination topic and the next destination in its chain.
	 */
	public static class DestinationTopicHolder {

		private final DestinationTopic sourceDestination;

		private final DestinationTopic nextDestination;

		DestinationTopicHolder(DestinationTopic sourceDestination, DestinationTopic nextDestination) {
			this.sourceDestination = sourceDestination;
			this.nextDestination = nextDestination;
		}

		protected DestinationTopic getNextDestination() {
			return this.nextDestination;
		}

		protected DestinationTopic getSourceDestination() {
			return this.sourceDestination;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DestinationTopic.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.Objects;
import java.util.function.BiPredicate;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.lang.Nullable;
/**
*
* Representation of a Destination Topic to which messages can be forwarded, such as retry topics and dlt.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
*/
public class DestinationTopic {

	private final String destinationName;

	private final Properties properties;

	public DestinationTopic(String destinationName, Properties properties) {
		this.destinationName = destinationName;
		this.properties = properties;
	}

	public DestinationTopic(String destinationName, DestinationTopic sourceDestinationtopic, String suffix, Type type) {
		this.destinationName = destinationName;
		this.properties = new Properties(sourceDestinationtopic.properties, suffix, type);
	}

	// The delay, in milliseconds, before records sent here should be processed.
	public Long getDestinationDelay() {
		return this.properties.delayMs;
	}

	public Integer getDestinationPartitions() {
		return this.properties.numPartitions;
	}

	public boolean isAlwaysRetryOnDltFailure() {
		return this.properties.dltStrategy == DltStrategy.ALWAYS_RETRY_ON_ERROR;
	}

	public boolean isDltTopic() {
		return this.properties.type == Type.DLT;
	}

	public boolean isNoOpsTopic() {
		return this.properties.type == Type.NO_OPS;
	}

	public boolean isSingleTopicRetry() {
		return this.properties.type == Type.SINGLE_TOPIC_RETRY;
	}

	public boolean isMainTopic() {
		return this.properties.type == Type.MAIN;
	}

	public String getDestinationName() {
		return this.destinationName;
	}

	public KafkaOperations<?, ?> getKafkaOperations() {
		return this.properties.kafkaOperations;
	}

	// Delegates the retry decision to the configured predicate.
	public boolean shouldRetryOn(Integer attempt, Throwable e) {
		return this.properties.shouldRetryOn.test(attempt, e);
	}

	@Override
	public String toString() {
		return "DestinationTopic{" +
				"destinationName='" + this.destinationName + '\'' +
				", properties=" + this.properties +
				'}';
	}

	@Override
	public boolean equals(Object other) {
		if (this == other) {
			return true;
		}
		if (other == null || getClass() != other.getClass()) {
			return false;
		}
		DestinationTopic otherTopic = (DestinationTopic) other;
		return this.destinationName.equals(otherTopic.destinationName)
				&& this.properties.equals(otherTopic.properties);
	}

	@Override
	public int hashCode() {
		return Objects.hash(this.destinationName, this.properties);
	}

	public long getDestinationTimeout() {
		return this.properties.timeout;
	}

	/**
	 * The configuration for a destination topic: its delay, suffix, type,
	 * attempt/partition counts, DLT strategy, template, retry predicate,
	 * timeout and DLT handler auto-start flag.
	 */
	public static class Properties {

		private final long delayMs;

		private final String suffix;

		private final Type type;

		private final int maxAttempts;

		private final int numPartitions;

		private final DltStrategy dltStrategy;

		private final KafkaOperations<?, ?> kafkaOperations;

		private final BiPredicate<Integer, Throwable> shouldRetryOn;

		private final long timeout;

		private final Boolean autoStartDltHandler;

		/**
		 * Create an instance with the provided properties with the DLT container starting
		 * automatically (if the container factory is so configured).
		 * @param delayMs the delay in ms.
		 * @param suffix the suffix.
		 * @param type the type.
		 * @param maxAttempts the max attempts.
		 * @param numPartitions the number of partitions.
		 * @param dltStrategy the DLT strategy.
		 * @param kafkaOperations the {@link KafkaOperations}.
		 * @param shouldRetryOn the exception classifications.
		 * @param timeout the timeout.
		 */
		public Properties(long delayMs, String suffix, Type type,
				int maxAttempts, int numPartitions,
				DltStrategy dltStrategy,
				KafkaOperations<?, ?> kafkaOperations,
				BiPredicate<Integer, Throwable> shouldRetryOn, long timeout) {
			this(delayMs, suffix, type, maxAttempts, numPartitions, dltStrategy, kafkaOperations, shouldRetryOn,
					timeout, null);
		}

		/**
		 * Create an instance copying the source properties, with a new suffix and
		 * type, and the DLT container starting automatically.
		 * @param sourceProperties the source properties.
		 * @param suffix the suffix.
		 * @param type the type.
		 */
		public Properties(Properties sourceProperties, String suffix, Type type) {
			this(sourceProperties.delayMs, suffix, type, sourceProperties.maxAttempts, sourceProperties.numPartitions,
					sourceProperties.dltStrategy, sourceProperties.kafkaOperations, sourceProperties.shouldRetryOn,
					sourceProperties.timeout, null);
		}

		/**
		 * Create an instance with the provided properties.
		 * @param delayMs the delay in ms.
		 * @param suffix the suffix.
		 * @param type the type.
		 * @param maxAttempts the max attempts.
		 * @param numPartitions the number of partitions.
		 * @param dltStrategy the DLT strategy.
		 * @param kafkaOperations the {@link KafkaOperations}.
		 * @param shouldRetryOn the exception classifications.
		 * @param timeout the timeout.
		 * @param autoStartDltHandler whether or not to start the DLT handler.
		 * @since 2.8
		 */
		public Properties(long delayMs, String suffix, Type type,
				int maxAttempts, int numPartitions,
				DltStrategy dltStrategy,
				KafkaOperations<?, ?> kafkaOperations,
				BiPredicate<Integer, Throwable> shouldRetryOn, long timeout, @Nullable Boolean autoStartDltHandler) {
			this.delayMs = delayMs;
			this.suffix = suffix;
			this.type = type;
			this.maxAttempts = maxAttempts;
			this.numPartitions = numPartitions;
			this.dltStrategy = dltStrategy;
			this.kafkaOperations = kafkaOperations;
			this.shouldRetryOn = shouldRetryOn;
			this.timeout = timeout;
			this.autoStartDltHandler = autoStartDltHandler;
		}

		public boolean isDltTopic() {
			return this.type == Type.DLT;
		}

		public String suffix() {
			return this.suffix;
		}

		public long delay() {
			return this.delayMs;
		}

		@Nullable
		public Boolean autoStartDltHandler() {
			return this.autoStartDltHandler;
		}

		@Override
		public boolean equals(Object other) {
			if (this == other) {
				return true;
			}
			if (other == null || getClass() != other.getClass()) {
				return false;
			}
			Properties otherProperties = (Properties) other;
			// NOTE: shouldRetryOn, timeout and autoStartDltHandler are deliberately
			// not part of equality, matching hashCode below.
			return this.delayMs == otherProperties.delayMs
					&& this.maxAttempts == otherProperties.maxAttempts
					&& this.numPartitions == otherProperties.numPartitions
					&& this.suffix.equals(otherProperties.suffix)
					&& this.type == otherProperties.type
					&& this.dltStrategy == otherProperties.dltStrategy
					&& this.kafkaOperations.equals(otherProperties.kafkaOperations);
		}

		@Override
		public int hashCode() {
			return Objects.hash(this.delayMs, this.suffix, this.type, this.maxAttempts, this.numPartitions,
					this.dltStrategy, this.kafkaOperations);
		}

		@Override
		public String toString() {
			return "Properties{" +
					"delayMs=" + this.delayMs +
					", suffix='" + this.suffix + '\'' +
					", type=" + this.type +
					", maxAttempts=" + this.maxAttempts +
					", numPartitions=" + this.numPartitions +
					", dltStrategy=" + this.dltStrategy +
					", kafkaOperations=" + this.kafkaOperations +
					", shouldRetryOn=" + this.shouldRetryOn +
					", timeout=" + this.timeout +
					'}';
		}

		public boolean isMainEndpoint() {
			return this.type == Type.MAIN;
		}

	}

	/**
	 * The role a destination topic plays in the retry chain.
	 */
	enum Type {
		MAIN, RETRY, SINGLE_TOPIC_RETRY, DLT, NO_OPS
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DestinationTopicContainer.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.List;
/**
*
* Provides methods to store and retrieve {@link DestinationTopic} instances.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public interface DestinationTopicContainer {

	/**
	 * Adds the provided destination topics to the container.
	 * @param destinationTopics the {@link DestinationTopic} list to add.
	 */
	void addDestinationTopics(List<DestinationTopic> destinationTopics);

	/**
	 * Returns the DestinationTopic instance registered for that topic.
	 * NOTE(review): implementations may throw rather than return null when no
	 * topic is registered under that name (the default implementation does) -
	 * confirm before relying on a null return.
	 * @param topicName the topic name of the DestinationTopic to be returned.
	 * @return the DestinationTopic instance registered for that topic.
	 */
	DestinationTopic getDestinationTopicByName(String topicName);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DestinationTopicProcessor.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
/**
*
* The {@link DestinationTopicProcessor} creates and registers the
* {@link DestinationTopic} instances in the provided {@link Context}, also
* providing callback interfaces to be called upon the context properties.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public interface DestinationTopicProcessor {

	/**
	 * Process the destination properties.
	 * @param destinationPropertiesProcessor the processor.
	 * @param context the context.
	 */
	void processDestinationTopicProperties(Consumer<DestinationTopic.Properties> destinationPropertiesProcessor, Context context);

	/**
	 * Process the registered destinations.
	 * @param topicsConsumer the consumer.
	 * @param context the context.
	 */
	void processRegisteredDestinations(Consumer<Collection<String>> topicsConsumer, Context context);

	/**
	 * Register the destination topic.
	 * @param mainTopicName the main topic name.
	 * @param destinationTopicName the destination topic name.
	 * @param destinationTopicProperties the destination topic properties.
	 * @param context the context.
	 */
	void registerDestinationTopic(String mainTopicName, String destinationTopicName,
			DestinationTopic.Properties destinationTopicProperties, Context context);

	/**
	 * Shared state for the processing steps: the destination topics registered so
	 * far, grouped by main topic name, and the destination properties to process.
	 */
	class Context {

		// Destination topics registered so far, keyed by their main topic name.
		protected final Map<String, List<DestinationTopic>> destinationsByTopicMap; // NOSONAR

		// The destination topic properties to be processed.
		protected final List<DestinationTopic.Properties> properties; // NOSONAR

		public Context(List<DestinationTopic.Properties> properties) {
			this.destinationsByTopicMap = new HashMap<>();
			this.properties = properties;
		}
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DestinationTopicPropertiesFactory.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiPredicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.springframework.classify.BinaryExceptionClassifier;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.util.StringUtils;
/**
*
* Creates a list of {@link DestinationTopic.Properties} based on the
* provided configurations.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
*/
public class DestinationTopicPropertiesFactory {

	private static final String MAIN_TOPIC_SUFFIX = "";

	private final DestinationTopicSuffixes destinationTopicSuffixes;

	private final List<Long> backOffValues;

	private final BinaryExceptionClassifier exceptionClassifier;

	private final int numPartitions;

	private final int maxAttempts;

	private final KafkaOperations<?, ?> kafkaOperations;

	private final FixedDelayStrategy fixedDelayStrategy;

	private final DltStrategy dltStrategy;

	private final TopicSuffixingStrategy topicSuffixingStrategy;

	private final long timeout;

	private Boolean autoStartDltHandler;

	public DestinationTopicPropertiesFactory(String retryTopicSuffix, String dltSuffix, List<Long> backOffValues,
			BinaryExceptionClassifier exceptionClassifier,
			int numPartitions, KafkaOperations<?, ?> kafkaOperations,
			FixedDelayStrategy fixedDelayStrategy,
			DltStrategy dltStrategy,
			TopicSuffixingStrategy topicSuffixingStrategy,
			long timeout) {
		this.dltStrategy = dltStrategy;
		this.kafkaOperations = kafkaOperations;
		this.exceptionClassifier = exceptionClassifier;
		this.numPartitions = numPartitions;
		this.fixedDelayStrategy = fixedDelayStrategy;
		this.topicSuffixingStrategy = topicSuffixingStrategy;
		this.timeout = timeout;
		this.destinationTopicSuffixes = new DestinationTopicSuffixes(retryTopicSuffix, dltSuffix);
		this.backOffValues = backOffValues;
		// Max attempts includes the initial try, so it is one more than the
		// number of back off values.
		this.maxAttempts = this.backOffValues.size() + 1;
	}

	/**
	 * Set to false to not start the DLT handler.
	 * @param autoStart false to not start.
	 * @return this factory.
	 * @since 2.8
	 */
	public DestinationTopicPropertiesFactory autoStartDltHandler(Boolean autoStart) {
		this.autoStartDltHandler = autoStart;
		return this;
	}

	// Build the full list of destination properties for the configured strategy.
	public List<DestinationTopic.Properties> createProperties() {
		if (isSingleTopicFixedDelay()) {
			return createPropertiesForFixedDelaySingleTopic();
		}
		return createPropertiesForDefaultTopicStrategy();
	}

	// Fixed-delay single-topic mode: main topic + one retry topic (+ DLT if configured).
	private List<DestinationTopic.Properties> createPropertiesForFixedDelaySingleTopic() {
		DestinationTopic.Properties mainProperties = createMainTopicProperties();
		DestinationTopic.Properties retryProperties =
				createRetryProperties(1, DestinationTopic.Type.SINGLE_TOPIC_RETRY, getShouldRetryOn());
		if (isNoDltStrategy()) {
			return Arrays.asList(mainProperties, retryProperties);
		}
		return Arrays.asList(mainProperties, retryProperties, createDltProperties());
	}

	private boolean isSingleTopicFixedDelay() {
		return isFixedDelay() && isSingleTopicStrategy();
	}

	private boolean isSingleTopicStrategy() {
		return FixedDelayStrategy.SINGLE_TOPIC.equals(this.fixedDelayStrategy);
	}

	// Default mode: main topic, one retry topic per back off value, and a DLT
	// unless the NO_DLT strategy is configured.
	private List<DestinationTopic.Properties> createPropertiesForDefaultTopicStrategy() {
		int lastIndex = isNoDltStrategy() ? this.maxAttempts - 1 : this.maxAttempts;
		return IntStream.rangeClosed(0, lastIndex)
				.mapToObj(this::createRetryOrDltTopicSuffixes)
				.collect(Collectors.toList());
	}

	private boolean isNoDltStrategy() {
		return DltStrategy.NO_DLT.equals(this.dltStrategy);
	}

	// Index 0 is the main topic, indexes 1..maxAttempts-1 are retry topics, and
	// index maxAttempts (when present) is the DLT.
	private DestinationTopic.Properties createRetryOrDltTopicSuffixes(int index) {
		BiPredicate<Integer, Throwable> shouldRetryOn = getShouldRetryOn();
		if (index == 0) {
			return createMainTopicProperties();
		}
		if (index < this.maxAttempts) {
			return createRetryProperties(index, DestinationTopic.Type.RETRY, shouldRetryOn);
		}
		return createDltProperties();
	}

	private DestinationTopic.Properties createMainTopicProperties() {
		return new DestinationTopic.Properties(0, MAIN_TOPIC_SUFFIX, DestinationTopic.Type.MAIN, this.maxAttempts,
				this.numPartitions, this.dltStrategy, this.kafkaOperations, getShouldRetryOn(), this.timeout);
	}

	private DestinationTopic.Properties createDltProperties() {
		// The DLT never retries, hence the always-false predicate.
		return new DestinationTopic.Properties(0, this.destinationTopicSuffixes.getDltSuffix(),
				DestinationTopic.Type.DLT, this.maxAttempts, this.numPartitions, this.dltStrategy,
				this.kafkaOperations, (a, e) -> false, this.timeout, this.autoStartDltHandler);
	}

	private BiPredicate<Integer, Throwable> getShouldRetryOn() {
		return (attempt, throwable) -> attempt < this.maxAttempts && this.exceptionClassifier.classify(throwable);
	}

	private DestinationTopic.Properties createRetryProperties(int index,
			DestinationTopic.Type topicType,
			BiPredicate<Integer, Throwable> shouldRetryOn) {
		// Retry index 1 maps to the first back off value.
		int indexInBackoffValues = index - 1;
		Long thisBackOffValue = this.backOffValues.get(indexInBackoffValues);
		return createProperties(topicType, shouldRetryOn, indexInBackoffValues,
				getTopicSuffix(indexInBackoffValues, thisBackOffValue));
	}

	// Choose the retry topic suffix according to the suffixing strategy; repeated
	// back off values get an extra ordinal so topic names stay unique.
	private String getTopicSuffix(int indexInBackoffValues, Long thisBackOffValue) {
		if (isSingleTopicFixedDelay()) {
			return this.destinationTopicSuffixes.getRetrySuffix();
		}
		if (isSuffixWithIndexStrategy() || isFixedDelay()) {
			return joinWithRetrySuffix(indexInBackoffValues);
		}
		String suffix = joinWithRetrySuffix(thisBackOffValue);
		if (hasDuplicates(thisBackOffValue)) {
			suffix = suffix.concat("-" + getIndexInBackoffValues(indexInBackoffValues, thisBackOffValue));
		}
		return suffix;
	}

	private int getIndexInBackoffValues(int indexInBackoffValues, Long thisBackOffValue) {
		return indexInBackoffValues - this.backOffValues.indexOf(thisBackOffValue);
	}

	private boolean isSuffixWithIndexStrategy() {
		return TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE.equals(this.topicSuffixingStrategy);
	}

	private boolean hasDuplicates(Long thisBackOffValue) {
		long occurrences = this.backOffValues
				.stream()
				.filter(thisBackOffValue::equals)
				.count();
		return occurrences > 1;
	}

	private DestinationTopic.Properties createProperties(DestinationTopic.Type topicType,
			BiPredicate<Integer, Throwable> shouldRetryOn,
			int indexInBackoffValues,
			String suffix) {
		return new DestinationTopic.Properties(this.backOffValues.get(indexInBackoffValues), suffix,
				topicType, this.maxAttempts, this.numPartitions, this.dltStrategy,
				this.kafkaOperations, shouldRetryOn, this.timeout);
	}

	private boolean isFixedDelay() {
		// More than one entry but a single distinct value means a fixed delay,
		// such as with NoBackOffPolicy and FixedBackOffPolicy.
		return this.backOffValues.size() > 1 && this.backOffValues.stream().distinct().count() == 1;
	}

	private String joinWithRetrySuffix(long parameter) {
		return String.join("-", this.destinationTopicSuffixes.getRetrySuffix(), String.valueOf(parameter));
	}

	/**
	 * Holder for the retry and DLT suffixes, falling back to the framework
	 * defaults when none are provided.
	 */
	public static class DestinationTopicSuffixes {

		private final String retryTopicSuffix;

		private final String dltSuffix;

		public DestinationTopicSuffixes(String retryTopicSuffix, String dltSuffix) {
			this.retryTopicSuffix = StringUtils.hasText(retryTopicSuffix)
					? retryTopicSuffix
					: RetryTopicConstants.DEFAULT_RETRY_SUFFIX;
			this.dltSuffix = StringUtils.hasText(dltSuffix) ? dltSuffix : RetryTopicConstants.DEFAULT_DLT_SUFFIX;
		}

		public String getRetrySuffix() {
			return this.retryTopicSuffix;
		}

		public String getDltSuffix() {
			return this.dltSuffix;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DestinationTopicResolver.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
 *
 * Provides methods for resolving the destination to which a message that failed
 * to be processed should be forwarded to (e.g. the next retry topic or the DLT).
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public interface DestinationTopicResolver extends DestinationTopicContainer {
	/**
	 *
	 * Resolves the destination topic for the failed message, taking into account the
	 * current topic, how many attempts have already been made and how long ago the
	 * first failure occurred.
	 *
	 * @param topic the current topic for the message.
	 * @param attempt the number of processing attempts already made for that message.
	 * @param e the exception the message processing has thrown
	 * @param originalTimestamp the time when the first attempt to process the message
	 * threw an exception.
	 * @return the {@link DestinationTopic} for the given parameters.
	 *
	 */
	DestinationTopic resolveDestinationTopic(String topic, Integer attempt, Exception e, long originalTimestamp);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/DltStrategy.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
 *
 * Strategies for handling DLT processing, i.e. what should happen when the
 * listener for the dead-letter topic itself throws an exception.
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public enum DltStrategy {
	/**
	 * Don't create a DLT at all; records that exhaust their retries are discarded.
	 */
	NO_DLT,
	/**
	 * Always send the message back to the DLT for reprocessing in case of failure in
	 * DLT processing.
	 */
	ALWAYS_RETRY_ON_ERROR,
	/**
	 * Fail if DLT processing throws an error, without re-sending the record to the DLT.
	 */
	FAIL_ON_ERROR
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/EndpointCustomizer.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.Collection;
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
/**
 * Customizes main, retry and DLT endpoints in the Retry Topic functionality
 * and returns the resulting topic names.
 *
 * @author Tomaz Fernandes
 * @since 2.7.2
 *
 * @see EndpointCustomizerFactory
 *
 */
@FunctionalInterface
public interface EndpointCustomizer {
	/**
	 * Customize the endpoint and return the topic names generated for this endpoint.
	 * @param listenerEndpoint The main, retry or DLT endpoint to be customized.
	 * @return A collection containing the topic names generated for this endpoint.
	 */
	Collection<TopicNamesHolder> customizeEndpointAndCollectTopics(MethodKafkaListenerEndpoint<?, ?> listenerEndpoint);

	/**
	 * Pairs a main (user-declared) topic name with the suffixed topic name that was
	 * generated for it (e.g. "myTopic" -> "myTopic-retry-1000").
	 */
	class TopicNamesHolder {

		// The topic name as declared by the user on the listener.
		private final String mainTopic;

		// The generated (suffixed) topic name for the retry/DLT endpoint.
		private final String customizedTopic;

		TopicNamesHolder(String mainTopic, String customizedTopic) {
			this.mainTopic = mainTopic;
			this.customizedTopic = customizedTopic;
		}

		String getMainTopic() {
			return this.mainTopic;
		}

		String getCustomizedTopic() {
			return this.customizedTopic;
		}
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/EndpointCustomizerFactory.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Collection;
import java.util.stream.Collectors;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
import org.springframework.kafka.support.EndpointHandlerMethod;
import org.springframework.kafka.support.TopicPartitionOffset;
/**
 *
 * Creates the {@link EndpointCustomizer} that will be used by the {@link RetryTopicConfigurer}
 * to customize the main, retry and DLT endpoints.
 *
 * @author Tomaz Fernandes
 * @author Gary Russell
 * @since 2.7.2
 *
 * @see RetryTopicConfigurer
 * @see DestinationTopic.Properties
 * @see RetryTopicNamesProviderFactory.RetryTopicNamesProvider
 *
 */
public class EndpointCustomizerFactory {

	private final DestinationTopic.Properties destinationProperties;

	private final EndpointHandlerMethod beanMethod;

	private final BeanFactory beanFactory;

	private final RetryTopicNamesProviderFactory retryTopicNamesProviderFactory;

	EndpointCustomizerFactory(DestinationTopic.Properties destinationProperties, EndpointHandlerMethod beanMethod,
			BeanFactory beanFactory, RetryTopicNamesProviderFactory retryTopicNamesProviderFactory) {
		this.destinationProperties = destinationProperties;
		this.beanMethod = beanMethod;
		this.beanFactory = beanFactory;
		this.retryTopicNamesProviderFactory = retryTopicNamesProviderFactory;
	}

	/**
	 * Create the customizer for the configured destination, resolving the handler bean
	 * from the {@link BeanFactory}.
	 * @return the endpoint customizer.
	 */
	public final EndpointCustomizer createEndpointCustomizer() {
		Object resolvedBean = this.beanMethod.resolveBean(this.beanFactory);
		Method handlerMethod = this.beanMethod.getMethod();
		return addSuffixesAndMethod(this.destinationProperties, resolvedBean, handlerMethod);
	}

	protected EndpointCustomizer addSuffixesAndMethod(DestinationTopic.Properties properties, Object bean, Method method) {
		RetryTopicNamesProviderFactory.RetryTopicNamesProvider namesProvider =
				this.retryTopicNamesProviderFactory.createRetryTopicNamesProvider(properties);
		return endpoint -> {
			// Resolve the suffixed topic names for this endpoint first, then apply all
			// name-provider-derived values to the endpoint.
			Collection<EndpointCustomizer.TopicNamesHolder> topics = customizeAndRegisterTopics(namesProvider, endpoint);
			String[] customizedTopicNames = topics.stream()
					.map(EndpointCustomizer.TopicNamesHolder::getCustomizedTopic)
					.toArray(String[]::new);
			endpoint.setId(namesProvider.getEndpointId(endpoint));
			endpoint.setGroupId(namesProvider.getGroupId(endpoint));
			endpoint.setTopics(customizedTopicNames);
			endpoint.setClientIdPrefix(namesProvider.getClientIdPrefix(endpoint));
			endpoint.setGroup(namesProvider.getGroup(endpoint));
			endpoint.setBean(bean);
			endpoint.setMethod(method);
			// A DLT handler endpoint may have its auto-startup overridden by the user.
			Boolean autoStartDltHandler = properties.autoStartDltHandler();
			if (autoStartDltHandler != null && properties.isDltTopic()) {
				endpoint.setAutoStartup(autoStartDltHandler);
			}
			return topics;
		};
	}

	protected Collection<EndpointCustomizer.TopicNamesHolder> customizeAndRegisterTopics(
			RetryTopicNamesProviderFactory.RetryTopicNamesProvider namesProvider,
			MethodKafkaListenerEndpoint<?, ?> endpoint) {
		// Pair each user-declared topic with its generated (suffixed) counterpart.
		return getTopics(endpoint)
				.stream()
				.map(mainTopic -> new EndpointCustomizer.TopicNamesHolder(mainTopic, namesProvider.getTopicName(mainTopic)))
				.collect(Collectors.toList());
	}

	// Determines the topics for the endpoint, either from its declared topics or from
	// its explicitly assigned topic-partitions; fails if neither yields any topic.
	private Collection<String> getTopics(MethodKafkaListenerEndpoint<?, ?> endpoint) {
		Collection<String> declaredTopics = endpoint.getTopics();
		if (!declaredTopics.isEmpty()) {
			return declaredTopics;
		}
		TopicPartitionOffset[] partitionsToAssign = endpoint.getTopicPartitionsToAssign();
		if (partitionsToAssign != null && partitionsToAssign.length > 0) {
			Collection<String> topicsFromPartitions = Arrays.stream(partitionsToAssign)
					.map(TopicPartitionOffset::getTopic)
					.distinct()
					.collect(Collectors.toList());
			if (!topicsFromPartitions.isEmpty()) {
				return topicsFromPartitions;
			}
		}
		throw new IllegalStateException(
				String.format("No topics were provided for RetryTopicConfiguration for method %s in class %s.",
						endpoint.getMethod().getName(), endpoint.getBean().getClass().getSimpleName()));
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/FixedDelayStrategy.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
*
* Defines the topic strategy to handle fixed delays.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public enum FixedDelayStrategy {
/**
* Uses a single topic to achieve non-blocking retry.
*/
SINGLE_TOPIC,
/**
* Uses one separate topic per retry attempt.
*/
MULTIPLE_TOPICS
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/ListenerContainerFactoryConfigurer.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.time.Clock;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpoint;
import org.springframework.kafka.listener.AcknowledgingConsumerAwareMessageListener;
import org.springframework.kafka.listener.CommonErrorHandler;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.kafka.listener.KafkaConsumerBackoffManager;
import org.springframework.kafka.listener.adapter.KafkaBackoffAwareMessageListenerAdapter;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.util.Assert;
import org.springframework.util.backoff.BackOff;
/**
 *
 * Decorates the provided {@link ConcurrentKafkaListenerContainerFactory} to add a
 * {@link DefaultErrorHandler} and the {@link DeadLetterPublishingRecoverer}
 * created by the {@link DeadLetterPublishingRecovererFactory}.
 *
 * Also sets {@link ContainerProperties#setIdlePartitionEventInterval(Long)}
 * and {@link ContainerProperties#setPollTimeout(long)} if its defaults haven't
 * been overridden by the user.
 *
 * Since 2.8.3 these configurations don't interfere with the provided factory
 * instance itself, so the same factory instance can be shared among retryable and
 * non-retryable endpoints.
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public class ListenerContainerFactoryConfigurer {

	// Tracks factories already mutated by the deprecated configure() methods so the
	// same instance isn't configured twice; the decorateFactory() path doesn't use it.
	private static final Set<ConcurrentKafkaListenerContainerFactory<?, ?>> CONFIGURED_FACTORIES_CACHE;

	private static final LogAccessor LOGGER = new LogAccessor(
			LogFactory.getLog(ListenerContainerFactoryConfigurer.class));

	static {
		CONFIGURED_FACTORIES_CACHE = new HashSet<>();
	}

	// Bounds applied to the derived poll timeout (milliseconds).
	private static final int MIN_POLL_TIMEOUT_VALUE = 100;

	private static final int MAX_POLL_TIMEOUT_VALUE = 5000;

	// The poll timeout is derived as (lowest back off) / POLL_TIMEOUT_DIVISOR.
	private static final int POLL_TIMEOUT_DIVISOR = 4;

	// Back offs at or below this threshold always use MIN_POLL_TIMEOUT_VALUE.
	private static final long LOWEST_BACKOFF_THRESHOLD = 1500L;

	// Optional BackOff for blocking retries; null means single-attempt (non-blocking only).
	private BackOff providedBlockingBackOff = null;

	// Exception types retried via blocking retries; null means none configured.
	private Class<? extends Exception>[] blockingExceptionTypes = null;

	private Consumer<ConcurrentMessageListenerContainer<?, ?>> containerCustomizer = container -> {
	};

	private Consumer<CommonErrorHandler> errorHandlerCustomizer = errorHandler -> {
	};

	private final DeadLetterPublishingRecovererFactory deadLetterPublishingRecovererFactory;

	private final KafkaConsumerBackoffManager kafkaConsumerBackoffManager;

	// Clock used by the back off adapter; injectable for testability.
	private final Clock clock;

	public ListenerContainerFactoryConfigurer(KafkaConsumerBackoffManager kafkaConsumerBackoffManager,
											DeadLetterPublishingRecovererFactory deadLetterPublishingRecovererFactory,
											@Qualifier(RetryTopicInternalBeanNames
													.INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) {
		this.kafkaConsumerBackoffManager = kafkaConsumerBackoffManager;
		this.deadLetterPublishingRecovererFactory = deadLetterPublishingRecovererFactory;
		this.clock = clock;
	}

	/**
	 * Configures the provided {@link ConcurrentKafkaListenerContainerFactory}.
	 * @param containerFactory the factory instance to be configured.
	 * @param configuration the configuration provided by the {@link RetryTopicConfiguration}.
	 * @return the configured factory instance.
	 * @deprecated in favor of
	 * {@link #decorateFactory(ConcurrentKafkaListenerContainerFactory, Configuration)}.
	 */
	@Deprecated
	public ConcurrentKafkaListenerContainerFactory<?, ?> configure(
			ConcurrentKafkaListenerContainerFactory<?, ?> containerFactory, Configuration configuration) {
		return isCached(containerFactory)
				? containerFactory
				: addToCache(doConfigure(containerFactory, configuration, true));
	}

	/**
	 * Configures the provided {@link ConcurrentKafkaListenerContainerFactory}.
	 * Meant to be used for the main endpoint, this method ignores the provided backOff values.
	 * @param containerFactory the factory instance to be configured.
	 * @param configuration the configuration provided by the {@link RetryTopicConfiguration}.
	 * @return the configured factory instance.
	 * @deprecated in favor of
	 * {@link #decorateFactoryWithoutSettingContainerProperties(ConcurrentKafkaListenerContainerFactory, Configuration)}.
	 */
	@Deprecated
	public ConcurrentKafkaListenerContainerFactory<?, ?> configureWithoutBackOffValues(
			ConcurrentKafkaListenerContainerFactory<?, ?> containerFactory, Configuration configuration) {
		return isCached(containerFactory)
				? containerFactory
				: doConfigure(containerFactory, configuration, false);
	}

	/**
	 * Decorates the provided {@link ConcurrentKafkaListenerContainerFactory}.
	 * @param factory the factory instance to be decorated.
	 * @param configuration the configuration provided by the {@link RetryTopicConfiguration}.
	 * @return the decorated factory instance.
	 */
	public KafkaListenerContainerFactory<?> decorateFactory(ConcurrentKafkaListenerContainerFactory<?, ?> factory,
			Configuration configuration) {
		return new RetryTopicListenerContainerFactoryDecorator(factory, configuration, true);
	}

	/**
	 * Decorates the provided {@link ConcurrentKafkaListenerContainerFactory}.
	 * Meant to be used for the main endpoint, this method ignores the provided backOff values.
	 * @param factory the factory instance to be decorated.
	 * @param configuration the configuration provided by the {@link RetryTopicConfiguration}.
	 * @return the decorated factory instance.
	 */
	public KafkaListenerContainerFactory<?> decorateFactoryWithoutSettingContainerProperties(
			ConcurrentKafkaListenerContainerFactory<?, ?> factory, Configuration configuration) {
		return new RetryTopicListenerContainerFactoryDecorator(factory, configuration, false);
	}

	/**
	 * Set a {@link BackOff} to be used with blocking retries.
	 * If the BackOff execution returns STOP, the record will be forwarded
	 * to the next retry topic or to the DLT, depending on how the non-blocking retries
	 * are configured.
	 * @param blockingBackOff the BackOff policy to be used by blocking retries.
	 * @since 2.8.4
	 * @see DefaultErrorHandler
	 */
	public void setBlockingRetriesBackOff(BackOff blockingBackOff) {
		Assert.notNull(blockingBackOff, "The provided BackOff cannot be null");
		Assert.state(this.providedBlockingBackOff == null, () ->
				"Blocking retries back off has already been set. Current: "
						+ this.providedBlockingBackOff
						+ " You provided: " + blockingBackOff);
		this.providedBlockingBackOff = blockingBackOff;
	}

	/**
	 * Specify the exceptions to be retried via blocking.
	 * @param exceptionTypes the exceptions that should be retried.
	 * @since 2.8.4
	 * @see DefaultErrorHandler
	 */
	@SafeVarargs
	@SuppressWarnings("varargs")
	public final void setBlockingRetryableExceptions(Class<? extends Exception>... exceptionTypes) {
		Assert.notNull(exceptionTypes, "The exception types cannot be null");
		Assert.noNullElements(exceptionTypes, "The exception types cannot have null elements");
		Assert.state(this.blockingExceptionTypes == null,
				() -> "Blocking retryable exceptions have already been set."
						+ "Current ones: " + Arrays.toString(this.blockingExceptionTypes)
						+ " You provided: " + Arrays.toString(exceptionTypes));
		// Defensive copy so later mutation of the caller's array has no effect.
		this.blockingExceptionTypes = Arrays.copyOf(exceptionTypes, exceptionTypes.length);
	}

	private ConcurrentKafkaListenerContainerFactory<?, ?> doConfigure(
			ConcurrentKafkaListenerContainerFactory<?, ?> containerFactory, Configuration configuration,
			boolean isSetContainerProperties) {
		containerFactory
				.setContainerCustomizer(container -> setupBackoffAwareMessageListenerAdapter(container, configuration, isSetContainerProperties));
		containerFactory
				.setCommonErrorHandler(createErrorHandler(this.deadLetterPublishingRecovererFactory.create(), configuration));
		return containerFactory;
	}

	private boolean isCached(ConcurrentKafkaListenerContainerFactory<?, ?> containerFactory) {
		synchronized (CONFIGURED_FACTORIES_CACHE) {
			return CONFIGURED_FACTORIES_CACHE.contains(containerFactory);
		}
	}

	private ConcurrentKafkaListenerContainerFactory<?, ?> addToCache(ConcurrentKafkaListenerContainerFactory<?, ?> containerFactory) {
		synchronized (CONFIGURED_FACTORIES_CACHE) {
			CONFIGURED_FACTORIES_CACHE.add(containerFactory);
			return containerFactory;
		}
	}

	/**
	 * Set a customizer to be applied to each decorated
	 * {@link ConcurrentMessageListenerContainer}.
	 * @param containerCustomizer the customizer.
	 */
	public void setContainerCustomizer(Consumer<ConcurrentMessageListenerContainer<?, ?>> containerCustomizer) {
		Assert.notNull(containerCustomizer, "'containerCustomizer' cannot be null");
		this.containerCustomizer = containerCustomizer;
	}

	/**
	 * Set a customizer to be applied to the {@link CommonErrorHandler} created for
	 * each decorated container.
	 * @param errorHandlerCustomizer the customizer.
	 */
	public void setErrorHandlerCustomizer(Consumer<CommonErrorHandler> errorHandlerCustomizer) {
		// Fail fast on null, consistent with setContainerCustomizer(); previously a null
		// value would only surface as an NPE later, when the error handler was created.
		Assert.notNull(errorHandlerCustomizer, "'errorHandlerCustomizer' cannot be null");
		this.errorHandlerCustomizer = errorHandlerCustomizer;
	}

	protected CommonErrorHandler createErrorHandler(DeadLetterPublishingRecoverer deadLetterPublishingRecoverer,
			Configuration configuration) {
		DefaultErrorHandler errorHandler = createDefaultErrorHandlerInstance(deadLetterPublishingRecoverer);
		errorHandler.defaultFalse();
		errorHandler.setCommitRecovered(true);
		errorHandler.setLogLevel(KafkaException.Level.DEBUG);
		if (this.blockingExceptionTypes != null) {
			errorHandler.addRetryableExceptions(this.blockingExceptionTypes);
		}
		this.errorHandlerCustomizer.accept(errorHandler);
		return errorHandler;
	}

	protected DefaultErrorHandler createDefaultErrorHandlerInstance(DeadLetterPublishingRecoverer deadLetterPublishingRecoverer) {
		return this.providedBlockingBackOff != null
				? new DefaultErrorHandler(deadLetterPublishingRecoverer, this.providedBlockingBackOff)
				: new DefaultErrorHandler(deadLetterPublishingRecoverer);
	}

	protected void setupBackoffAwareMessageListenerAdapter(ConcurrentMessageListenerContainer<?, ?> container,
			Configuration configuration, boolean isSetContainerProperties) {
		AcknowledgingConsumerAwareMessageListener<?, ?> listener = checkAndCast(container.getContainerProperties()
				.getMessageListener(), AcknowledgingConsumerAwareMessageListener.class);
		if (isSetContainerProperties && !configuration.backOffValues.isEmpty()) {
			configurePollTimeoutAndIdlePartitionInterval(container, configuration);
		}
		container.setupMessageListener(new KafkaBackoffAwareMessageListenerAdapter<>(listener,
				this.kafkaConsumerBackoffManager, container.getListenerId(), this.clock)); // NOSONAR
		this.containerCustomizer.accept(container);
	}

	protected void configurePollTimeoutAndIdlePartitionInterval(ConcurrentMessageListenerContainer<?, ?> container,
			Configuration configuration) {
		ContainerProperties containerProperties = container.getContainerProperties();
		long pollTimeoutValue = getPollTimeoutValue(containerProperties, configuration);
		long idlePartitionEventInterval = getIdlePartitionInterval(containerProperties, pollTimeoutValue);
		LOGGER.debug(() -> "pollTimeout and idlePartitionEventInterval for back off values "
				+ configuration.backOffValues + " will be set to " + pollTimeoutValue
				+ " and " + idlePartitionEventInterval);
		containerProperties
				.setIdlePartitionEventInterval(idlePartitionEventInterval);
		containerProperties.setPollTimeout(pollTimeoutValue);
	}

	protected long getIdlePartitionInterval(ContainerProperties containerProperties, long pollTimeoutValue) {
		// A user-set positive interval wins; otherwise reuse the poll timeout.
		Long idlePartitionEventInterval = containerProperties.getIdlePartitionEventInterval();
		return idlePartitionEventInterval != null && idlePartitionEventInterval > 0
				? idlePartitionEventInterval
				: pollTimeoutValue;
	}

	protected long getPollTimeoutValue(ContainerProperties containerProperties, Configuration configuration) {
		// Only derive a value when the user kept the default and back offs exist.
		if (containerProperties.getPollTimeout() != ContainerProperties.DEFAULT_POLL_TIMEOUT
				|| configuration.backOffValues.isEmpty()) {
			return containerProperties.getPollTimeout();
		}
		Long lowestBackOff = configuration.backOffValues
				.stream()
				.min(Comparator.naturalOrder())
				.orElseThrow(() -> new IllegalArgumentException("No back off values found!"));
		return lowestBackOff > LOWEST_BACKOFF_THRESHOLD
				? applyLimits(lowestBackOff / POLL_TIMEOUT_DIVISOR)
				: MIN_POLL_TIMEOUT_VALUE;
	}

	// Clamps the value into [MIN_POLL_TIMEOUT_VALUE, MAX_POLL_TIMEOUT_VALUE].
	private long applyLimits(long pollTimeoutValue) {
		return Math.min(Math.max(pollTimeoutValue, MIN_POLL_TIMEOUT_VALUE), MAX_POLL_TIMEOUT_VALUE);
	}

	@SuppressWarnings("unchecked")
	private <T> T checkAndCast(Object obj, Class<T> clazz) {
		Assert.isAssignable(clazz, obj.getClass(),
				() -> String.format("The provided class %s is not assignable from %s",
						obj.getClass().getSimpleName(), clazz.getSimpleName()));
		return (T) obj;
	}

	// Lazily applies the retry-topic error handler and listener adapter to containers
	// created by the delegate factory, without mutating the delegate itself.
	private class RetryTopicListenerContainerFactoryDecorator
			implements KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<?, ?>> {

		private final ConcurrentKafkaListenerContainerFactory<?, ?> delegate;

		private final Configuration configuration;

		private final boolean isSetContainerProperties;

		RetryTopicListenerContainerFactoryDecorator(ConcurrentKafkaListenerContainerFactory<?, ?> delegate,
				Configuration configuration,
				boolean isSetContainerProperties) {
			this.delegate = delegate;
			this.configuration = configuration;
			this.isSetContainerProperties = isSetContainerProperties;
		}

		@Override
		public ConcurrentMessageListenerContainer<?, ?> createListenerContainer(KafkaListenerEndpoint endpoint) {
			return decorate(this.delegate.createListenerContainer(endpoint));
		}

		private ConcurrentMessageListenerContainer<?, ?> decorate(ConcurrentMessageListenerContainer<?, ?> listenerContainer) {
			listenerContainer
					.setCommonErrorHandler(createErrorHandler(
							ListenerContainerFactoryConfigurer.this.deadLetterPublishingRecovererFactory.create(),
							this.configuration));
			setupBackoffAwareMessageListenerAdapter(listenerContainer, this.configuration, this.isSetContainerProperties);
			return listenerContainer;
		}

		@Override
		public ConcurrentMessageListenerContainer<?, ?> createContainer(TopicPartitionOffset... topicPartitions) {
			return decorate(this.delegate.createContainer(topicPartitions));
		}

		@Override
		public ConcurrentMessageListenerContainer<?, ?> createContainer(String... topics) {
			return decorate(this.delegate.createContainer(topics));
		}

		@Override
		public ConcurrentMessageListenerContainer<?, ?> createContainer(Pattern topicPattern) {
			return decorate(this.delegate.createContainer(topicPattern));
		}
	}

	// Carries the back off values configured for the retryable endpoint.
	static class Configuration {

		private final List<Long> backOffValues;

		Configuration(List<Long> backOffValues) {
			this.backOffValues = backOffValues;
		}
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/ListenerContainerFactoryResolver.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.lang.Nullable;
import org.springframework.util.StringUtils;
/**
*
* Resolves a {@link ConcurrentKafkaListenerContainerFactory} to be used by the
* {@link RetryTopicConfiguration}.
*
* @author Tomaz Fernandes
* @since 2.7
*
* @see ListenerContainerFactoryConfigurer
*
*/
public class ListenerContainerFactoryResolver {
private final BeanFactory beanFactory;
private final List<FactoryResolver> mainEndpointResolvers;
private final List<FactoryResolver> retryEndpointResolvers;
private final Cache mainEndpointCache;
private final Cache retryEndpointCache;
ListenerContainerFactoryResolver(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
this.mainEndpointCache = new Cache();
this.retryEndpointCache = new Cache();
this.mainEndpointResolvers = Arrays.asList(
this.mainEndpointCache::fromCache,
(fromKafkaListenerAnnotation, configuration) -> fromKafkaListenerAnnotation,
(fromKLAnnotation, configuration) -> configuration.factoryFromRetryTopicConfiguration,
(fromKLAnnotation, configuration) -> fromBeanName(configuration.listenerContainerFactoryName),
(fromKLAnnotation, configuration) ->
fromBeanName(RetryTopicInternalBeanNames.DEFAULT_LISTENER_FACTORY_BEAN_NAME));
this.retryEndpointResolvers = Arrays.asList(
this.retryEndpointCache::fromCache,
(fromKLAnnotation, configuration) -> configuration.factoryFromRetryTopicConfiguration,
(fromKLAnnotation, configuration) -> fromBeanName(configuration.listenerContainerFactoryName),
(fromKLAnnotation, configuration) -> fromKLAnnotation,
(fromKLAnnotation, configuration) ->
fromBeanName(RetryTopicInternalBeanNames.DEFAULT_LISTENER_FACTORY_BEAN_NAME));
}
ConcurrentKafkaListenerContainerFactory<?, ?> resolveFactoryForMainEndpoint(
@Nullable KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotationInstance,
String defaultContainerFactoryBeanName,
Configuration config) {
KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation =
getFactoryFromKLA(factoryFromKafkaListenerAnnotationInstance, defaultContainerFactoryBeanName);
ConcurrentKafkaListenerContainerFactory<?, ?> resolvedFactory = resolveFactory(this.mainEndpointResolvers,
factoryFromKafkaListenerAnnotation, config);
return this.mainEndpointCache.addIfAbsent(factoryFromKafkaListenerAnnotation, config, resolvedFactory);
}
ConcurrentKafkaListenerContainerFactory<?, ?> resolveFactoryForRetryEndpoint(
@Nullable KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotationInstance,
String defaultContainerFactoryBeanName,
Configuration config) {
KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation =
getFactoryFromKLA(factoryFromKafkaListenerAnnotationInstance, defaultContainerFactoryBeanName);
ConcurrentKafkaListenerContainerFactory<?, ?> resolvedFactory = resolveFactory(this.retryEndpointResolvers,
factoryFromKafkaListenerAnnotation, config);
return this.retryEndpointCache.addIfAbsent(factoryFromKafkaListenerAnnotation, config, resolvedFactory);
}
@Nullable
private KafkaListenerContainerFactory<?> getFactoryFromKLA(KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotationInstance,
String defaultContainerFactoryBeanName) {
KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation =
factoryFromKafkaListenerAnnotationInstance;
if (factoryFromKafkaListenerAnnotation == null) {
factoryFromKafkaListenerAnnotation = fromBeanName(defaultContainerFactoryBeanName);
}
return factoryFromKafkaListenerAnnotation;
}
private ConcurrentKafkaListenerContainerFactory<?, ?> resolveFactory(List<FactoryResolver> factoryResolvers,
KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation,
Configuration config) {
ConcurrentKafkaListenerContainerFactory<?, ?> verifiedFactoryFromKafkaListenerAnnotation = verifyClass(
factoryFromKafkaListenerAnnotation);
return factoryResolvers
.stream()
.map(resolver -> Optional.ofNullable(
resolver.resolveFactory(verifiedFactoryFromKafkaListenerAnnotation, config)))
.filter(Optional::isPresent)
.map(Optional::get)
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("Could not resolve a viable " +
"ConcurrentKafkaListenerContainerFactory to configure the retry topic. " +
"Try creating a bean with name " +
RetryTopicInternalBeanNames.DEFAULT_LISTENER_FACTORY_BEAN_NAME));
}
@Nullable
private ConcurrentKafkaListenerContainerFactory<?, ?> verifyClass(KafkaListenerContainerFactory<?> fromKafkaListenerAnnotationFactory) {
return fromKafkaListenerAnnotationFactory != null
&& ConcurrentKafkaListenerContainerFactory.class.isAssignableFrom(fromKafkaListenerAnnotationFactory.getClass())
? (ConcurrentKafkaListenerContainerFactory<?, ?>) fromKafkaListenerAnnotationFactory
: null;
}
@Nullable
private ConcurrentKafkaListenerContainerFactory<?, ?> fromBeanName(String factoryBeanName) {
return StringUtils.hasText(factoryBeanName)
? this.beanFactory.getBean(factoryBeanName, ConcurrentKafkaListenerContainerFactory.class)
: null;
}
private interface FactoryResolver {
ConcurrentKafkaListenerContainerFactory<?, ?> resolveFactory(ConcurrentKafkaListenerContainerFactory<?, ?> candidate,
Configuration configuration);
}
/**
 * Value holder pairing the factory instance (if any) provided by the retry topic
 * configuration with the configured listener container factory bean name.
 * Used as part of the cache key, hence the equals/hashCode implementations.
 */
static class Configuration {

	private final ConcurrentKafkaListenerContainerFactory<?, ?> factoryFromRetryTopicConfiguration;

	private final String listenerContainerFactoryName;

	Configuration(ConcurrentKafkaListenerContainerFactory<?, ?> factoryFromRetryTopicConfiguration,
			String listenerContainerFactoryName) {
		this.factoryFromRetryTopicConfiguration = factoryFromRetryTopicConfiguration;
		this.listenerContainerFactoryName = listenerContainerFactoryName;
	}

	@Override
	public boolean equals(Object o) {
		if (o == this) {
			return true;
		}
		// Strict same-class comparison (not instanceof) to keep equals symmetric.
		if (o == null || !o.getClass().equals(getClass())) {
			return false;
		}
		Configuration other = (Configuration) o;
		return Objects.equals(this.factoryFromRetryTopicConfiguration, other.factoryFromRetryTopicConfiguration)
				&& Objects.equals(this.listenerContainerFactoryName, other.listenerContainerFactoryName);
	}

	@Override
	public int hashCode() {
		return Objects.hash(this.factoryFromRetryTopicConfiguration, this.listenerContainerFactoryName);
	}

}
/**
 * Simple synchronized cache from (annotation factory, configuration) pairs to the
 * resolved container factory. All map access is guarded by the map itself.
 */
static class Cache {

	private final Map<Key, ConcurrentKafkaListenerContainerFactory<?, ?>> cacheMap = new HashMap<>();

	ConcurrentKafkaListenerContainerFactory<?, ?> addIfAbsent(KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation,
			Configuration config,
			ConcurrentKafkaListenerContainerFactory<?, ?> resolvedFactory) {
		Key key = cacheKey(factoryFromKafkaListenerAnnotation, config);
		synchronized (this.cacheMap) {
			// First resolution wins; later calls leave the cached entry untouched.
			if (!this.cacheMap.containsKey(key)) {
				this.cacheMap.put(key, resolvedFactory);
			}
		}
		return resolvedFactory;
	}

	ConcurrentKafkaListenerContainerFactory<?, ?> fromCache(KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation,
			Configuration config) {
		Key key = cacheKey(factoryFromKafkaListenerAnnotation, config);
		synchronized (this.cacheMap) {
			return this.cacheMap.get(key);
		}
	}

	private Key cacheKey(KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation, Configuration config) {
		return new Key(factoryFromKafkaListenerAnnotation, config);
	}

	/**
	 * Composite map key; equality delegates to both components.
	 */
	static class Key {

		private final KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation;

		private final Configuration config;

		Key(KafkaListenerContainerFactory<?> factoryFromKafkaListenerAnnotation, Configuration config) {
			this.factoryFromKafkaListenerAnnotation = factoryFromKafkaListenerAnnotation;
			this.config = config;
		}

		@Override
		public boolean equals(Object o) {
			if (o == this) {
				return true;
			}
			if (o == null || !o.getClass().equals(getClass())) {
				return false;
			}
			Key other = (Key) o;
			return Objects.equals(this.factoryFromKafkaListenerAnnotation, other.factoryFromKafkaListenerAnnotation)
					&& Objects.equals(this.config, other.config);
		}

		@Override
		public int hashCode() {
			return Objects.hash(this.factoryFromKafkaListenerAnnotation, this.config);
		}

	}

}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicBootstrapper.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.time.Clock;
import java.util.function.Supplier;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.config.SingletonBeanRegistry;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.core.task.TaskExecutor;
import org.springframework.kafka.listener.KafkaBackOffManagerFactory;
import org.springframework.kafka.listener.KafkaConsumerBackoffManager;
import org.springframework.kafka.listener.KafkaConsumerTimingAdjuster;
import org.springframework.kafka.listener.PartitionPausingBackOffManagerFactory;
import org.springframework.retry.backoff.ThreadWaitSleeper;
/**
*
* Bootstraps the {@link RetryTopicConfigurer} context, registering the dependency
* beans and configuring the {@link org.springframework.context.ApplicationListener}s.
*
* Note that if a bean with the same name already exists in the context that one will
* be used instead.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class RetryTopicBootstrapper {

	private final ApplicationContext applicationContext;

	private final BeanFactory beanFactory;

	/**
	 * Create a bootstrapper for the given context and bean factory.
	 * @param applicationContext must implement both {@link ConfigurableApplicationContext}
	 * and {@link BeanDefinitionRegistry}.
	 * @param beanFactory must implement {@link SingletonBeanRegistry}.
	 * @throws IllegalStateException if either argument does not implement the required interfaces.
	 */
	public RetryTopicBootstrapper(ApplicationContext applicationContext, BeanFactory beanFactory) {
		// instanceof is the idiomatic (and null-safe) replacement for Class.isAssignableFrom here.
		if (!(applicationContext instanceof ConfigurableApplicationContext)
				|| !(applicationContext instanceof BeanDefinitionRegistry)) {
			// Message fixed: was "must be implement".
			throw new IllegalStateException(String.format("ApplicationContext must implement %s and %s interfaces. Provided: %s",
					ConfigurableApplicationContext.class.getSimpleName(),
					BeanDefinitionRegistry.class.getSimpleName(),
					applicationContext.getClass().getSimpleName()));
		}
		if (!(beanFactory instanceof SingletonBeanRegistry)) {
			throw new IllegalStateException("BeanFactory must implement " + SingletonBeanRegistry.class +
					" interface. Provided: " + beanFactory.getClass().getSimpleName());
		}
		this.beanFactory = beanFactory;
		this.applicationContext = applicationContext;
	}

	/**
	 * Register all bean definitions, singletons and application listeners required by
	 * the retry topic feature. Existing beans with the same names take precedence.
	 */
	public void bootstrapRetryTopic() {
		registerBeans();
		registerSingletons();
		addApplicationListeners();
	}

	// Registers the retry topic infrastructure bean definitions, skipping any name
	// for which the user has already provided a definition.
	private void registerBeans() {
		registerIfNotContains(RetryTopicInternalBeanNames.LISTENER_CONTAINER_FACTORY_RESOLVER_NAME,
				ListenerContainerFactoryResolver.class);
		registerIfNotContains(RetryTopicInternalBeanNames.DESTINATION_TOPIC_PROCESSOR_NAME,
				DefaultDestinationTopicProcessor.class);
		registerIfNotContains(RetryTopicInternalBeanNames.LISTENER_CONTAINER_FACTORY_CONFIGURER_NAME,
				ListenerContainerFactoryConfigurer.class);
		registerIfNotContains(RetryTopicInternalBeanNames.DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME,
				DeadLetterPublishingRecovererFactory.class);
		registerIfNotContains(RetryTopicInternalBeanNames.RETRY_TOPIC_CONFIGURER, RetryTopicConfigurer.class);
		registerIfNotContains(RetryTopicInternalBeanNames.DESTINATION_TOPIC_CONTAINER_NAME,
				DefaultDestinationTopicResolver.class);
		registerIfNotContains(RetryTopicInternalBeanNames.BACKOFF_SLEEPER_BEAN_NAME, ThreadWaitSleeper.class);
		registerIfNotContains(RetryTopicInternalBeanNames.INTERNAL_KAFKA_CONSUMER_BACKOFF_MANAGER_FACTORY,
				PartitionPausingBackOffManagerFactory.class);

		// Register a RetryTopicNamesProviderFactory implementation only if none is already present in the context
		try {
			this.applicationContext.getBean(RetryTopicNamesProviderFactory.class);
		}
		catch (NoSuchBeanDefinitionException e) {
			// Expected when the user supplied no custom factory - fall back to the suffixing default.
			((BeanDefinitionRegistry) this.applicationContext).registerBeanDefinition(
					RetryTopicInternalBeanNames.RETRY_TOPIC_NAMES_PROVIDER_FACTORY,
					new RootBeanDefinition(SuffixingRetryTopicNamesProviderFactory.class));
		}
	}

	private void registerSingletons() {
		registerSingletonIfNotContains(RetryTopicInternalBeanNames.INTERNAL_BACKOFF_CLOCK_BEAN_NAME, Clock::systemUTC);
		registerSingletonIfNotContains(RetryTopicInternalBeanNames.KAFKA_CONSUMER_BACKOFF_MANAGER,
				this::createKafkaConsumerBackoffManager);
	}

	// Wires the DefaultDestinationTopicResolver into the context's event multicaster so it
	// receives container lifecycle events.
	private void addApplicationListeners() {
		((ConfigurableApplicationContext) this.applicationContext)
				.addApplicationListener(this.applicationContext.getBean(
						RetryTopicInternalBeanNames.DESTINATION_TOPIC_CONTAINER_NAME, DefaultDestinationTopicResolver.class));
	}

	// Builds the backoff manager from the (possibly user-supplied) factory bean, giving the
	// factory access to the application context and timing-adjustment collaborators when supported.
	private KafkaConsumerBackoffManager createKafkaConsumerBackoffManager() {
		KafkaBackOffManagerFactory factory = this.applicationContext
				.getBean(RetryTopicInternalBeanNames.INTERNAL_KAFKA_CONSUMER_BACKOFF_MANAGER_FACTORY,
						KafkaBackOffManagerFactory.class);
		if (factory instanceof ApplicationContextAware) {
			((ApplicationContextAware) factory).setApplicationContext(this.applicationContext);
		}
		if (factory instanceof PartitionPausingBackOffManagerFactory) {
			setupTimingAdjustingBackOffFactory((PartitionPausingBackOffManagerFactory) factory);
		}
		return factory.create();
	}

	// Optionally injects the task executor and timing adjuster, if such beans exist.
	private void setupTimingAdjustingBackOffFactory(PartitionPausingBackOffManagerFactory factory) {
		if (this.applicationContext.containsBean(RetryTopicInternalBeanNames.BACKOFF_TASK_EXECUTOR)) {
			factory.setTaskExecutor(this.applicationContext
					.getBean(RetryTopicInternalBeanNames.BACKOFF_TASK_EXECUTOR, TaskExecutor.class));
		}
		if (this.applicationContext.containsBean(
				RetryTopicInternalBeanNames.INTERNAL_BACKOFF_TIMING_ADJUSTMENT_MANAGER)) {
			factory.setTimingAdjustmentManager(this.applicationContext
					.getBean(RetryTopicInternalBeanNames.INTERNAL_BACKOFF_TIMING_ADJUSTMENT_MANAGER,
							KafkaConsumerTimingAdjuster.class));
		}
	}

	private void registerIfNotContains(String beanName, Class<?> beanClass) {
		BeanDefinitionRegistry registry = (BeanDefinitionRegistry) this.applicationContext;
		if (!registry.containsBeanDefinition(beanName)) {
			registry.registerBeanDefinition(beanName,
					new RootBeanDefinition(beanClass));
		}
	}

	// NOTE(review): the presence check uses containsBeanDefinition(), but singletons registered
	// via SingletonBeanRegistry.registerSingleton() have no bean definition - presumably the
	// intent is only to honor user-declared bean definitions; confirm repeated bootstrap is not
	// expected to be idempotent through this path.
	private void registerSingletonIfNotContains(String beanName, Supplier<Object> singletonSupplier) {
		if (!this.applicationContext.containsBeanDefinition(beanName)) {
			((SingletonBeanRegistry) this.beanFactory).registerSingleton(beanName, singletonSupplier.get());
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicConfiguration.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.List;
import org.springframework.kafka.support.AllowDenyCollectionManager;
import org.springframework.kafka.support.EndpointHandlerMethod;
/**
* Contains the provided configuration for the retryable topics.
*
* Should be created via the {@link RetryTopicConfigurationBuilder}.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
*/
public class RetryTopicConfiguration {

	private final List<DestinationTopic.Properties> destinationTopicProperties;

	private final AllowDenyCollectionManager<String> topicAllowListManager;

	private final EndpointHandlerMethod dltHandlerMethod;

	private final TopicCreation kafkaTopicAutoCreationConfig;

	private final ListenerContainerFactoryResolver.Configuration factoryResolverConfig;

	private final ListenerContainerFactoryConfigurer.Configuration factoryConfigurerConfig;

	RetryTopicConfiguration(List<DestinationTopic.Properties> destinationTopicProperties,
							EndpointHandlerMethod dltHandlerMethod,
							TopicCreation kafkaTopicAutoCreationConfig,
							AllowDenyCollectionManager<String> topicAllowListManager,
							ListenerContainerFactoryResolver.Configuration factoryResolverConfig,
							ListenerContainerFactoryConfigurer.Configuration factoryConfigurerConfig) {
		this.destinationTopicProperties = destinationTopicProperties;
		this.topicAllowListManager = topicAllowListManager;
		this.dltHandlerMethod = dltHandlerMethod;
		this.kafkaTopicAutoCreationConfig = kafkaTopicAutoCreationConfig;
		this.factoryResolverConfig = factoryResolverConfig;
		this.factoryConfigurerConfig = factoryConfigurerConfig;
	}

	/**
	 * Whether this configuration applies to all of the given topics, per the
	 * configured allow/deny lists.
	 * @param topics the topic names to check.
	 * @return true if all topics are allowed.
	 */
	public boolean hasConfigurationForTopics(String[] topics) {
		return this.topicAllowListManager.areAllowed(topics);
	}

	/**
	 * Return the topic auto-creation settings.
	 * @return the {@link TopicCreation} settings.
	 */
	public TopicCreation forKafkaTopicAutoCreation() {
		return this.kafkaTopicAutoCreationConfig;
	}

	/**
	 * Return the configuration used to resolve the listener container factory.
	 * @return the factory resolver configuration.
	 */
	public ListenerContainerFactoryResolver.Configuration forContainerFactoryResolver() {
		return this.factoryResolverConfig;
	}

	/**
	 * Return the configuration used to configure the listener container factory.
	 * @return the factory configurer configuration.
	 */
	public ListenerContainerFactoryConfigurer.Configuration forContainerFactoryConfigurer() {
		return this.factoryConfigurerConfig;
	}

	/**
	 * Return the handler method for DLT records.
	 * @return the DLT handler method.
	 */
	public EndpointHandlerMethod getDltHandlerMethod() {
		return this.dltHandlerMethod;
	}

	/**
	 * Return the properties for each destination (retry / DLT) topic.
	 * @return the destination topic properties.
	 */
	public List<DestinationTopic.Properties> getDestinationTopicProperties() {
		return this.destinationTopicProperties;
	}

	/**
	 * Settings controlling automatic creation of the retry and DLT topics.
	 */
	static class TopicCreation {

		private final boolean shouldCreateTopics;

		private final int numPartitions;

		private final short replicationFactor;

		TopicCreation(boolean shouldCreate, int numPartitions, short replicationFactor) {
			this.shouldCreateTopics = shouldCreate;
			this.numPartitions = numPartitions;
			this.replicationFactor = replicationFactor;
		}

		// Defaults: create topics with a single partition and replication factor 1.
		TopicCreation() {
			this(true, 1, (short) 1);
		}

		TopicCreation(boolean shouldCreateTopics) {
			this(shouldCreateTopics, 1, (short) 1);
		}

		public int getNumPartitions() {
			return this.numPartitions;
		}

		public short getReplicationFactor() {
			return this.replicationFactor;
		}

		public boolean shouldCreateTopics() {
			return this.shouldCreateTopics;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicConfigurationBuilder.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.util.ArrayList;
import java.util.List;
import org.springframework.classify.BinaryExceptionClassifier;
import org.springframework.classify.BinaryExceptionClassifierBuilder;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.support.AllowDenyCollectionManager;
import org.springframework.kafka.support.EndpointHandlerMethod;
import org.springframework.lang.Nullable;
import org.springframework.retry.backoff.BackOffPolicy;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.backoff.ExponentialRandomBackOffPolicy;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.backoff.SleepingBackOffPolicy;
import org.springframework.retry.backoff.UniformRandomBackOffPolicy;
import org.springframework.util.Assert;
/**
*
* Builder class to create {@link RetryTopicConfiguration} instances.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
*/
public class RetryTopicConfigurationBuilder {

	private static final String ALREADY_SELECTED = "You have already selected backoff policy";

	private final List<String> includeTopicNames = new ArrayList<>();

	private final List<String> excludeTopicNames = new ArrayList<>();

	private int maxAttempts = RetryTopicConstants.NOT_SET;

	private BackOffPolicy backOffPolicy;

	private EndpointHandlerMethod dltHandlerMethod;

	private String retryTopicSuffix;

	private String dltSuffix;

	private RetryTopicConfiguration.TopicCreation topicCreationConfiguration = new RetryTopicConfiguration.TopicCreation();

	private ConcurrentKafkaListenerContainerFactory<?, ?> listenerContainerFactory;

	private String listenerContainerFactoryName;

	private BinaryExceptionClassifierBuilder classifierBuilder;

	private FixedDelayStrategy fixedDelayStrategy = FixedDelayStrategy.MULTIPLE_TOPICS;

	private DltStrategy dltStrategy = DltStrategy.ALWAYS_RETRY_ON_ERROR;

	private long timeout = RetryTopicConstants.NOT_SET;

	private TopicSuffixingStrategy topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_DELAY_VALUE;

	private Boolean autoStartDltHandler;

	/* ---------------- DLT Behavior -------------- */
	/**
	 * Configure a DLT handler method.
	 * @param clazz the class containing the method.
	 * @param methodName the method name.
	 * @return the builder.
	 * @deprecated in favor of {@link #dltHandlerMethod(String, String)}.
	 */
	@Deprecated
	public RetryTopicConfigurationBuilder dltHandlerMethod(Class<?> clazz, String methodName) {
		this.dltHandlerMethod = RetryTopicConfigurer.createHandlerMethodWith(clazz, methodName);
		return this;
	}

	/**
	 * Configure a DLT handler method.
	 * @param beanName the bean name.
	 * @param methodName the method name.
	 * @return the builder.
	 * @since 2.8
	 */
	public RetryTopicConfigurationBuilder dltHandlerMethod(String beanName, String methodName) {
		this.dltHandlerMethod = RetryTopicConfigurer.createHandlerMethodWith(beanName, methodName);
		return this;
	}

	/**
	 * Configure the DLT handler method directly.
	 * @param endpointHandlerMethod the handler method.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder dltHandlerMethod(
			EndpointHandlerMethod endpointHandlerMethod) {
		this.dltHandlerMethod = endpointHandlerMethod;
		return this;
	}

	/**
	 * Stop processing (do not retry) when the DLT handler itself fails.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder doNotRetryOnDltFailure() {
		this.dltStrategy =
				DltStrategy.FAIL_ON_ERROR;
		return this;
	}

	/**
	 * Configure the strategy for DLT processing failures.
	 * @param dltStrategy the strategy.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder dltProcessingFailureStrategy(
			DltStrategy dltStrategy) {
		this.dltStrategy = dltStrategy;
		return this;
	}

	/**
	 * Do not create a DLT at all.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder doNotConfigureDlt() {
		this.dltStrategy =
				DltStrategy.NO_DLT;
		return this;
	}

	/**
	 * Set to false to not start the DLT handler (configured or default); overrides
	 * the container factory's autoStartup property.
	 * @param autoStart false to not auto start.
	 * @return this builder.
	 * @since 2.8
	 */
	public RetryTopicConfigurationBuilder autoStartDltHandler(@Nullable Boolean autoStart) {
		this.autoStartDltHandler = autoStart;
		return this;
	}

	/* ---------------- Configure Topic GateKeeper -------------- */
	/**
	 * Restrict this configuration to the given topics.
	 * @param topicNames the topic names to include.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder includeTopics(List<String> topicNames) {
		this.includeTopicNames.addAll(topicNames);
		return this;
	}

	/**
	 * Exclude the given topics from this configuration.
	 * @param topicNames the topic names to exclude.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder excludeTopics(List<String> topicNames) {
		this.excludeTopicNames.addAll(topicNames);
		return this;
	}

	/**
	 * Restrict this configuration to the given topic.
	 * @param topicName the topic name to include.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder includeTopic(String topicName) {
		this.includeTopicNames.add(topicName);
		return this;
	}

	/**
	 * Exclude the given topic from this configuration.
	 * @param topicName the topic name to exclude.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder excludeTopic(String topicName) {
		this.excludeTopicNames.add(topicName);
		return this;
	}

	/* ---------------- Configure Topic Suffixes -------------- */
	/**
	 * Configure the suffix for retry topics.
	 * @param suffix the suffix.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder retryTopicSuffix(String suffix) {
		this.retryTopicSuffix = suffix;
		return this;
	}

	/**
	 * Configure the suffix for the DLT.
	 * @param suffix the suffix.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder dltSuffix(String suffix) {
		this.dltSuffix = suffix;
		return this;
	}

	/**
	 * Suffix retry topics with their index rather than the delay value.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder suffixTopicsWithIndexValues() {
		this.topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE;
		return this;
	}

	/**
	 * Configure the topic suffixing strategy.
	 * @param topicSuffixingStrategy the strategy.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder setTopicSuffixingStrategy(TopicSuffixingStrategy topicSuffixingStrategy) {
		this.topicSuffixingStrategy = topicSuffixingStrategy;
		return this;
	}

	/* ---------------- Configure BackOff -------------- */
	/**
	 * Configure the maximum number of delivery attempts (including the first).
	 * @param maxAttempts the number of attempts; must be positive and set at most once.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder maxAttempts(int maxAttempts) {
		Assert.isTrue(maxAttempts > 0, "Number of attempts should be positive");
		Assert.isTrue(this.maxAttempts == RetryTopicConstants.NOT_SET,
				"You have already set the number of attempts");
		this.maxAttempts = maxAttempts;
		return this;
	}

	/**
	 * Configure a global timeout, after which the record goes straight to the DLT.
	 * @param timeout the timeout.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder timeoutAfter(long timeout) {
		this.timeout = timeout;
		return this;
	}

	/**
	 * Configure an exponential backoff policy.
	 * @param initialInterval the initial delay interval.
	 * @param multiplier the multiplier (&gt; 1).
	 * @param maxInterval the maximum delay interval (&gt; initialInterval).
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder exponentialBackoff(long initialInterval, double multiplier, long maxInterval) {
		return exponentialBackoff(initialInterval, multiplier, maxInterval, false);
	}

	/**
	 * Configure an exponential backoff policy, optionally randomized.
	 * @param initialInterval the initial delay interval.
	 * @param multiplier the multiplier (&gt; 1).
	 * @param maxInterval the maximum delay interval (&gt; initialInterval).
	 * @param withRandom true for an {@link ExponentialRandomBackOffPolicy}.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder exponentialBackoff(long initialInterval, double multiplier, long maxInterval,
			boolean withRandom) {
		Assert.isNull(this.backOffPolicy, ALREADY_SELECTED);
		Assert.isTrue(initialInterval >= 1, "Initial interval should be >= 1");
		Assert.isTrue(multiplier > 1, "Multiplier should be > 1");
		Assert.isTrue(maxInterval > initialInterval, "Max interval should be > than initial interval");
		ExponentialBackOffPolicy policy = withRandom ? new ExponentialRandomBackOffPolicy()
				: new ExponentialBackOffPolicy();
		policy.setInitialInterval(initialInterval);
		policy.setMultiplier(multiplier);
		policy.setMaxInterval(maxInterval);
		this.backOffPolicy = policy;
		return this;
	}

	/**
	 * Configure a fixed backoff policy.
	 * @param interval the delay interval (&gt;= 1).
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder fixedBackOff(long interval) {
		Assert.isNull(this.backOffPolicy, ALREADY_SELECTED);
		Assert.isTrue(interval >= 1, "Interval should be >= 1");
		FixedBackOffPolicy policy = new FixedBackOffPolicy();
		policy.setBackOffPeriod(interval);
		this.backOffPolicy = policy;
		return this;
	}

	/**
	 * Configure a uniform random backoff policy.
	 * @param minInterval the minimum delay interval (&gt;= 1).
	 * @param maxInterval the maximum delay interval (&gt; minInterval).
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder uniformRandomBackoff(long minInterval, long maxInterval) {
		Assert.isNull(this.backOffPolicy, ALREADY_SELECTED);
		Assert.isTrue(minInterval >= 1, "Min interval should be >= 1");
		Assert.isTrue(maxInterval >= 1, "Max interval should be >= 1");
		Assert.isTrue(maxInterval > minInterval, "Max interval should be > than min interval");
		UniformRandomBackOffPolicy policy = new UniformRandomBackOffPolicy();
		policy.setMinBackOffPeriod(minInterval);
		policy.setMaxBackOffPeriod(maxInterval);
		this.backOffPolicy = policy;
		return this;
	}

	/**
	 * Configure retries with no delay between attempts.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder noBackoff() {
		Assert.isNull(this.backOffPolicy, ALREADY_SELECTED);
		this.backOffPolicy = new NoBackOffPolicy();
		return this;
	}

	/**
	 * Configure a custom sleeping backoff policy.
	 * @param backOffPolicy the policy; must not be null.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder customBackoff(SleepingBackOffPolicy<?> backOffPolicy) {
		Assert.isNull(this.backOffPolicy, ALREADY_SELECTED);
		Assert.notNull(backOffPolicy, "You should provide non null custom policy");
		this.backOffPolicy = backOffPolicy;
		return this;
	}

	/**
	 * Configure a fixed backoff policy.
	 * @param interval the delay interval.
	 * @return the builder.
	 */
	// NOTE(review): unlike fixedBackOff(long), this overload skips the ALREADY_SELECTED
	// and interval >= 1 guards - presumably an oversight, but adding them could break
	// existing callers; confirm before aligning with the long overload.
	public RetryTopicConfigurationBuilder fixedBackOff(int interval) {
		FixedBackOffPolicy policy = new FixedBackOffPolicy();
		policy.setBackOffPeriod(interval);
		this.backOffPolicy = policy;
		return this;
	}

	/**
	 * Use a single retry topic for fixed-delay retries instead of one topic per attempt.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder useSingleTopicForFixedDelays() {
		this.fixedDelayStrategy = FixedDelayStrategy.SINGLE_TOPIC;
		return this;
	}

	/**
	 * Configure the fixed-delay topic strategy.
	 * @param useSameTopicForFixedDelays the strategy.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder useSingleTopicForFixedDelays(FixedDelayStrategy useSameTopicForFixedDelays) {
		this.fixedDelayStrategy = useSameTopicForFixedDelays;
		return this;
	}

	/* ---------------- Configure Topics Auto Creation -------------- */
	/**
	 * Do not auto-create the retry and DLT topics.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder doNotAutoCreateRetryTopics() {
		this.topicCreationConfiguration = new RetryTopicConfiguration.TopicCreation(false);
		return this;
	}

	/**
	 * Auto-create topics with the given partition count and replication factor.
	 * @param numPartitions the number of partitions.
	 * @param replicationFactor the replication factor.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder autoCreateTopicsWith(int numPartitions, short replicationFactor) {
		this.topicCreationConfiguration = new RetryTopicConfiguration.TopicCreation(true, numPartitions,
				replicationFactor);
		return this;
	}

	/**
	 * Configure topic auto-creation.
	 * @param shouldCreate whether to create the topics.
	 * @param numPartitions the number of partitions.
	 * @param replicationFactor the replication factor.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder autoCreateTopics(boolean shouldCreate, int numPartitions,
			short replicationFactor) {
		this.topicCreationConfiguration = new RetryTopicConfiguration.TopicCreation(shouldCreate, numPartitions,
				replicationFactor);
		return this;
	}

	/* ---------------- Configure Exception Classifier -------------- */
	/**
	 * Retry on the given exception type.
	 * @param throwable the exception type.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder retryOn(Class<? extends Throwable> throwable) {
		classifierBuilder().retryOn(throwable);
		return this;
	}

	/**
	 * Do not retry on the given exception type.
	 * @param throwable the exception type.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder notRetryOn(Class<? extends Throwable> throwable) {
		classifierBuilder().notRetryOn(throwable);
		return this;
	}

	/**
	 * Retry on the given exception types.
	 * @param throwables the exception types.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder retryOn(List<Class<? extends Throwable>> throwables) {
		// Iterable.forEach - no need to open a stream just to iterate.
		throwables.forEach(throwable -> classifierBuilder().retryOn(throwable));
		return this;
	}

	/**
	 * Do not retry on the given exception types.
	 * @param throwables the exception types.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder notRetryOn(List<Class<? extends Throwable>> throwables) {
		throwables.forEach(throwable -> classifierBuilder().notRetryOn(throwable));
		return this;
	}

	/**
	 * Classify exceptions by traversing their causes.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder traversingCauses() {
		classifierBuilder().traversingCauses();
		return this;
	}

	/**
	 * Optionally classify exceptions by traversing their causes.
	 * @param traversing true to traverse causes.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder traversingCauses(boolean traversing) {
		if (traversing) {
			classifierBuilder().traversingCauses();
		}
		return this;
	}

	// Lazily creates the classifier builder the first time an exception rule is added.
	private BinaryExceptionClassifierBuilder classifierBuilder() {
		if (this.classifierBuilder == null) {
			this.classifierBuilder = new BinaryExceptionClassifierBuilder();
		}
		return this.classifierBuilder;
	}

	/* ---------------- Configure KafkaListenerContainerFactory -------------- */
	/**
	 * Configure the container factory to use for the retry topic listeners.
	 * @param factory the factory.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder listenerFactory(ConcurrentKafkaListenerContainerFactory<?, ?> factory) {
		this.listenerContainerFactory = factory;
		return this;
	}

	/**
	 * Configure the container factory bean name to use for the retry topic listeners.
	 * @param factoryBeanName the bean name.
	 * @return the builder.
	 */
	public RetryTopicConfigurationBuilder listenerFactory(String factoryBeanName) {
		this.listenerContainerFactoryName = factoryBeanName;
		return this;
	}

	// The templates are configured per ListenerContainerFactory. Only the first configured ones will be used.
	/**
	 * Build the {@link RetryTopicConfiguration} from this builder's state.
	 * @param sendToTopicKafkaTemplate the template used to forward records between topics.
	 * @return the configuration.
	 */
	public RetryTopicConfiguration create(KafkaOperations<?, ?> sendToTopicKafkaTemplate) {
		ListenerContainerFactoryResolver.Configuration factoryResolverConfig =
				new ListenerContainerFactoryResolver.Configuration(this.listenerContainerFactory,
						this.listenerContainerFactoryName);
		AllowDenyCollectionManager<String> allowListManager =
				new AllowDenyCollectionManager<>(this.includeTopicNames, this.excludeTopicNames);
		List<Long> backOffValues = new BackOffValuesGenerator(this.maxAttempts, this.backOffPolicy).generateValues();
		ListenerContainerFactoryConfigurer.Configuration factoryConfigurerConfig =
				new ListenerContainerFactoryConfigurer.Configuration(backOffValues);
		List<DestinationTopic.Properties> destinationTopicProperties =
				new DestinationTopicPropertiesFactory(this.retryTopicSuffix, this.dltSuffix, backOffValues,
						buildClassifier(), this.topicCreationConfiguration.getNumPartitions(),
						sendToTopicKafkaTemplate, this.fixedDelayStrategy, this.dltStrategy,
						this.topicSuffixingStrategy, this.timeout)
						.autoStartDltHandler(this.autoStartDltHandler)
						.createProperties();
		return new RetryTopicConfiguration(destinationTopicProperties,
				this.dltHandlerMethod, this.topicCreationConfiguration, allowListManager,
				factoryResolverConfig, factoryConfigurerConfig);
	}

	// Defaults to retrying on any Throwable when no explicit rules were configured.
	private BinaryExceptionClassifier buildClassifier() {
		return this.classifierBuilder != null
				? this.classifierBuilder.build()
				: new BinaryExceptionClassifierBuilder().retryOn(Throwable.class).build();
	}

	/**
	 * Create a new instance of the builder.
	 * @return the new instance.
	 */
	public static RetryTopicConfigurationBuilder newInstance() {
		return new RetryTopicConfigurationBuilder();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicConfigurer.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.function.Consumer;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpointRegistrar;
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
import org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint;
import org.springframework.kafka.listener.ListenerUtils;
import org.springframework.kafka.support.EndpointHandlerMethod;
import org.springframework.lang.Nullable;
/**
*
* <p>Configures main, retry and DLT topics based on a main endpoint and provided
* configurations to acomplish a distributed retry / DLT pattern in a non-blocking
* fashion, at the expense of ordering guarantees.
*
* <p>To illustrate, if you have a "main-topic" topic, and want an exponential backoff
* of 1000ms with a multiplier of 2 and 3 retry attempts, it will create the
* main-topic-retry-1000, main-topic-retry-2000, main-topic-retry-4000 and main-topic-dlt
* topics. The configuration can be achieved using a {@link RetryTopicConfigurationBuilder}
* to create one or more {@link RetryTopicConfigurer} beans, or by using the
* {@link org.springframework.kafka.annotation.RetryableTopic} annotation.
* More details on usage below.
*
*
* <p>How it works:
*
* <p>If a message processing throws an exception, the configured
* {@link org.springframework.kafka.listener.SeekToCurrentErrorHandler}
* and {@link org.springframework.kafka.listener.DeadLetterPublishingRecoverer} forwards the message to the next topic, using a
* {@link org.springframework.kafka.retrytopic.DestinationTopicResolver}
* to know the next topic and the delay for it.
*
* <p>Each forwareded record has a back off timestamp header and, if consumption is
* attempted by the {@link org.springframework.kafka.listener.adapter.KafkaBackoffAwareMessageListenerAdapter}
* before that time, the partition consumption is paused by a
* {@link org.springframework.kafka.listener.KafkaConsumerBackoffManager} and a
* {@link org.springframework.kafka.listener.KafkaBackoffException} is thrown.
*
* <p>When the partition has been idle for the amount of time specified in the
* ContainerProperties' idlePartitionEventInterval property.
* property, a {@link org.springframework.kafka.event.ListenerContainerPartitionIdleEvent}
* is published, which the {@link org.springframework.kafka.listener.KafkaConsumerBackoffManager}
* listens to in order to check whether or not it should unpause the partition.
*
* <p>If, when consumption is resumed, processing fails again, the message is forwarded to
* the next topic and so on, until it gets to the dlt.
*
* <p>Considering Kafka's partition ordering guarantees, and each topic having a fixed
* delay time, we know that the first message consumed in a given retry topic partition will
* be the one with the earliest backoff timestamp for that partition, so by pausing the
* partition we know we're not delaying message processing in other partitions longer than
* necessary.
*
*
* <p>Usages:
*
* <p>There are two main ways for configuring the endpoints. The first is by providing one or more
* {@link org.springframework.context.annotation.Bean}s in a {@link org.springframework.context.annotation.Configuration}
* annotated class, such as:
*
* <pre>
* <code>@Bean</code>
* <code>public RetryTopicConfiguration myRetryableTopic(KafkaTemplate<String, Object> template) {
* return RetryTopicConfigurationBuilder
* .newInstance()
* .create(template);
* }</code>
* </pre>
* <p>This will create retry and dlt topics for all topics in methods annotated with
* {@link org.springframework.kafka.annotation.KafkaListener}, as well as its consumers,
* using the default configurations. If message processing fails it will forward the message
* to the next topic until it gets to the DLT topic.
*
* A {@link org.springframework.kafka.core.KafkaOperations} instance is required for message forwarding.
*
* <p>For more fine-grained control over how to handle retrials for each topic, more then one bean can be provided, such as:
*
* <pre>
* <code>@Bean
* public RetryTopicConfiguration myRetryableTopic(KafkaTemplate<String, MyPojo> template) {
* return RetryTopicConfigurationBuilder
* .newInstance()
* .fixedBackoff(3000)
* .maxAttempts(5)
* .includeTopics("my-topic", "my-other-topic")
* .create(template);
* }</code>
* </pre>
* <pre>
* <code>@Bean
* public RetryTopicConfiguration myOtherRetryableTopic(KafkaTemplate<String, MyPojo> template) {
* return RetryTopicConfigurationBuilder
* .newInstance()
* .exponentialBackoff(1000, 2, 5000)
* .maxAttempts(4)
* .excludeTopics("my-topic", "my-other-topic")
* .retryOn(MyException.class)
* .create(template);
* }</code>
* </pre>
* <p>Some other options include: auto-creation of topics, backoff,
* retryOn / notRetryOn / transversing as in {@link org.springframework.retry.support.RetryTemplate},
* single-topic fixed backoff processing, custom dlt listener beans, custom topic
* suffixes and providing specific listenerContainerFactories.
*
* <p>The other, non-exclusive way to configure the endpoints is through the convenient
* {@link org.springframework.kafka.annotation.RetryableTopic} annotation, that can be placed on any
* {@link org.springframework.kafka.annotation.KafkaListener} annotated methods, such as:
*
* <pre>
* <code>@RetryableTopic(attempts = 3,
* backoff = @Backoff(delay = 700, maxDelay = 12000, multiplier = 3))</code>
* <code>@KafkaListener(topics = "my-annotated-topic")
* public void processMessage(MyPojo message) {
* // ... message processing
* }</code>
*</pre>
* <p> The same configurations are available in the annotation and the builder approaches, and both can be
* used concurrently. In case the same method / topic can be handled by both, the annotation takes precedence.
*
* <p>DLT Handling:
*
* <p>The DLT handler method can be provided through the
* {@link RetryTopicConfigurationBuilder#dltHandlerMethod(Class, String)} method,
* providing the class and method name that should handle the DLT topic. If a bean
* instance of this type is found in the {@link BeanFactory} it is the instance used.
* If not an instance is created. The class can use dependency injection as a normal bean.
*
* <pre>
* <code>@Bean
* public RetryTopicConfiguration otherRetryTopic(KafkaTemplate<Integer, MyPojo> template) {
* return RetryTopicConfigurationBuilder
* .newInstance()
* .dltProcessor(MyCustomDltProcessor.class, "processDltMessage")
* .create(template);
* }</code>
*
* <code>@Component
* public class MyCustomDltProcessor {
*
* public void processDltMessage(MyPojo message) {
* // ... message processing, persistence, etc
* }
* }</code>
* </pre>
*
* The other way to provide the DLT handler method is through the
* {@link org.springframework.kafka.annotation.DltHandler} annotation,
* that should be used within the same class as the correspondent
* {@link org.springframework.kafka.annotation.KafkaListener}.
*
* <pre>
* <code>@DltHandler
* public void processMessage(MyPojo message) {
* // ... message processing, persistence, etc
* }</code>
*</pre>
*
* If no DLT handler is provided, the default {@link LoggingDltListenerHandlerMethod} is used.
*
* @author Tomaz Fernandes
* @since 2.7
*
* @see RetryTopicConfigurationBuilder
* @see org.springframework.kafka.annotation.RetryableTopic
* @see org.springframework.kafka.annotation.KafkaListener
* @see org.springframework.retry.annotation.Backoff
* @see org.springframework.kafka.listener.SeekToCurrentErrorHandler
* @see org.springframework.kafka.listener.DeadLetterPublishingRecoverer
*
*/
public class RetryTopicConfigurer {

	private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(RetryTopicConfigurer.class));

	/**
	 * The default method to handle messages in the DLT.
	 */
	public static final EndpointHandlerMethod DEFAULT_DLT_HANDLER = createHandlerMethodWith(LoggingDltListenerHandlerMethod.class,
			LoggingDltListenerHandlerMethod.DEFAULT_DLT_METHOD_NAME);

	// Registers the destination topics (main, retries, DLT) and, when configured,
	// triggers creation of the corresponding NewTopic beans.
	private final DestinationTopicProcessor destinationTopicProcessor;

	// Resolves which listener container factory to use for the main and retry endpoints.
	private final ListenerContainerFactoryResolver containerFactoryResolver;

	// Applies retry-topic-specific configuration / decoration to the resolved factory.
	private final ListenerContainerFactoryConfigurer listenerContainerFactoryConfigurer;

	private final BeanFactory beanFactory;

	// Supplies the suffixed endpoint ids, group ids, client id prefixes and topic names
	// for the generated retry / DLT endpoints.
	private final RetryTopicNamesProviderFactory retryTopicNamesProviderFactory;

	// When true, the factory is configured with the pre-2.8.3 (deprecated) behavior;
	// see useLegacyFactoryConfigurer(boolean).
	private boolean useLegacyFactoryConfigurer = false;

	/**
	 * Create an instance with a default {@link SuffixingRetryTopicNamesProviderFactory}.
	 * @param destinationTopicProcessor the destination topic processor.
	 * @param containerFactoryResolver the container factory resolver.
	 * @param listenerContainerFactoryConfigurer the container factory configurer.
	 * @param beanFactory the bean factory.
	 * @deprecated in favor of the constructor that also takes a
	 * {@link RetryTopicNamesProviderFactory}.
	 */
	@Deprecated
	public RetryTopicConfigurer(DestinationTopicProcessor destinationTopicProcessor,
			ListenerContainerFactoryResolver containerFactoryResolver,
			ListenerContainerFactoryConfigurer listenerContainerFactoryConfigurer,
			BeanFactory beanFactory) {
		this(destinationTopicProcessor, containerFactoryResolver, listenerContainerFactoryConfigurer, beanFactory, new SuffixingRetryTopicNamesProviderFactory());
	}

	/**
	 * Create an instance with the provided properties.
	 * @param destinationTopicProcessor the destination topic processor.
	 * @param containerFactoryResolver the container factory resolver.
	 * @param listenerContainerFactoryConfigurer the container factory configurer.
	 * @param beanFactory the bean factory.
	 * @param retryTopicNamesProviderFactory the retry topic names factory.
	 */
	@Autowired
	public RetryTopicConfigurer(DestinationTopicProcessor destinationTopicProcessor,
			ListenerContainerFactoryResolver containerFactoryResolver,
			ListenerContainerFactoryConfigurer listenerContainerFactoryConfigurer,
			BeanFactory beanFactory,
			RetryTopicNamesProviderFactory retryTopicNamesProviderFactory) {
		this.destinationTopicProcessor = destinationTopicProcessor;
		this.containerFactoryResolver = containerFactoryResolver;
		this.listenerContainerFactoryConfigurer = listenerContainerFactoryConfigurer;
		this.beanFactory = beanFactory;
		this.retryTopicNamesProviderFactory = retryTopicNamesProviderFactory;
	}

	/**
	 * Entrypoint for creating and configuring the retry and dlt endpoints, as well as the
	 * container factory that will create the corresponding listenerContainer.
	 * @param endpointProcessor function that will process the endpoints
	 * processListener method.
	 * @param mainEndpoint the endpoint based on which retry and dlt endpoints are also
	 * created and processed.
	 * @param configuration the configuration for the topic.
	 * @param registrar The {@link KafkaListenerEndpointRegistrar} that will register the endpoints.
	 * @param factory The factory provided in the {@link org.springframework.kafka.annotation.KafkaListener}
	 * @param defaultContainerFactoryBeanName The default factory bean name for the
	 * {@link org.springframework.kafka.annotation.KafkaListener}
	 * @throws IllegalArgumentException if the main endpoint is a
	 * {@link MultiMethodKafkaListenerEndpoint}, which is not supported by retry topics.
	 */
	public void processMainAndRetryListeners(EndpointProcessor endpointProcessor,
			MethodKafkaListenerEndpoint<?, ?> mainEndpoint,
			RetryTopicConfiguration configuration,
			KafkaListenerEndpointRegistrar registrar,
			@Nullable KafkaListenerContainerFactory<?> factory,
			String defaultContainerFactoryBeanName) {
		throwIfMultiMethodEndpoint(mainEndpoint);
		DestinationTopicProcessor.Context context =
				new DestinationTopicProcessor.Context(configuration.getDestinationTopicProperties());
		// Register one endpoint per destination (main, each retry, DLT) ...
		configureEndpoints(mainEndpoint, endpointProcessor, factory, registrar, configuration, context,
				defaultContainerFactoryBeanName);
		// ... then resolve the registered destinations and, if configured, create the topics.
		this.destinationTopicProcessor.processRegisteredDestinations(getTopicCreationFunction(configuration), context);
	}

	/**
	 * Process and register one endpoint for every destination topic described by the
	 * configuration (main topic, each retry topic, and the DLT).
	 */
	private void configureEndpoints(MethodKafkaListenerEndpoint<?, ?> mainEndpoint,
			EndpointProcessor endpointProcessor,
			KafkaListenerContainerFactory<?> factory,
			KafkaListenerEndpointRegistrar registrar,
			RetryTopicConfiguration configuration,
			DestinationTopicProcessor.Context context,
			String defaultContainerFactoryBeanName) {
		this.destinationTopicProcessor
				.processDestinationTopicProperties(destinationTopicProperties ->
						processAndRegisterEndpoint(mainEndpoint,
								endpointProcessor,
								factory,
								defaultContainerFactoryBeanName,
								registrar,
								configuration,
								context,
								destinationTopicProperties),
				context);
	}

	/**
	 * Configure and register a single endpoint for the given destination. The main
	 * endpoint instance is reused for the main destination; retry and DLT destinations
	 * get a fresh {@link MethodKafkaListenerEndpoint}, which is then populated by the
	 * {@link EndpointProcessor} and customized (suffixed id, group, topics, etc.).
	 */
	private void processAndRegisterEndpoint(MethodKafkaListenerEndpoint<?, ?> mainEndpoint, EndpointProcessor endpointProcessor,
			KafkaListenerContainerFactory<?> factory,
			String defaultFactoryBeanName,
			KafkaListenerEndpointRegistrar registrar,
			RetryTopicConfiguration configuration, DestinationTopicProcessor.Context context,
			DestinationTopic.Properties destinationTopicProperties) {
		// Main and retry endpoints get differently configured container factories.
		KafkaListenerContainerFactory<?> resolvedFactory =
				destinationTopicProperties.isMainEndpoint()
						? resolveAndConfigureFactoryForMainEndpoint(factory, defaultFactoryBeanName, configuration)
						: resolveAndConfigureFactoryForRetryEndpoint(factory, defaultFactoryBeanName, configuration);
		MethodKafkaListenerEndpoint<?, ?> endpoint = destinationTopicProperties.isMainEndpoint()
				? mainEndpoint
				: new MethodKafkaListenerEndpoint<>();
		endpointProcessor.accept(endpoint);
		// Handler is the listener's own method for main/retry topics, or the DLT handler
		// (possibly the logging default) for the DLT.
		EndpointHandlerMethod endpointBeanMethod =
				getEndpointHandlerMethod(mainEndpoint, configuration, destinationTopicProperties);
		// Suffix the endpoint's names/topics and register each (main topic -> suffixed
		// topic) pair with the destination topic processor.
		createEndpointCustomizer(endpointBeanMethod, destinationTopicProperties)
				.customizeEndpointAndCollectTopics(endpoint)
				.forEach(topicNamesHolder ->
						this.destinationTopicProcessor
								.registerDestinationTopic(topicNamesHolder.getMainTopic(),
										topicNamesHolder.getCustomizedTopic(),
										destinationTopicProperties, context));
		registrar.registerEndpoint(endpoint, resolvedFactory);
		endpoint.setBeanFactory(this.beanFactory);
	}

	/**
	 * Return the handler method for the given destination: the DLT handler (or the
	 * logging default) for a DLT destination, otherwise the main listener's bean method.
	 */
	protected EndpointHandlerMethod getEndpointHandlerMethod(MethodKafkaListenerEndpoint<?, ?> mainEndpoint,
			RetryTopicConfiguration configuration,
			DestinationTopic.Properties props) {
		EndpointHandlerMethod dltHandlerMethod = configuration.getDltHandlerMethod();
		EndpointHandlerMethod retryBeanMethod = new EndpointHandlerMethod(mainEndpoint.getBean(), mainEndpoint.getMethod());
		return props.isDltTopic() ? getDltEndpointHandlerMethodOrDefault(dltHandlerMethod) : retryBeanMethod;
	}

	/**
	 * Return the consumer applied to the registered topic names: creates NewTopic beans
	 * when auto-creation is enabled, otherwise a no-op.
	 */
	private Consumer<Collection<String>> getTopicCreationFunction(RetryTopicConfiguration config) {
		RetryTopicConfiguration.TopicCreation topicCreationConfig = config.forKafkaTopicAutoCreation();
		return topicCreationConfig.shouldCreateTopics()
				? topics -> createNewTopicBeans(topics, topicCreationConfig)
				: topics -> { };
	}

	/**
	 * Register a {@link NewTopic} singleton per topic so the broker topics get created.
	 * NOTE(review): assumes the injected BeanFactory is a DefaultListableBeanFactory;
	 * the cast will throw ClassCastException otherwise — confirm against the bootstrap.
	 */
	protected void createNewTopicBeans(Collection<String> topics, RetryTopicConfiguration.TopicCreation config) {
		topics.forEach(topic ->
				((DefaultListableBeanFactory) this.beanFactory)
						.registerSingleton(topic + "-topicRegistrationBean",
								new NewTopic(topic, config.getNumPartitions(), config.getReplicationFactor()))
		);
	}

	/**
	 * Create the customizer that suffixes ids, groups and topic names for the given
	 * destination, using the configured {@link RetryTopicNamesProviderFactory}.
	 */
	protected EndpointCustomizer createEndpointCustomizer(
			EndpointHandlerMethod endpointBeanMethod, DestinationTopic.Properties destinationTopicProperties) {
		return new EndpointCustomizerFactory(destinationTopicProperties,
				endpointBeanMethod,
				this.beanFactory,
				this.retryTopicNamesProviderFactory)
				.createEndpointCustomizer();
	}

	// Fall back to the logging DLT handler when none was configured.
	private EndpointHandlerMethod getDltEndpointHandlerMethodOrDefault(EndpointHandlerMethod dltEndpointHandlerMethod) {
		return dltEndpointHandlerMethod != null ? dltEndpointHandlerMethod : DEFAULT_DLT_HANDLER;
	}

	/**
	 * Resolve and configure the factory for the main endpoint. Back off values /
	 * container properties are not applied here, per the configurer method names,
	 * so the main listener keeps its own settings.
	 */
	@SuppressWarnings("deprecation")
	private KafkaListenerContainerFactory<?> resolveAndConfigureFactoryForMainEndpoint(
			KafkaListenerContainerFactory<?> providedFactory,
			String defaultFactoryBeanName, RetryTopicConfiguration configuration) {
		ConcurrentKafkaListenerContainerFactory<?, ?> resolvedFactory = this.containerFactoryResolver
				.resolveFactoryForMainEndpoint(providedFactory, defaultFactoryBeanName,
						configuration.forContainerFactoryResolver());
		return this.useLegacyFactoryConfigurer
				? this.listenerContainerFactoryConfigurer
						.configureWithoutBackOffValues(resolvedFactory, configuration.forContainerFactoryConfigurer())
				: this.listenerContainerFactoryConfigurer
						.decorateFactoryWithoutSettingContainerProperties(resolvedFactory,
								configuration.forContainerFactoryConfigurer());
	}

	/**
	 * Resolve and fully configure (or decorate) the factory for a retry/DLT endpoint,
	 * honoring the legacy-configurer toggle.
	 */
	@SuppressWarnings("deprecation")
	private KafkaListenerContainerFactory<?> resolveAndConfigureFactoryForRetryEndpoint(
			KafkaListenerContainerFactory<?> providedFactory,
			String defaultFactoryBeanName,
			RetryTopicConfiguration configuration) {
		ConcurrentKafkaListenerContainerFactory<?, ?> resolvedFactory =
				this.containerFactoryResolver.resolveFactoryForRetryEndpoint(providedFactory, defaultFactoryBeanName,
						configuration.forContainerFactoryResolver());
		return this.useLegacyFactoryConfigurer
				? this.listenerContainerFactoryConfigurer.configure(resolvedFactory,
						configuration.forContainerFactoryConfigurer())
				: this.listenerContainerFactoryConfigurer
						.decorateFactory(resolvedFactory, configuration.forContainerFactoryConfigurer());
	}

	// Retry topics cannot be combined with multi-method (class-level) listeners.
	private void throwIfMultiMethodEndpoint(MethodKafkaListenerEndpoint<?, ?> mainEndpoint) {
		if (mainEndpoint instanceof MultiMethodKafkaListenerEndpoint) {
			throw new IllegalArgumentException("Retry Topic is not compatible with " + MultiMethodKafkaListenerEndpoint.class);
		}
	}

	/**
	 * Create an {@link EndpointHandlerMethod} from a bean (or bean class) and method name.
	 * @param beanOrClass the bean instance or the class to instantiate.
	 * @param methodName the handler method name.
	 * @return the handler method.
	 */
	public static EndpointHandlerMethod createHandlerMethodWith(Object beanOrClass, String methodName) {
		return new EndpointHandlerMethod(beanOrClass, methodName);
	}

	/**
	 * Create an {@link EndpointHandlerMethod} from a bean instance and {@link Method}.
	 * @param bean the bean instance.
	 * @param method the handler method.
	 * @return the handler method.
	 */
	public static EndpointHandlerMethod createHandlerMethodWith(Object bean, Method method) {
		return new EndpointHandlerMethod(bean, method);
	}

	/**
	 * Set to true if you want the {@link ListenerContainerFactoryConfigurer} to
	 * behave as before 2.8.3.
	 * @param useLegacyFactoryConfigurer Whether to use the legacy factory configuration.
	 * @deprecated for removal after the deprecated legacy configuration methods are removed.
	 */
	@Deprecated
	public void useLegacyFactoryConfigurer(boolean useLegacyFactoryConfigurer) {
		this.useLegacyFactoryConfigurer = useLegacyFactoryConfigurer;
	}

	/**
	 * Callback that populates each (main, retry or DLT) endpoint before it is
	 * customized and registered.
	 */
	public interface EndpointProcessor extends Consumer<MethodKafkaListenerEndpoint<?, ?>> {

		default void process(MethodKafkaListenerEndpoint<?, ?> listenerEndpoint) {
			accept(listenerEndpoint);
		}

	}

	/**
	 * Default DLT handler used when none is configured: just logs the received record.
	 */
	static class LoggingDltListenerHandlerMethod {

		public static final String DEFAULT_DLT_METHOD_NAME = "logMessage";

		@SuppressWarnings("deprecation")
		public void logMessage(Object message) {
			if (message instanceof ConsumerRecord) {
				LOGGER.info(() -> "Received message in dlt listener: "
						+ ListenerUtils.recordToString((ConsumerRecord<?, ?>) message));
			}
			else {
				LOGGER.info(() -> "Received message in dlt listener.");
			}
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicConstants.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
 * Constants used by the non-blocking retry topic support.
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public abstract class RetryTopicConstants {

	/**
	 * Sentinel value meaning an integer property has not been set.
	 */
	public static final int NOT_SET = -1;

	/**
	 * Default number of times a record's processing is attempted,
	 * including the first attempt.
	 */
	public static final int DEFAULT_MAX_ATTEMPTS = 3;

	/**
	 * Default suffix appended to the main topic name for retry topics.
	 */
	public static final String DEFAULT_RETRY_SUFFIX = "-retry";

	/**
	 * Default suffix appended to the main topic name for the dead letter topic.
	 */
	public static final String DEFAULT_DLT_SUFFIX = "-dlt";

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicHeaders.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
 * Headers added to the records forwarded to retry and dead letter topics.
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public abstract class RetryTopicHeaders {

	/**
	 * The default header holding the number of processing attempts so far.
	 */
	public static final String DEFAULT_HEADER_ATTEMPTS = "retry_topic-attempts";

	/**
	 * The default header holding the original record's timestamp.
	 */
	public static final String DEFAULT_HEADER_ORIGINAL_TIMESTAMP = "retry_topic-original-timestamp";

	/**
	 * The default header holding the back off due timestamp, i.e. the earliest
	 * moment at which the record should be consumed again.
	 */
	public static final String DEFAULT_HEADER_BACKOFF_TIMESTAMP = "retry_topic-backoff-timestamp";

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicInternalBeanNames.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
 * Internal bean names used by the retryable topic configuration.
 *
 * <p>Providing a bean of your own under one of these names replaces the
 * corresponding default instance.
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public abstract class RetryTopicInternalBeanNames {

	/**
	 * {@link RetryTopicBootstrapper} bean name.
	 */
	public static final String RETRY_TOPIC_BOOTSTRAPPER = "internalRetryTopicBootstrapper";

	/**
	 * {@link RetryTopicConfigurer} bean name.
	 */
	public static final String RETRY_TOPIC_CONFIGURER = "internalRetryTopicConfigurer";

	/**
	 * {@link DestinationTopicProcessor} bean name.
	 */
	public static final String DESTINATION_TOPIC_PROCESSOR_NAME = "internalDestinationTopicProcessor";

	/**
	 * {@link DestinationTopicContainer} bean name.
	 */
	public static final String DESTINATION_TOPIC_CONTAINER_NAME = "internalDestinationTopicContainer";

	/**
	 * {@link ListenerContainerFactoryResolver} bean name.
	 */
	public static final String LISTENER_CONTAINER_FACTORY_RESOLVER_NAME = "internalListenerContainerFactoryResolver";

	/**
	 * {@link ListenerContainerFactoryConfigurer} bean name.
	 */
	public static final String LISTENER_CONTAINER_FACTORY_CONFIGURER_NAME = "internalListenerContainerFactoryConfigurer";

	/**
	 * The default {@link org.springframework.kafka.config.KafkaListenerContainerFactory}
	 * bean name that will be looked up if no other is provided.
	 */
	public static final String DEFAULT_LISTENER_FACTORY_BEAN_NAME = "internalRetryTopicListenerContainerFactory";

	/**
	 * {@link DeadLetterPublishingRecovererFactory} bean name.
	 */
	public static final String DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME =
			"internalDeadLetterPublishingRecovererProvider";

	/**
	 * {@link DeadLetterPublishingRecovererFactory} bean name.
	 * @deprecated in favor of {@link #DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME}
	 */
	@Deprecated
	public static final String DEAD_LETTER_PUBLISHING_RECOVERER_PROVIDER_NAME =
			DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME;

	/**
	 * {@link org.springframework.kafka.listener.KafkaConsumerBackoffManager} bean name.
	 */
	public static final String KAFKA_CONSUMER_BACKOFF_MANAGER = "internalKafkaConsumerBackoffManager";

	/**
	 * {@link org.springframework.kafka.listener.KafkaBackOffManagerFactory} bean name.
	 */
	public static final String INTERNAL_KAFKA_CONSUMER_BACKOFF_MANAGER_FACTORY = "internalKafkaConsumerBackOffManagerFactory";

	/**
	 * {@link org.springframework.kafka.listener.KafkaConsumerTimingAdjuster} bean name.
	 */
	public static final String INTERNAL_BACKOFF_TIMING_ADJUSTMENT_MANAGER = "internalKafkaConsumerTimingAdjustmentManager";

	/**
	 * {@link org.springframework.core.task.TaskExecutor} bean name to be used.
	 * in the {@link org.springframework.kafka.listener.WakingKafkaConsumerTimingAdjuster}
	 */
	public static final String BACKOFF_TASK_EXECUTOR = "internalBackOffTaskExecutor";

	/**
	 * {@link org.springframework.retry.backoff.Sleeper} bean name.
	 */
	public static final String BACKOFF_SLEEPER_BEAN_NAME = "internalBackoffSleeper";

	/**
	 * The {@link java.time.Clock} bean name that will be used for backing off partitions.
	 */
	public static final String INTERNAL_BACKOFF_CLOCK_BEAN_NAME = "internalBackOffClock";

	/**
	 * {@link RetryTopicNamesProviderFactory} bean name.
	 */
	public static final String RETRY_TOPIC_NAMES_PROVIDER_FACTORY = "internalRetryTopicNamesProviderFactory";

	/**
	 * Default {@link org.springframework.kafka.core.KafkaTemplate} bean name for publishing to retry topics.
	 */
	public static final String DEFAULT_KAFKA_TEMPLATE_BEAN_NAME = "retryTopicDefaultKafkaTemplate";

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/RetryTopicNamesProviderFactory.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
/**
 * Handles the naming related to the retry and dead letter topics.
 *
 * @author Andrea Polci
 * @see SuffixingRetryTopicNamesProviderFactory
 */
public interface RetryTopicNamesProviderFactory {

	/**
	 * Create a {@link RetryTopicNamesProvider} for the destination topic with the
	 * given properties.
	 *
	 * @param properties the destination topic's properties
	 * @return the names provider
	 */
	RetryTopicNamesProvider createRetryTopicNamesProvider(DestinationTopic.Properties properties);

	/**
	 * Provides the names (endpoint id, group id, client id prefix, group and topic
	 * name) to be used for a retry or dead letter endpoint.
	 */
	interface RetryTopicNamesProvider {

		/**
		 * Return the endpoint id that will override the endpoint's current id.
		 *
		 * @param endpoint the endpoint to override
		 * @return The endpoint id
		 */
		String getEndpointId(MethodKafkaListenerEndpoint<?, ?> endpoint);

		/**
		 * Return the groupId that will override the endpoint's groupId.
		 *
		 * @param endpoint the endpoint to override
		 * @return The groupId
		 */
		String getGroupId(MethodKafkaListenerEndpoint<?, ?> endpoint);

		/**
		 * Return the clientId prefix that will override the endpoint's clientId prefix.
		 *
		 * @param endpoint the endpoint to override
		 * @return The clientId prefix
		 */
		String getClientIdPrefix(MethodKafkaListenerEndpoint<?, ?> endpoint);

		/**
		 * Return the group that will override the endpoint's group.
		 *
		 * @param endpoint the endpoint to override
		 * @return The group
		 */
		String getGroup(MethodKafkaListenerEndpoint<?, ?> endpoint);

		/**
		 * Return the topic name that will override the base topic name.
		 *
		 * @param topic the base topic name
		 * @return The topic name
		 */
		String getTopicName(String topic);

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/SuffixingRetryTopicNamesProviderFactory.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
import org.springframework.kafka.support.Suffixer;
/**
 * {@link RetryTopicNamesProviderFactory} implementation that derives every name by
 * appending the suffix from {@link DestinationTopic.Properties#suffix()} to the
 * corresponding original value.
 *
 * @author Andrea Polci
 */
public class SuffixingRetryTopicNamesProviderFactory implements RetryTopicNamesProviderFactory {

	@Override
	public RetryTopicNamesProvider createRetryTopicNamesProvider(DestinationTopic.Properties properties) {
		return new SuffixingRetryTopicNamesProvider(properties);
	}

	/**
	 * Names provider that suffixes each name with the destination topic's suffix.
	 */
	public static class SuffixingRetryTopicNamesProvider implements RetryTopicNamesProvider {

		private final Suffixer nameSuffixer;

		public SuffixingRetryTopicNamesProvider(DestinationTopic.Properties properties) {
			this.nameSuffixer = new Suffixer(properties.suffix());
		}

		@Override
		public String getEndpointId(MethodKafkaListenerEndpoint<?, ?> endpoint) {
			return this.nameSuffixer.maybeAddTo(endpoint.getId());
		}

		@Override
		public String getGroupId(MethodKafkaListenerEndpoint<?, ?> endpoint) {
			return this.nameSuffixer.maybeAddTo(endpoint.getGroupId());
		}

		@Override
		public String getClientIdPrefix(MethodKafkaListenerEndpoint<?, ?> endpoint) {
			return this.nameSuffixer.maybeAddTo(endpoint.getClientIdPrefix());
		}

		@Override
		public String getGroup(MethodKafkaListenerEndpoint<?, ?> endpoint) {
			return this.nameSuffixer.maybeAddTo(endpoint.getGroup());
		}

		@Override
		public String getTopicName(String topic) {
			return this.nameSuffixer.maybeAddTo(topic);
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/TopicSuffixingStrategy.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.retrytopic;
/**
 *
 * Strategies for suffixing the names of the generated retry topics.
 *
 * @author Tomaz Fernandes
 * @since 2.7
 *
 */
public enum TopicSuffixingStrategy {

	/**
	 * Suffixes the topics with their index in the retry topics.
	 * E.g. my-retry-topic-0, my-retry-topic-1, my-retry-topic-2.
	 */
	SUFFIX_WITH_INDEX_VALUE,

	/**
	 * Suffixes the topics with the delay value for the topic.
	 * E.g. my-retry-topic-1000, my-retry-topic-2000, my-retry-topic-4000.
	 */
	SUFFIX_WITH_DELAY_VALUE

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/retrytopic/package-info.java | /**
* Package for retryable topic handling.
*/
package org.springframework.kafka.retrytopic;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/security | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/security/jaas/KafkaJaasLoginModuleInitializer.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.security.jaas;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import org.apache.kafka.common.security.JaasUtils;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.SmartInitializingSingleton;
import org.springframework.util.Assert;
/**
 * Contains properties for setting up an {@link AppConfigurationEntry} that can be used
 * for the Kafka client.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 *
 * @since 1.3
 */
public class KafkaJaasLoginModuleInitializer implements SmartInitializingSingleton, DisposableBean {

	/**
	 * The key for the kafka client configuration entry.
	 */
	public static final String KAFKA_CLIENT_CONTEXT_NAME = "KafkaClient";

	/**
	 * Control flag values for login configuration.
	 */
	public enum ControlFlag {

		/**
		 * Required - The {@code LoginModule} is required to succeed. If it succeeds or
		 * fails, authentication still continues to proceed down the {@code LoginModule}
		 * list.
		 *
		 */
		REQUIRED,

		/**
		 * Requisite - The {@code LoginModule} is required to succeed. If it succeeds,
		 * authentication continues down the {@code LoginModule} list. If it fails,
		 * control immediately returns to the application (authentication does not proceed
		 * down the {@code LoginModule} list).
		 */
		REQUISITE,

		/**
		 * Sufficient - The {@code LoginModule} is not required to succeed. If it does
		 * succeed, control immediately returns to the application (authentication does
		 * not proceed down the {@code LoginModule} list). If it fails, authentication
		 * continues down the {@code LoginModule} list.
		 */
		SUFFICIENT,

		/**
		 * Optional - The {@code LoginModule} is not required to succeed. If it succeeds
		 * or fails, authentication still continues to proceed down the
		 * {@code LoginModule} list.
		 */
		OPTIONAL

	}

	// true when the JAAS config system property was NOT set at application launch;
	// only in that case do we install (and later clear) a programmatic configuration.
	private final boolean ignoreJavaLoginConfigParamSystemProperty;

	// Empty temp file whose path is installed as the JAAS config system property;
	// see afterSingletonsInstantiated() for the rationale for this workaround.
	private final File placeholderJaasConfiguration;

	// Options passed verbatim to the AppConfigurationEntry.
	private final Map<String, String> options = new HashMap<>();

	// JAAS login module class name; defaults to the JDK's Kerberos module.
	private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";

	private AppConfigurationEntry.LoginModuleControlFlag controlFlag =
			AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;

	/**
	 * Create an instance, remembering whether the JAAS config system property was set
	 * at launch and preparing a placeholder configuration file.
	 * @throws IOException if the placeholder temp file cannot be created.
	 */
	public KafkaJaasLoginModuleInitializer() throws IOException {
		// we ignore the system property if it wasn't originally set at launch
		this.ignoreJavaLoginConfigParamSystemProperty = (System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) == null);
		this.placeholderJaasConfiguration = File.createTempFile("kafka-client-jaas-config-placeholder", "conf");
		this.placeholderJaasConfiguration.deleteOnExit();
	}

	/**
	 * Set the login module class name; default is the JDK Krb5 login module.
	 * @param loginModule the login module class name.
	 */
	public void setLoginModule(String loginModule) {
		Assert.notNull(loginModule, "cannot be null");
		this.loginModule = loginModule;
	}

	/**
	 * Set the control flag; default {@link ControlFlag#REQUIRED}.
	 * @param controlFlag the control flag.
	 */
	public void setControlFlag(ControlFlag controlFlag) {
		Assert.notNull(controlFlag, "cannot be null");
		// Map our enum onto the JAAS LoginModuleControlFlag singletons; the default
		// branch guards against future enum additions.
		switch (controlFlag) {
			case OPTIONAL:
				this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL;
				break;
			case REQUIRED:
				this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
				break;
			case REQUISITE:
				this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE;
				break;
			case SUFFICIENT:
				this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT;
				break;
			default:
				throw new IllegalArgumentException(controlFlag + " is not a supported control flag");
		}
	}

	/**
	 * Set the options for the login module; replaces any previously set options.
	 * @param options the options.
	 */
	public void setOptions(Map<String, String> options) {
		this.options.clear();
		this.options.putAll(options);
	}

	@Override
	public void afterSingletonsInstantiated() {
		// only use programmatic support if a file is not set via system property
		if (this.ignoreJavaLoginConfigParamSystemProperty) {
			Map<String, AppConfigurationEntry[]> configurationEntries = new HashMap<>();
			AppConfigurationEntry kafkaClientConfigurationEntry = new AppConfigurationEntry(
					this.loginModule,
					this.controlFlag,
					this.options);
			configurationEntries.put(KAFKA_CLIENT_CONTEXT_NAME,
					new AppConfigurationEntry[] { kafkaClientConfigurationEntry });
			Configuration.setConfiguration(new InternalConfiguration(configurationEntries));
			// Workaround for a 0.9 client issue where even if the Configuration is
			// set
			// a system property check is performed.
			// Since the Configuration already exists, this will be ignored.
			if (this.placeholderJaasConfiguration != null) {
				System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM,
						this.placeholderJaasConfiguration.getAbsolutePath());
			}
		}
	}

	@Override
	public void destroy() {
		// Only clear the property when we own it, i.e. when it was absent at launch
		// and (possibly) set by afterSingletonsInstantiated().
		if (this.ignoreJavaLoginConfigParamSystemProperty) {
			System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
		}
	}

	/**
	 * Simple map-backed JAAS {@link Configuration}; returns the entries registered
	 * under the requested context name, or {@code null} if none.
	 */
	private static class InternalConfiguration extends Configuration {

		private final Map<String, AppConfigurationEntry[]> configurationEntries;

		InternalConfiguration(Map<String, AppConfigurationEntry[]> configurationEntries) {
			Assert.notNull(configurationEntries, " cannot be null");
			Assert.notEmpty(configurationEntries, " cannot be empty");
			this.configurationEntries = configurationEntries;
		}

		@Override
		public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
			return this.configurationEntries.get(name);
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/security | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/security/jaas/package-info.java | /**
* Provides classes related to jaas.
*/
package org.springframework.kafka.security.jaas;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/HeaderEnricher.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.streams;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.springframework.expression.Expression;
/**
 * Manipulate the headers: for each configured (header name, SpEL expression) pair,
 * evaluate the expression against a {@link Container} exposing the record's key,
 * value and {@link ProcessorContext}, and add the result as a record header.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 * @since 2.3
 *
 */
public class HeaderEnricher<K, V> implements Transformer<K, V, KeyValue<K, V>> {

	private final Map<String, Expression> headerExpressions = new HashMap<>();

	private ProcessorContext processorContext;

	/**
	 * Create an instance with the provided header expressions.
	 * @param headerExpressions a map of header name to the SpEL expression producing
	 * its value; expressions must evaluate to {@code String} or {@code byte[]}.
	 */
	public HeaderEnricher(Map<String, Expression> headerExpressions) {
		this.headerExpressions.putAll(headerExpressions);
	}

	@Override
	public void init(ProcessorContext context) {
		this.processorContext = context;
	}

	/**
	 * Evaluate each expression and add the result as a header; the key and value are
	 * passed through unchanged.
	 * @throws IllegalStateException if an expression evaluates to {@code null} or to
	 * a type other than {@code String} or {@code byte[]}.
	 */
	@Override
	public KeyValue<K, V> transform(K key, V value) {
		Headers headers = this.processorContext.headers();
		Container<K, V> container = new Container<>(this.processorContext, key, value);
		this.headerExpressions.forEach((name, expression) -> {
			Object headerValue = expression.getValue(container);
			if (headerValue instanceof String) {
				headerValue = ((String) headerValue).getBytes(StandardCharsets.UTF_8);
			}
			else if (!(headerValue instanceof byte[])) {
				// Covers null too: 'null instanceof byte[]' is false, and without this
				// guard the getClass() call below would throw an opaque NPE instead of
				// a clear IllegalStateException.
				throw new IllegalStateException("Invalid header value type: "
						+ (headerValue == null ? "null" : headerValue.getClass()));
			}
			headers.add(new RecordHeader(name, (byte[]) headerValue));
		});
		return new KeyValue<>(key, value);
	}

	@Override
	public void close() {
		// NO-OP
	}

	/**
	 * Container object for SpEL evaluation.
	 *
	 * @param <K> the key type.
	 * @param <V> the value type.
	 *
	 */
	public static final class Container<K, V> {

		private final ProcessorContext context;

		private final K key;

		private final V value;

		private Container(ProcessorContext context, K key, V value) {
			this.context = context;
			this.key = key;
			this.value = value;
		}

		public ProcessorContext getContext() {
			return this.context;
		}

		public K getKey() {
			return this.key;
		}

		public V getValue() {
			return this.value;
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/KafkaStreamsMicrometerListener.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.streams;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.streams.KafkaStreams;
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
import io.micrometer.core.instrument.ImmutableTag;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.binder.kafka.KafkaStreamsMetrics;
/**
* Creates a {@link KafkaStreamsMetrics} for the {@link KafkaStreams}.
*
* @author Gary Russell
* @since 2.5.3
*
*/
public class KafkaStreamsMicrometerListener implements StreamsBuilderFactoryBean.Listener {

    private final MeterRegistry meterRegistry;

    private final List<Tag> tags;

    // Keyed by the factory bean id so the binder can be closed when the
    // corresponding KafkaStreams instance is removed.
    private final Map<String, KafkaStreamsMetrics> metrics = new HashMap<>();

    /**
     * Construct an instance with the provided registry.
     * @param meterRegistry the registry.
     */
    public KafkaStreamsMicrometerListener(MeterRegistry meterRegistry) {
        this(meterRegistry, Collections.emptyList());
    }

    /**
     * Construct an instance with the provided registry and tags.
     * @param meterRegistry the registry.
     * @param tags additional tags applied to every meter.
     */
    public KafkaStreamsMicrometerListener(MeterRegistry meterRegistry, List<Tag> tags) {
        this.meterRegistry = meterRegistry;
        this.tags = tags;
    }

    @Override
    public synchronized void streamsAdded(String id, KafkaStreams kafkaStreams) {
        // computeIfAbsent replaces the previous containsKey/put/get sequence; the
        // binder is created and bound to the registry only on first registration.
        this.metrics.computeIfAbsent(id, streamsId -> {
            List<Tag> streamsTags = new ArrayList<>(this.tags);
            streamsTags.add(new ImmutableTag("spring.id", streamsId));
            KafkaStreamsMetrics streamsMetrics = new KafkaStreamsMetrics(kafkaStreams, streamsTags);
            streamsMetrics.bindTo(this.meterRegistry);
            return streamsMetrics;
        });
    }

    @Override
    public synchronized void streamsRemoved(String id, KafkaStreams streams) {
        KafkaStreamsMetrics removed = this.metrics.remove(id);
        if (removed != null) {
            // Closing the binder unregisters its meters from the registry.
            removed.close();
        }
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/RecoveringDeserializationExceptionHandler.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.streams;
import java.lang.reflect.Constructor;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.springframework.kafka.listener.ConsumerRecordRecoverer;
import org.springframework.util.ClassUtils;
/**
 * A {@link DeserializationExceptionHandler} that calls a {@link ConsumerRecordRecoverer}
 * and continues processing.
*
* @author Gary Russell
* @since 2.3
*
*/
public class RecoveringDeserializationExceptionHandler implements DeserializationExceptionHandler {

    /**
     * Property name for configuring the recoverer using properties.
     */
    public static final String KSTREAM_DESERIALIZATION_RECOVERER = "spring.deserialization.recoverer";

    private static final Log LOGGER = LogFactory.getLog(RecoveringDeserializationExceptionHandler.class);

    private ConsumerRecordRecoverer recoverer;

    public RecoveringDeserializationExceptionHandler() {
    }

    public RecoveringDeserializationExceptionHandler(ConsumerRecordRecoverer recoverer) {
        this.recoverer = recoverer;
    }

    /**
     * Delegate the failed record to the recoverer and continue; FAIL when no recoverer
     * is configured or the recoverer itself throws.
     * @param context the processor context.
     * @param record the record that failed deserialization.
     * @param exception the deserialization exception.
     * @return CONTINUE after successful recovery, FAIL otherwise.
     */
    @Override
    public DeserializationHandlerResponse handle(ProcessorContext context, ConsumerRecord<byte[], byte[]> record,
            Exception exception) {
        if (this.recoverer == null) {
            return DeserializationHandlerResponse.FAIL;
        }
        try {
            this.recoverer.accept(record, exception);
            return DeserializationHandlerResponse.CONTINUE;
        }
        catch (RuntimeException e) {
            LOGGER.error("Recoverer threw an exception; recovery failed", e);
            return DeserializationHandlerResponse.FAIL;
        }
    }

    /**
     * Configure the recoverer from the {@link #KSTREAM_DESERIALIZATION_RECOVERER}
     * property, which may be an instance, a {@link Class}, or a fully qualified
     * class name; other types are logged and ignored.
     * @param configs the configuration map.
     */
    @Override
    public void configure(Map<String, ?> configs) {
        if (configs.containsKey(KSTREAM_DESERIALIZATION_RECOVERER)) {
            Object configValue = configs.get(KSTREAM_DESERIALIZATION_RECOVERER);
            if (configValue instanceof ConsumerRecordRecoverer) {
                this.recoverer = (ConsumerRecordRecoverer) configValue;
            }
            else if (configValue instanceof String) {
                fromString(configValue);
            }
            else if (configValue instanceof Class) {
                fromClass(configValue);
            }
            else {
                LOGGER.error("Unknown property type for " + KSTREAM_DESERIALIZATION_RECOVERER
                        + "; failed deserializations cannot be recovered");
            }
        }
    }

    // Resolve the recoverer from a fully qualified class name.
    private void fromString(Object configValue) {
        try {
            @SuppressWarnings("unchecked")
            Class<? extends ConsumerRecordRecoverer> clazz =
                    (Class<? extends ConsumerRecordRecoverer>) ClassUtils
                            .forName((String) configValue,
                                    RecoveringDeserializationExceptionHandler.class.getClassLoader());
            instantiate(clazz);
        }
        catch (Exception e) {
            LOGGER.error("Failed to instantiate recoverer, failed deserializations cannot be recovered", e);
        }
    }

    // Resolve the recoverer from a Class supplied directly in the configuration.
    private void fromClass(Object configValue) {
        try {
            @SuppressWarnings("unchecked")
            Class<? extends ConsumerRecordRecoverer> clazz =
                    (Class<? extends ConsumerRecordRecoverer>) configValue;
            instantiate(clazz);
        }
        catch (Exception e) {
            LOGGER.error("Failed to instantiate recoverer, failed deserializations cannot be recovered", e);
        }
    }

    // Common reflective instantiation via the public no-arg constructor.
    private void instantiate(Class<? extends ConsumerRecordRecoverer> clazz)
            throws ReflectiveOperationException {
        Constructor<? extends ConsumerRecordRecoverer> constructor = clazz.getConstructor();
        this.recoverer = constructor.newInstance();
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/package-info.java | /**
* Package for classes related to Kafka Streams.
*/
package org.springframework.kafka.streams;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/messaging/MessagingFunction.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.streams.messaging;
import org.springframework.messaging.Message;
/**
* A function that receives a spring-messaging {@link Message} and returns
* a {@link Message}.
*
* @author Gary Russell
* @since 2.3
*
*/
@FunctionalInterface
public interface MessagingFunction {

    /**
     * Process the supplied message and return the resulting message, whose key,
     * payload and headers will be written back to the output record.
     * @param message the inbound message converted from the Kafka record.
     * @return the outbound message (may be a new instance).
     */
    Message<?> exchange(Message<?> message);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/messaging/MessagingTransformer.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.streams.messaging;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.messaging.Message;
import org.springframework.util.Assert;
/**
* A {@link Transformer} implementation that invokes a {@link MessagingFunction}
* converting to/from spring-messaging {@link Message}. Can be used, for example,
* to invoke a Spring Integration flow.
*
* @param <K> the key type.
* @param <V> the value type.
* @param <R> the result value type.
*
* @author Gary Russell
* @since 2.3
*
*/
public class MessagingTransformer<K, V, R> implements Transformer<K, V, KeyValue<K, R>> {

    private final MessagingFunction function;

    private final MessagingMessageConverter converter;

    // Captured in init(); provides the current record's topic/partition/offset/headers.
    private ProcessorContext processorContext;

    /**
     * Construct an instance with the provided function and converter.
     * @param function the function invoked with the converted message.
     * @param converter the converter mapping records to/from spring-messaging messages.
     */
    public MessagingTransformer(MessagingFunction function, MessagingMessageConverter converter) {
        Assert.notNull(function, "'function' cannot be null");
        Assert.notNull(converter, "'converter' cannot be null");
        this.function = function;
        this.converter = converter;
    }

    @Override
    public void init(ProcessorContext context) {
        this.processorContext = context;
    }

    /**
     * Convert the current stream record to a {@link Message}, invoke the function, and
     * map the result back: the record's headers are replaced by the headers of the
     * produced message, the payload becomes the new value, and the
     * {@link KafkaHeaders#MESSAGE_KEY} header (if present) overrides the key.
     * @param key the record key.
     * @param value the record value.
     * @return the new key/value pair.
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    @Override
    public KeyValue<K, R> transform(K key, V value) {
        Headers headers = this.processorContext.headers();
        // Rebuild a ConsumerRecord view of the current record so the converter can map
        // it to a Message; serialized key/value sizes are unavailable here, hence 0, 0.
        ConsumerRecord<Object, Object> record = new ConsumerRecord<Object, Object>(this.processorContext.topic(),
                this.processorContext.partition(), this.processorContext.offset(),
                this.processorContext.timestamp(), TimestampType.NO_TIMESTAMP_TYPE,
                0, 0,
                key, value,
                headers, Optional.empty());
        Message<?> message = this.converter.toMessage(record, null, null, null);
        message = this.function.exchange(message);
        // Clear the live header collection (snapshot names first — Headers cannot be
        // mutated while iterating) so only the function's output headers remain.
        List<String> headerList = new ArrayList<>();
        headers.forEach(header -> headerList.add(header.key()));
        headerList.forEach(name -> headers.remove(name));
        // The converter requires a topic argument but it is only used for the TOPIC
        // header, which is filtered out below — hence the "dummy" placeholder.
        ProducerRecord<?, ?> fromMessage = this.converter.fromMessage(message, "dummy");
        fromMessage.headers().forEach(header -> {
            if (!header.key().equals(KafkaHeaders.TOPIC)) {
                headers.add(header);
            }
        });
        // A MESSAGE_KEY header set by the function replaces the original key.
        Object key2 = message.getHeaders().get(KafkaHeaders.MESSAGE_KEY);
        return new KeyValue(key2 == null ? key : key2, message.getPayload());
    }

    @Override
    public void close() {
        // NO-OP
    }

}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.