index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/MethodKafkaListenerEndpoint.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.lang.reflect.Method;
import java.util.Arrays;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.config.BeanExpressionContext;
import org.springframework.beans.factory.config.BeanExpressionResolver;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.core.log.LogAccessor;
import org.springframework.expression.BeanResolver;
import org.springframework.kafka.listener.KafkaListenerErrorHandler;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter;
import org.springframework.kafka.listener.adapter.BatchToRecordAdapter;
import org.springframework.kafka.listener.adapter.HandlerAdapter;
import org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter;
import org.springframework.kafka.listener.adapter.RecordMessagingMessageListenerAdapter;
import org.springframework.kafka.support.JavaUtils;
import org.springframework.kafka.support.converter.BatchMessageConverter;
import org.springframework.kafka.support.converter.MessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.lang.Nullable;
import org.springframework.messaging.converter.SmartMessageConverter;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.messaging.handler.annotation.support.MessageHandlerMethodFactory;
import org.springframework.messaging.handler.invocation.InvocableHandlerMethod;
import org.springframework.util.Assert;
/**
* A {@link KafkaListenerEndpoint} providing the method to invoke to process
* an incoming message for this endpoint.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Artem Bilan
* @author Gary Russell
* @author Venil Noronha
*/
public class MethodKafkaListenerEndpoint<K, V> extends AbstractKafkaListenerEndpoint<K, V> {

	private final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass()));

	// The target object on which 'method' is invoked for each record.
	private Object bean;

	// The listener method; may carry an @SendTo annotation for replies.
	private Method method;

	// Factory used to wrap 'method' into an InvocableHandlerMethod; must be set before createMessageListener().
	private MessageHandlerMethodFactory messageHandlerMethodFactory;

	private KafkaListenerErrorHandler errorHandler;

	private SmartMessageConverter messagingConverter;

	/**
	 * Set the object instance that should manage this endpoint.
	 * @param bean the target bean instance.
	 */
	public void setBean(Object bean) {
		this.bean = bean;
	}

	/**
	 * Return the bean instance that manages this endpoint.
	 * @return the bean.
	 */
	public Object getBean() {
		return this.bean;
	}

	/**
	 * Set the method to invoke to process a message managed by this endpoint.
	 * @param method the target method for the {@link #bean}.
	 */
	public void setMethod(Method method) {
		this.method = method;
	}

	/**
	 * Return the method invoked to process a message managed by this endpoint.
	 * @return the method.
	 */
	public Method getMethod() {
		return this.method;
	}

	/**
	 * Set the {@link MessageHandlerMethodFactory} to use to build the
	 * {@link InvocableHandlerMethod} responsible to manage the invocation
	 * of this endpoint.
	 * @param messageHandlerMethodFactory the {@link MessageHandlerMethodFactory} instance.
	 */
	public void setMessageHandlerMethodFactory(MessageHandlerMethodFactory messageHandlerMethodFactory) {
		this.messageHandlerMethodFactory = messageHandlerMethodFactory;
	}

	/**
	 * Set the {@link KafkaListenerErrorHandler} to invoke if the listener method
	 * throws an exception.
	 * @param errorHandler the error handler.
	 * @since 1.3
	 */
	public void setErrorHandler(KafkaListenerErrorHandler errorHandler) {
		this.errorHandler = errorHandler;
	}

	/**
	 * Set a spring-messaging {@link SmartMessageConverter} to convert the record value to
	 * the desired type. This will also cause the
	 * {@link org.springframework.messaging.MessageHeaders#CONTENT_TYPE} to be converted
	 * to String when mapped inbound.
	 * @param messagingConverter the converter.
	 * @since 2.7.1
	 */
	public void setMessagingConverter(SmartMessageConverter messagingConverter) {
		this.messagingConverter = messagingConverter;
	}

	/**
	 * Determine the reply topic from an {@code @SendTo} annotation on the listener
	 * method, resolving embedded {@code ${...}} placeholders and SpEL expressions.
	 * @return the resolved topic, an empty string if {@code @SendTo} has no value,
	 * or {@code null} if there is no method or no annotation.
	 */
	@Nullable
	private String getReplyTopic() {
		Method replyingMethod = getMethod();
		if (replyingMethod != null) {
			SendTo ann = AnnotationUtils.getAnnotation(replyingMethod, SendTo.class);
			if (ann != null) {
				// A void method cannot produce a reply, but the error handler still can,
				// so only warn (do not fail) in that case.
				if (replyingMethod.getReturnType().equals(void.class)) {
					this.logger.warn(() -> "Method "
							+ replyingMethod
							+ " has a void return type; @SendTo is ignored" +
							(this.errorHandler == null ? "" : " unless the error handler returns a result"));
				}
				String[] destinations = ann.value();
				// At most one destination is supported for Kafka replies.
				if (destinations.length > 1) {
					throw new IllegalStateException("Invalid @" + SendTo.class.getSimpleName() + " annotation on '"
							+ replyingMethod + "' one destination must be set (got " + Arrays.toString(destinations) + ")");
				}
				String topic = destinations.length == 1 ? destinations[0] : "";
				BeanFactory beanFactory = getBeanFactory();
				if (beanFactory instanceof ConfigurableListableBeanFactory) {
					// First resolve ${...} property placeholders, then evaluate any SpEL expression.
					topic = ((ConfigurableListableBeanFactory) beanFactory).resolveEmbeddedValue(topic);
					if (topic != null) {
						topic = resolve(topic);
					}
				}
				return topic;
			}
		}
		return null;
	}

	/**
	 * Return the {@link MessageHandlerMethodFactory}.
	 * @return the messageHandlerMethodFactory
	 */
	protected MessageHandlerMethodFactory getMessageHandlerMethodFactory() {
		return this.messageHandlerMethodFactory;
	}

	/**
	 * Create the listener adapter for the container: instantiate a record or batch
	 * adapter, attach the invocable handler method, and configure reply support.
	 */
	@Override
	protected MessagingMessageListenerAdapter<K, V> createMessageListener(MessageListenerContainer container,
			@Nullable MessageConverter messageConverter) {

		Assert.state(this.messageHandlerMethodFactory != null,
				"Could not create message listener - MessageHandlerMethodFactory not set");
		MessagingMessageListenerAdapter<K, V> messageListener = createMessageListenerInstance(messageConverter);
		messageListener.setHandlerMethod(configureListenerAdapter(messageListener));
		JavaUtils.INSTANCE
				.acceptIfNotNull(getReplyTopic(), replyTopic -> {
					// A reply topic only makes sense with a template, unless the method is
					// void (in which case any reply would come from the error handler).
					Assert.state(getMethod().getReturnType().equals(void.class)
							|| getReplyTemplate() != null, "a KafkaTemplate is required to support replies");
					messageListener.setReplyTopic(replyTopic);
				})
				.acceptIfNotNull(getReplyTemplate(), messageListener::setReplyTemplate);
		return messageListener;
	}

	/**
	 * Create a {@link HandlerAdapter} for this listener adapter.
	 * @param messageListener the listener adapter.
	 * @return the handler adapter.
	 */
	protected HandlerAdapter configureListenerAdapter(MessagingMessageListenerAdapter<K, V> messageListener) {
		InvocableHandlerMethod invocableHandlerMethod =
				this.messageHandlerMethodFactory.createInvocableHandlerMethod(getBean(), getMethod());
		return new HandlerAdapter(invocableHandlerMethod);
	}

	/**
	 * Create an empty {@link MessagingMessageListenerAdapter} instance.
	 * @param messageConverter the converter (may be null).
	 * @return the {@link MessagingMessageListenerAdapter} instance.
	 */
	protected MessagingMessageListenerAdapter<K, V> createMessageListenerInstance(
			@Nullable MessageConverter messageConverter) {

		MessagingMessageListenerAdapter<K, V> listener;
		if (isBatchListener()) {
			BatchMessagingMessageListenerAdapter<K, V> messageListener = new BatchMessagingMessageListenerAdapter<K, V>(
					this.bean, this.method, this.errorHandler);
			BatchToRecordAdapter<K, V> batchToRecordAdapter = getBatchToRecordAdapter();
			if (batchToRecordAdapter != null) {
				messageListener.setBatchToRecordAdapter(batchToRecordAdapter);
			}
			// Only apply the converter if it matches the batch adapter's expected type.
			if (messageConverter instanceof BatchMessageConverter) {
				messageListener.setBatchMessageConverter((BatchMessageConverter) messageConverter);
			}
			listener = messageListener;
		}
		else {
			RecordMessagingMessageListenerAdapter<K, V> messageListener = new RecordMessagingMessageListenerAdapter<K, V>(
					this.bean, this.method, this.errorHandler);
			if (messageConverter instanceof RecordMessageConverter) {
				messageListener.setMessageConverter((RecordMessageConverter) messageConverter);
			}
			listener = messageListener;
		}
		if (this.messagingConverter != null) {
			listener.setMessagingConverter(this.messagingConverter);
		}
		BeanResolver resolver = getBeanResolver();
		if (resolver != null) {
			listener.setBeanResolver(resolver);
		}
		return listener;
	}

	/**
	 * Evaluate the value as a SpEL expression if a resolver and expression context
	 * are available; otherwise return the value unchanged.
	 * @param value the candidate expression.
	 * @return the resolved String.
	 */
	@SuppressWarnings("null")
	private String resolve(String value) {
		BeanExpressionContext beanExpressionContext = getBeanExpressionContext();
		BeanExpressionResolver resolver = getResolver();
		if (resolver != null && beanExpressionContext != null) {
			Object newValue = resolver.evaluate(value, beanExpressionContext);
			Assert.isInstanceOf(String.class, newValue, "Invalid @SendTo expression");
			return (String) newValue;
		}
		else {
			return value;
		}
	}

	// Appended to the parent description for log/error output.
	@Override
	protected StringBuilder getEndpointDescription() {
		return super.getEndpointDescription()
				.append(" | bean='").append(this.bean).append("'")
				.append(" | method='").append(this.method).append("'");
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/MultiMethodKafkaListenerEndpoint.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import org.springframework.kafka.listener.adapter.DelegatingInvocableHandler;
import org.springframework.kafka.listener.adapter.HandlerAdapter;
import org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter;
import org.springframework.lang.Nullable;
import org.springframework.messaging.handler.invocation.InvocableHandlerMethod;
import org.springframework.validation.Validator;
/**
* The {@link MethodKafkaListenerEndpoint} extension for several POJO methods
* based on the {@link org.springframework.kafka.annotation.KafkaHandler}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
*
* @see org.springframework.kafka.annotation.KafkaHandler
* @see DelegatingInvocableHandler
*/
public class MultiMethodKafkaListenerEndpoint<K, V> extends MethodKafkaListenerEndpoint<K, V> {
private final List<Method> methods;
private final Method defaultMethod;
private Validator validator;
/**
* Construct an instance for the provided methods, default method and bean.
* @param methods the methods.
* @param defaultMethod the default method.
* @param bean the bean.
* @since 2.1.3
*/
public MultiMethodKafkaListenerEndpoint(List<Method> methods, @Nullable Method defaultMethod, Object bean) {
this.methods = methods;
this.defaultMethod = defaultMethod;
setBean(bean);
}
/**
* Set a payload validator.
* @param validator the validator.
* @since 2.5.11
*/
public void setValidator(Validator validator) {
this.validator = validator;
}
@Override
protected HandlerAdapter configureListenerAdapter(MessagingMessageListenerAdapter<K, V> messageListener) {
List<InvocableHandlerMethod> invocableHandlerMethods = new ArrayList<InvocableHandlerMethod>();
InvocableHandlerMethod defaultHandler = null;
for (Method method : this.methods) {
InvocableHandlerMethod handler = getMessageHandlerMethodFactory()
.createInvocableHandlerMethod(getBean(), method);
invocableHandlerMethods.add(handler);
if (method.equals(this.defaultMethod)) {
defaultHandler = handler;
}
}
DelegatingInvocableHandler delegatingHandler = new DelegatingInvocableHandler(invocableHandlerMethods,
defaultHandler, getBean(), getResolver(), getBeanExpressionContext(), getBeanFactory(), this.validator);
return new HandlerAdapter(delegatingHandler);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/StreamsBuilderFactoryBean.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.streams.KafkaClientSupplier;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler;
import org.apache.kafka.streams.processor.StateRestoreListener;
import org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.config.AbstractFactoryBean;
import org.springframework.context.SmartLifecycle;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.core.CleanupConfig;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* An {@link AbstractFactoryBean} for the {@link StreamsBuilder} instance
* and lifecycle control for the internal {@link KafkaStreams} instance.
* <p>
* A fine grained control on {@link KafkaStreams} can be achieved by
* {@link KafkaStreamsCustomizer}s.
*
* @author Artem Bilan
* @author Ivan Ursul
* @author Soby Chacko
* @author Zach Olauson
* @author Nurettin Yilmaz
* @author Denis Washington
* @author Gary Russell
*
* @since 1.1.4
*/
public class StreamsBuilderFactoryBean extends AbstractFactoryBean<StreamsBuilder>
		implements SmartLifecycle, BeanNameAware {

	/**
	 * The default {@link Duration} of {@code 10 seconds} for close timeout.
	 * @see KafkaStreams#close(Duration)
	 */
	public static final Duration DEFAULT_CLOSE_TIMEOUT = Duration.ofSeconds(10);

	private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(StreamsBuilderFactoryBean.class));

	private static final String STREAMS_CONFIG_MUST_NOT_BE_NULL = "'streamsConfig' must not be null";

	private static final String CLEANUP_CONFIG_MUST_NOT_BE_NULL = "'cleanupConfig' must not be null";

	// Supplies the Kafka clients used by the KafkaStreams instance; replaceable for testing.
	private KafkaClientSupplier clientSupplier = new DefaultKafkaClientSupplier();

	// Streams configuration; required before start() (and before createInstance() when autoStartup).
	private Properties properties;

	private CleanupConfig cleanupConfig;

	private final List<Listener> listeners = new ArrayList<>();

	// No-op by default; hooks to customize the builder/topology before the streams are created.
	private KafkaStreamsInfrastructureCustomizer infrastructureCustomizer = new KafkaStreamsInfrastructureCustomizer() {
	};

	private KafkaStreamsCustomizer kafkaStreamsCustomizer;

	private KafkaStreams.StateListener stateListener;

	private StateRestoreListener stateRestoreListener;

	// Legacy handler; only used when no StreamsUncaughtExceptionHandler is set.
	private Thread.UncaughtExceptionHandler uncaughtExceptionHandler;

	private StreamsUncaughtExceptionHandler streamsUncaughtExceptionHandler;

	private boolean autoStartup = true;

	private int phase = Integer.MAX_VALUE - 1000; // NOSONAR magic #

	private Duration closeTimeout = DEFAULT_CLOSE_TIMEOUT;

	// The managed streams instance; non-null only between start() and stop().
	private KafkaStreams kafkaStreams;

	private volatile boolean running;

	private Topology topology;

	private String beanName;

	/**
	 * Default constructor that creates the factory without configuration
	 * {@link Properties}. It is the factory user's responsibility to properly set
	 * {@link Properties} using
	 * {@link StreamsBuilderFactoryBean#setStreamsConfiguration(Properties)}.
	 * @since 2.1.3.
	 */
	public StreamsBuilderFactoryBean() {
		this.cleanupConfig = new CleanupConfig();
	}

	/**
	 * Construct an instance with the supplied streams configuration and
	 * clean up configuration.
	 * @param streamsConfig the streams configuration.
	 * @param cleanupConfig the cleanup configuration.
	 * @since 2.2
	 */
	public StreamsBuilderFactoryBean(KafkaStreamsConfiguration streamsConfig, CleanupConfig cleanupConfig) {
		Assert.notNull(streamsConfig, STREAMS_CONFIG_MUST_NOT_BE_NULL);
		Assert.notNull(cleanupConfig, CLEANUP_CONFIG_MUST_NOT_BE_NULL);
		this.properties = streamsConfig.asProperties();
		this.cleanupConfig = cleanupConfig;
	}

	/**
	 * Construct an instance with the supplied streams configuration.
	 * @param streamsConfig the streams configuration.
	 * @since 2.2
	 */
	public StreamsBuilderFactoryBean(KafkaStreamsConfiguration streamsConfig) {
		this(streamsConfig, new CleanupConfig());
	}

	@Override
	public synchronized void setBeanName(String name) {
		this.beanName = name;
	}

	/**
	 * Set the streams configuration {@link Properties} on this factory.
	 * @param streamsConfig the streams configuration.
	 * @since 2.2
	 */
	public void setStreamsConfiguration(Properties streamsConfig) {
		Assert.notNull(streamsConfig, STREAMS_CONFIG_MUST_NOT_BE_NULL);
		this.properties = streamsConfig;
	}

	/**
	 * Return the streams configuration, if set.
	 * @return the properties, or {@code null} if none has been configured yet.
	 */
	@Nullable
	public Properties getStreamsConfiguration() {
		return this.properties; // NOSONAR - inconsistent synchronization
	}

	/**
	 * Set the {@link KafkaClientSupplier} used to create the Kafka clients.
	 * @param clientSupplier the supplier; must not be null.
	 */
	public void setClientSupplier(KafkaClientSupplier clientSupplier) {
		Assert.notNull(clientSupplier, "'clientSupplier' must not be null");
		this.clientSupplier = clientSupplier; // NOSONAR (sync)
	}

	/**
	 * Set a customizer to configure the builder and/or topology before creating the stream.
	 * @param infrastructureCustomizer the customizer
	 * @since 2.4.1
	 */
	public void setInfrastructureCustomizer(KafkaStreamsInfrastructureCustomizer infrastructureCustomizer) {
		Assert.notNull(infrastructureCustomizer, "'infrastructureCustomizer' must not be null");
		this.infrastructureCustomizer = infrastructureCustomizer; // NOSONAR (sync)
	}

	/**
	 * Specify a {@link KafkaStreamsCustomizer} to customize a {@link KafkaStreams}
	 * instance during {@link #start()}.
	 * @param kafkaStreamsCustomizer the {@link KafkaStreamsCustomizer} to use.
	 * @since 2.1.5
	 */
	public void setKafkaStreamsCustomizer(KafkaStreamsCustomizer kafkaStreamsCustomizer) {
		Assert.notNull(kafkaStreamsCustomizer, "'kafkaStreamsCustomizer' must not be null");
		this.kafkaStreamsCustomizer = kafkaStreamsCustomizer; // NOSONAR (sync)
	}

	/**
	 * Set a {@link KafkaStreams.StateListener} to be applied to the streams on start.
	 * @param stateListener the listener.
	 */
	public void setStateListener(KafkaStreams.StateListener stateListener) {
		this.stateListener = stateListener; // NOSONAR (sync)
	}

	/**
	 * Obsolete.
	 * @param exceptionHandler the handler.
	 * @deprecated in favor of
	 * {@link #setStreamsUncaughtExceptionHandler(StreamsUncaughtExceptionHandler)}.
	 */
	@Deprecated
	public void setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler exceptionHandler) {
		this.uncaughtExceptionHandler = exceptionHandler; // NOSONAR (sync)
	}

	/**
	 * Set a {@link StreamsUncaughtExceptionHandler}. Supercedes
	 * {@link #setUncaughtExceptionHandler(java.lang.Thread.UncaughtExceptionHandler)}.
	 * @param streamsUncaughtExceptionHandler the handler.
	 * @since 2.8
	 */
	public void setStreamsUncaughtExceptionHandler(StreamsUncaughtExceptionHandler streamsUncaughtExceptionHandler) {
		this.streamsUncaughtExceptionHandler = streamsUncaughtExceptionHandler; // NOSONAR (sync)
	}

	/**
	 * Retrieves the current {@link StreamsUncaughtExceptionHandler} set on this factory bean.
	 * @return {@link StreamsUncaughtExceptionHandler}
	 * @since 2.8.4
	 */
	@Nullable
	public StreamsUncaughtExceptionHandler getStreamsUncaughtExceptionHandler() {
		return this.streamsUncaughtExceptionHandler;
	}

	/**
	 * Set a {@link StateRestoreListener} applied as the global restore listener on start.
	 * @param stateRestoreListener the listener.
	 */
	public void setStateRestoreListener(StateRestoreListener stateRestoreListener) {
		this.stateRestoreListener = stateRestoreListener; // NOSONAR (sync)
	}

	/**
	 * Specify the timeout in seconds for the {@link KafkaStreams#close(Duration)}
	 * operation. Defaults to {@link #DEFAULT_CLOSE_TIMEOUT} seconds.
	 * @param closeTimeout the timeout for close in seconds.
	 * @see KafkaStreams#close(Duration)
	 */
	public void setCloseTimeout(int closeTimeout) {
		this.closeTimeout = Duration.ofSeconds(closeTimeout); // NOSONAR (sync)
	}

	/**
	 * Providing access to the associated {@link Topology} of this
	 * {@link StreamsBuilderFactoryBean}.
	 * @return {@link Topology} object
	 * @since 2.4.4
	 */
	@Nullable
	public Topology getTopology() {
		return this.topology;
	}

	@Override
	public Class<?> getObjectType() {
		return StreamsBuilder.class;
	}

	/**
	 * Set whether the streams should be started automatically by the context.
	 * @param autoStartup true (default) to auto-start.
	 */
	public void setAutoStartup(boolean autoStartup) {
		this.autoStartup = autoStartup;
	}

	/**
	 * Set the {@link SmartLifecycle} phase; default is near {@code Integer.MAX_VALUE},
	 * so the streams start late and stop early relative to most other beans.
	 * @param phase the phase.
	 */
	public void setPhase(int phase) {
		this.phase = phase;
	}

	@Override
	public int getPhase() {
		return this.phase;
	}

	/**
	 * Set the {@link CleanupConfig} controlling {@link KafkaStreams#cleanUp()} on
	 * start and/or stop.
	 * @param cleanupConfig the config; must not be null.
	 */
	public void setCleanupConfig(CleanupConfig cleanupConfig) {
		Assert.notNull(cleanupConfig, CLEANUP_CONFIG_MUST_NOT_BE_NULL);
		this.cleanupConfig = cleanupConfig; // NOSONAR (sync)
	}

	/**
	 * Get a managed by this {@link StreamsBuilderFactoryBean} {@link KafkaStreams} instance.
	 * @return KafkaStreams managed instance;
	 * may be null if this {@link StreamsBuilderFactoryBean} hasn't been started.
	 * @since 1.1.4
	 */
	@Nullable
	public synchronized KafkaStreams getKafkaStreams() {
		return this.kafkaStreams;
	}

	/**
	 * Get the current list of listeners.
	 * @return the listeners.
	 * @since 2.5.3
	 */
	public List<Listener> getListeners() {
		return Collections.unmodifiableList(this.listeners);
	}

	/**
	 * Add a {@link Listener} which will be called after starting and stopping the
	 * streams.
	 * @param listener the listener.
	 * @since 2.5.3
	 */
	public void addListener(Listener listener) {
		Assert.notNull(listener, "'listener' cannot be null");
		this.listeners.add(listener);
	}

	/**
	 * Remove a listener.
	 * @param listener the listener.
	 * @return true if removed.
	 * @since 2.5.3
	 */
	public boolean removeListener(Listener listener) {
		return this.listeners.remove(listener);
	}

	/**
	 * Create the exposed {@link StreamsBuilder}, first letting the infrastructure
	 * customizer configure it. Properties are only required up-front when the
	 * streams will be auto-started.
	 */
	@Override
	protected synchronized StreamsBuilder createInstance() {
		if (this.autoStartup) {
			Assert.state(this.properties != null,
					"streams configuration properties must not be null");
		}
		StreamsBuilder builder = new StreamsBuilder();
		this.infrastructureCustomizer.configureBuilder(builder);
		return builder;
	}

	@Override
	public boolean isAutoStartup() {
		return this.autoStartup;
	}

	@Override
	public void stop(Runnable callback) {
		stop();
		if (callback != null) {
			callback.run();
		}
	}

	/**
	 * Build the topology, create and customize the {@link KafkaStreams} instance,
	 * optionally clean up local state, then start the streams and notify listeners.
	 * Idempotent while already running.
	 */
	@SuppressWarnings("deprecation")
	@Override
	public synchronized void start() {
		if (!this.running) {
			try {
				Assert.state(this.properties != null,
						"streams configuration properties must not be null");
				Topology topology = getObject().build(this.properties); // NOSONAR: getObject() cannot return null
				this.infrastructureCustomizer.configureTopology(topology);
				this.topology = topology;
				LOGGER.debug(() -> topology.describe().toString());
				this.kafkaStreams = new KafkaStreams(topology, this.properties, this.clientSupplier);
				this.kafkaStreams.setStateListener(this.stateListener);
				this.kafkaStreams.setGlobalStateRestoreListener(this.stateRestoreListener);
				// The new-style handler wins; fall back to the deprecated Thread handler otherwise.
				if (this.streamsUncaughtExceptionHandler != null) {
					this.kafkaStreams.setUncaughtExceptionHandler(this.streamsUncaughtExceptionHandler);
				}
				else {
					this.kafkaStreams.setUncaughtExceptionHandler(this.uncaughtExceptionHandler);
				}
				if (this.kafkaStreamsCustomizer != null) {
					this.kafkaStreamsCustomizer.customize(this.kafkaStreams);
				}
				if (this.cleanupConfig.cleanupOnStart()) {
					this.kafkaStreams.cleanUp();
				}
				this.kafkaStreams.start();
				for (Listener listener : this.listeners) {
					listener.streamsAdded(this.beanName, this.kafkaStreams);
				}
				this.running = true;
			}
			catch (Exception e) {
				throw new KafkaException("Could not start stream: ", e);
			}
		}
	}

	/**
	 * Close the streams (bounded by {@link #setCloseTimeout(int)}), optionally clean
	 * up local state, and notify listeners. Failures are logged, not rethrown, and
	 * the running flag is always cleared.
	 */
	@Override
	public synchronized void stop() {
		if (this.running) {
			try {
				if (this.kafkaStreams != null) {
					this.kafkaStreams.close(this.closeTimeout);
					if (this.cleanupConfig.cleanupOnStop()) {
						this.kafkaStreams.cleanUp();
					}
					for (Listener listener : this.listeners) {
						listener.streamsRemoved(this.beanName, this.kafkaStreams);
					}
					this.kafkaStreams = null;
				}
			}
			catch (Exception e) {
				LOGGER.error(e, "Failed to stop streams");
			}
			finally {
				this.running = false;
			}
		}
	}

	@Override
	public synchronized boolean isRunning() {
		return this.running;
	}

	/**
	 * Called whenever a {@link KafkaStreams} is added or removed.
	 *
	 * @since 2.5.3
	 *
	 */
	public interface Listener {

		/**
		 * A new {@link KafkaStreams} was created.
		 * @param id the streams id (factory bean name).
		 * @param streams the streams;
		 */
		default void streamsAdded(String id, KafkaStreams streams) {
		}

		/**
		 * An existing {@link KafkaStreams} was removed.
		 * @param id the streams id (factory bean name).
		 * @param streams the streams;
		 */
		default void streamsRemoved(String id, KafkaStreams streams) {
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/StreamsBuilderFactoryBeanConfigurer.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import org.springframework.core.Ordered;
/**
* A configurer for {@link StreamsBuilderFactoryBean}. Applied, in order, to the single
* {@link StreamsBuilderFactoryBean} configured by the framework. Invoked after the bean
* is created and before it is started. Default order is 0.
*
* @author Gary Russell
* @since 2.6.7
*
*/
@FunctionalInterface
public interface StreamsBuilderFactoryBeanConfigurer extends Ordered {

	/**
	 * Configure the factory bean.
	 * @param factoryBean the factory bean.
	 */
	void configure(StreamsBuilderFactoryBean factoryBean);

	/**
	 * The order in which this configurer is applied relative to other configurers;
	 * lower values run first. Defaults to 0.
	 * @return the order.
	 */
	@Override
	default int getOrder() {
		return 0;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/StreamsBuilderFactoryBeanCustomizer.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
/**
* A customizer for the {@link StreamsBuilderFactoryBean} that is implicitly created by
* {@link org.springframework.kafka.annotation.EnableKafkaStreams}. If exactly one
* implementation of this interface is found in the application context (or one is marked
* as {@link org.springframework.context.annotation.Primary}, it will be invoked after the
* factory bean has been created and before it is started.
*
* @author Gary Russell
* @since 2.3
* @deprecated in favor of {@code StreamsBuilderFactoryBeanConfigurer} due to a name
* clash with a similar class in Spring Boot.
*/
@Deprecated
@FunctionalInterface
public interface StreamsBuilderFactoryBeanCustomizer {

	/**
	 * Configure the factory bean.
	 * <p>Deprecated: implement {@code StreamsBuilderFactoryBeanConfigurer} instead.
	 * @param factoryBean the factory bean.
	 */
	void configure(StreamsBuilderFactoryBean factoryBean);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/TopicBuilder.java | /*
* Copyright 2019-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;
/**
* Builder for a {@link NewTopic}. Since 2.6 partitions and replicas default to
* {@link Optional#empty()} indicating the broker defaults will be applied.
*
* @author Gary Russell
* @since 2.3
*
*/
public final class TopicBuilder {
private final String name;
private Optional<Integer> partitions = Optional.empty();
private Optional<Short> replicas = Optional.empty();
private Map<Integer, List<Integer>> replicasAssignments;
private final Map<String, String> configs = new HashMap<>();
private TopicBuilder(String name) {
this.name = name;
}
/**
* Set the number of partitions (default broker 'num.partitions').
* @param partitionCount the partitions.
* @return the builder.
*/
public TopicBuilder partitions(int partitionCount) {
this.partitions = Optional.of(partitionCount);
return this;
}
/**
* Set the number of replicas (default broker 'default.replication.factor').
* @param replicaCount the replicas (which will be cast to short).
* @return the builder.
*/
public TopicBuilder replicas(int replicaCount) {
this.replicas = Optional.of((short) replicaCount);
return this;
}
/**
* Set the replica assignments.
* @param replicaAssignments the assignments.
* @return the builder.
* @see NewTopic#replicasAssignments()
*/
public TopicBuilder replicasAssignments(Map<Integer, List<Integer>> replicaAssignments) {
replicaAssignments.forEach((part, list) -> assignReplicas(part, list));
return this;
}
/**
* Add an individual replica assignment.
* @param partition the partition.
* @param replicaList the replicas.
* @return the builder.
* @see NewTopic#replicasAssignments()
*/
public TopicBuilder assignReplicas(int partition, List<Integer> replicaList) {
if (this.replicasAssignments == null) {
this.replicasAssignments = new HashMap<>();
}
this.replicasAssignments.put(partition, new ArrayList<>(replicaList));
return this;
}
/**
* Set the configs.
* @param configProps the configs.
* @return the builder.
* @see NewTopic#configs()
*/
public TopicBuilder configs(Map<String, String> configProps) {
this.configs.putAll(configProps);
return this;
}
/**
* Set a configuration option.
* @param configName the name.
* @param configValue the value.
* @return the builder
* @see TopicConfig
*/
public TopicBuilder config(String configName, String configValue) {
this.configs.put(configName, configValue);
return this;
}
/**
 * Shortcut that sets the {@link TopicConfig#CLEANUP_POLICY_CONFIG} option to
 * {@link TopicConfig#CLEANUP_POLICY_COMPACT}.
 * @return the builder.
 */
public TopicBuilder compact() {
	this.configs.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
	return this;
}
/**
 * Build the {@link NewTopic} from this builder's state. Explicit replica
 * assignments, when supplied, take precedence over the partition/replica
 * counts; configs are applied only when at least one has been set.
 * @return the topic.
 */
public NewTopic build() {
	NewTopic topic = this.replicasAssignments == null
			? new NewTopic(this.name, this.partitions, this.replicas)
			: new NewTopic(this.name, this.replicasAssignments);
	if (!this.configs.isEmpty()) {
		topic.configs(this.configs);
	}
	return topic;
}
/**
 * Entry point: create a TopicBuilder for a topic with the supplied name.
 * @param name the name.
 * @return the builder.
 */
public static TopicBuilder name(String name) {
	return new TopicBuilder(name);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/package-info.java | /**
* Package for kafka configuration
*/
@org.springframework.lang.NonNullApi
package org.springframework.kafka.config;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/ABSwitchCluster.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
import org.springframework.util.Assert;
/**
* A {@link Supplier} for bootstrap servers that can toggle between 2 lists of servers.
*
* @author Gary Russell
* @since 2.5
*
*/
public class ABSwitchCluster implements Supplier<String> {

	// true => primary cluster is active; false => secondary.
	private final AtomicBoolean usePrimary = new AtomicBoolean(true);

	private final String primary;

	private final String secondary;

	/**
	 * Construct an instance with primary and secondary bootstrap servers.
	 * @param primary the primary.
	 * @param secondary the secondary.
	 */
	public ABSwitchCluster(String primary, String secondary) {
		Assert.hasText(primary, "'primary' is required");
		Assert.hasText(secondary, "'secondary' is required");
		this.primary = primary;
		this.secondary = secondary;
	}

	@Override
	public String get() {
		if (this.usePrimary.get()) {
			return this.primary;
		}
		return this.secondary;
	}

	/**
	 * Get whether or not the primary cluster is active.
	 * @return true for primary, false for secondary.
	 */
	public boolean isPrimary() {
		return this.usePrimary.get();
	}

	/**
	 * Switch to the primary cluster.
	 */
	public void primary() {
		this.usePrimary.set(true);
	}

	/**
	 * Switch to the secondary cluster.
	 */
	public void secondary() {
		this.usePrimary.set(false);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/CleanupConfig.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
/**
* Specifies time of {@link org.apache.kafka.streams.KafkaStreams#cleanUp()} execution.
*
* @author Pawel Szymczyk
*/
public class CleanupConfig {

	// Whether cleanUp() should run before the streams are started.
	private final boolean onStart;

	// Whether cleanUp() should run after the streams are stopped.
	private final boolean onStop;

	/**
	 * Construct an instance that performs no cleanup at all.
	 */
	public CleanupConfig() {
		this(false, false);
	}

	/**
	 * Construct an instance with the provided cleanup flags.
	 * @param onStart true to clean up on start.
	 * @param onStop true to clean up on stop.
	 */
	public CleanupConfig(boolean onStart, boolean onStop) {
		this.onStart = onStart;
		this.onStop = onStop;
	}

	/**
	 * Return whether cleanup should occur on start.
	 * @return true to clean up on start.
	 */
	public boolean cleanupOnStart() {
		return this.onStart;
	}

	/**
	 * Return whether cleanup should occur on stop.
	 * @return true to clean up on stop.
	 */
	public boolean cleanupOnStop() {
		return this.onStop;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/ConsumerFactory.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.serialization.Deserializer;
import org.springframework.lang.Nullable;
/**
* The strategy to produce a {@link Consumer} instance(s).
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Artem Bilan
*/
public interface ConsumerFactory<K, V> {

	/**
	 * Create a consumer with the group id and client id as configured in the properties.
	 * @return the consumer.
	 */
	default Consumer<K, V> createConsumer() {
		return createConsumer(null);
	}

	/**
	 * Create a consumer, appending the suffix to the {@code client.id} property,
	 * if present.
	 * @param clientIdSuffix the suffix.
	 * @return the consumer.
	 * @since 1.3
	 */
	default Consumer<K, V> createConsumer(@Nullable String clientIdSuffix) {
		return createConsumer(null, clientIdSuffix);
	}

	/**
	 * Create a consumer with an explicit group id; in addition, the
	 * client id suffix is appended to the {@code client.id} property, if both
	 * are present.
	 * @param groupId the group id.
	 * @param clientIdSuffix the suffix.
	 * @return the consumer.
	 * @since 1.3
	 */
	default Consumer<K, V> createConsumer(@Nullable String groupId, @Nullable String clientIdSuffix) {
		return createConsumer(groupId, null, clientIdSuffix);
	}

	/**
	 * Create a consumer with an explicit group id; in addition, the
	 * client id suffix is appended to the clientIdPrefix which overrides the
	 * {@code client.id} property, if present. This is the only abstract factory
	 * method; all other {@code createConsumer} defaults funnel into it.
	 * @param groupId the group id.
	 * @param clientIdPrefix the prefix.
	 * @param clientIdSuffix the suffix.
	 * @return the consumer.
	 * @since 2.1.1
	 */
	Consumer<K, V> createConsumer(@Nullable String groupId, @Nullable String clientIdPrefix,
			@Nullable String clientIdSuffix);

	/**
	 * Create a consumer with an explicit group id; in addition, the
	 * client id suffix is appended to the clientIdPrefix which overrides the
	 * {@code client.id} property, if present. In addition, consumer properties can
	 * be overridden if the factory implementation supports it.
	 * <p>NOTE: this default implementation silently IGNORES {@code properties} and
	 * delegates to the three-argument variant; property overrides only take effect
	 * when the factory overrides this method.
	 * @param groupId the group id.
	 * @param clientIdPrefix the prefix.
	 * @param clientIdSuffix the suffix.
	 * @param properties the properties to override.
	 * @return the consumer.
	 * @since 2.2.4
	 */
	default Consumer<K, V> createConsumer(@Nullable String groupId, @Nullable String clientIdPrefix,
			@Nullable String clientIdSuffix, @Nullable Properties properties) {
		return createConsumer(groupId, clientIdPrefix, clientIdSuffix);
	}

	/**
	 * Return true if consumers created by this factory use auto commit.
	 * @return true if auto commit.
	 */
	boolean isAutoCommit();

	/**
	 * Return an unmodifiable reference to the configuration map for this factory.
	 * Useful for cloning to make a similar factory.
	 * <p>This default implementation throws {@link UnsupportedOperationException};
	 * factories that expose their configuration must override it.
	 * @return the configs.
	 * @since 2.0
	 */
	default Map<String, Object> getConfigurationProperties() {
		throw new UnsupportedOperationException("'getConfigurationProperties()' is not supported");
	}

	/**
	 * Return the configured key deserializer (if provided as an object instead
	 * of a class name in the properties); {@code null} by default.
	 * @return the deserializer.
	 * @since 2.0
	 */
	@Nullable
	default Deserializer<K> getKeyDeserializer() {
		return null;
	}

	/**
	 * Return the configured value deserializer (if provided as an object instead
	 * of a class name in the properties); {@code null} by default.
	 * @return the deserializer.
	 * @since 2.0
	 */
	@Nullable
	default Deserializer<V> getValueDeserializer() {
		return null;
	}

	/**
	 * Remove a listener. This default implementation does nothing and
	 * returns false.
	 * @param listener the listener.
	 * @return true if removed.
	 * @since 2.5.3
	 */
	default boolean removeListener(Listener<K, V> listener) {
		return false;
	}

	/**
	 * Add a listener at a specific index. This default implementation is a no-op;
	 * factories supporting listeners must override it.
	 * @param index the index (list position).
	 * @param listener the listener.
	 * @since 2.5.3
	 */
	default void addListener(int index, Listener<K, V> listener) {
	}

	/**
	 * Add a listener. This default implementation is a no-op; factories
	 * supporting listeners must override it.
	 * @param listener the listener.
	 * @since 2.5.3
	 */
	default void addListener(Listener<K, V> listener) {
	}

	/**
	 * Get the current list of listeners; empty by default.
	 * @return the listeners.
	 * @since 2.5.3
	 */
	default List<Listener<K, V>> getListeners() {
		return Collections.emptyList();
	}

	/**
	 * Add a post processor. This default implementation is a no-op; factories
	 * supporting post processors must override it.
	 * @param postProcessor the post processor.
	 * @since 2.5.3
	 */
	default void addPostProcessor(ConsumerPostProcessor<K, V> postProcessor) {
	}

	/**
	 * Remove a post processor. This default implementation does nothing and
	 * returns false.
	 * @param postProcessor the post processor.
	 * @return true if removed.
	 * @since 2.5.3
	 */
	default boolean removePostProcessor(ConsumerPostProcessor<K, V> postProcessor) {
		return false;
	}

	/**
	 * Get the current list of post processors; empty by default.
	 * @return the post processor.
	 * @since 2.5.3
	 */
	default List<ConsumerPostProcessor<K, V>> getPostProcessors() {
		return Collections.emptyList();
	}

	/**
	 * Update the consumer configuration map; useful for situations such as
	 * credential rotation. This default implementation is a no-op.
	 * @param updates the configuration properties to update.
	 * @since 2.7
	 */
	default void updateConfigs(Map<String, Object> updates) {
	}

	/**
	 * Remove the specified key from the configuration map. This default
	 * implementation is a no-op.
	 * @param configKey the key to remove.
	 * @since 2.7
	 */
	default void removeConfig(String configKey) {
	}

	/**
	 * Called whenever a consumer is added or removed. Both callbacks default to
	 * no-ops so implementors need only override the events of interest.
	 *
	 * @param <K> the key type.
	 * @param <V> the value type.
	 *
	 * @since 2.5
	 *
	 */
	interface Listener<K, V> {

		/**
		 * A new consumer was created.
		 * @param id the consumer id (factory bean name and client.id separated by a
		 * period).
		 * @param consumer the consumer.
		 */
		default void consumerAdded(String id, Consumer<K, V> consumer) {
		}

		/**
		 * An existing consumer was removed.
		 * @param id the consumer id (factory bean name and client.id separated by a
		 * period).
		 * @param consumer the consumer.
		 */
		default void consumerRemoved(String id, Consumer<K, V> consumer) {
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/ConsumerPostProcessor.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.function.Function;
import org.apache.kafka.clients.consumer.Consumer;
/**
* Called by consumer factories to perform post processing on newly created consumers.
*
* @param <K> the key type.
* @param <V> the value type
*
* @author Gary Russell
* @since 2.5.3
*
*/
// Marked @FunctionalInterface: it has exactly one (inherited) abstract method,
// Function.apply, so instances may be supplied as lambdas or method references.
@FunctionalInterface
public interface ConsumerPostProcessor<K, V> extends Function<Consumer<K, V>, Consumer<K, V>> {

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/DefaultKafkaConsumerFactory.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;
import org.aopalliance.aop.Advice;
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.serialization.Deserializer;
import org.springframework.aop.framework.ProxyFactory;
import org.springframework.aop.support.NameMatchMethodPointcutAdvisor;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* The {@link ConsumerFactory} implementation to produce new {@link Consumer} instances
* for provided {@link Map} {@code configs} and optional {@link Deserializer}s on each {@link #createConsumer()}
* invocation.
* <p>
* If you are using {@link Deserializer}s that have no-arg constructors and require no setup, then simplest to
* specify {@link Deserializer} classes against {@link ConsumerConfig#KEY_DESERIALIZER_CLASS_CONFIG} and
* {@link ConsumerConfig#VALUE_DESERIALIZER_CLASS_CONFIG} keys in the {@code configs} passed to the
* {@link DefaultKafkaConsumerFactory} constructor.
* <p>
* If that is not possible, but you are using {@link Deserializer}s that may be shared between all {@link Consumer}
* instances (and specifically that their close() method is a no-op), then you can pass in {@link Deserializer}
* instances for one or both of the key and value deserializers.
* <p>
* If neither of the above is true then you may provide a {@link Supplier} for one or both {@link Deserializer}s
* which will be used to obtain {@link Deserializer}(s) each time a {@link Consumer} is created by the factory.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Murali Reddy
* @author Artem Bilan
* @author Chris Gilbert
*/
public class DefaultKafkaConsumerFactory<K, V> extends KafkaResourceFactory
		implements ConsumerFactory<K, V>, BeanNameAware {

	private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(DefaultKafkaConsumerFactory.class));

	// Consumer configuration; a ConcurrentHashMap because updateConfigs()/removeConfig()
	// may be called at runtime (e.g. for credential rotation).
	private final Map<String, Object> configs;

	// Listeners notified when consumers are created/closed; when non-empty, consumers
	// are proxied so close() can be intercepted (see createKafkaConsumer(Map)).
	private final List<Listener<K, V>> listeners = new ArrayList<>();

	// Post processors applied, in order, to every newly created consumer.
	private final List<ConsumerPostProcessor<K, V>> postProcessors = new ArrayList<>();

	private Supplier<Deserializer<K>> keyDeserializerSupplier;

	private Supplier<Deserializer<V>> valueDeserializerSupplier;

	// Prefix of the id passed to listener callbacks; replaced by the bean name when
	// this factory is managed by Spring (see setBeanName()).
	private String beanName = "not.managed.by.Spring";

	/**
	 * Construct a factory with the provided configuration.
	 * @param configs the configuration.
	 */
	public DefaultKafkaConsumerFactory(Map<String, Object> configs) {
		this(configs, () -> null, () -> null);
	}

	/**
	 * Construct a factory with the provided configuration and deserializers.
	 * The deserializers' {@code configure()} methods will be called with the
	 * configuration map.
	 * @param configs the configuration.
	 * @param keyDeserializer the key {@link Deserializer}.
	 * @param valueDeserializer the value {@link Deserializer}.
	 */
	public DefaultKafkaConsumerFactory(Map<String, Object> configs,
			@Nullable Deserializer<K> keyDeserializer,
			@Nullable Deserializer<V> valueDeserializer) {

		this(configs, () -> keyDeserializer, () -> valueDeserializer);
	}

	/**
	 * Construct a factory with the provided configuration and deserializer suppliers.
	 * When the suppliers are invoked to get an instance, the deserializers'
	 * {@code configure()} methods will be called with the configuration map.
	 * @param configs the configuration.
	 * @param keyDeserializerSupplier the key {@link Deserializer} supplier function.
	 * @param valueDeserializerSupplier the value {@link Deserializer} supplier function.
	 * @since 2.3
	 */
	public DefaultKafkaConsumerFactory(Map<String, Object> configs,
			@Nullable Supplier<Deserializer<K>> keyDeserializerSupplier,
			@Nullable Supplier<Deserializer<V>> valueDeserializerSupplier) {

		// Defensive copy; also makes the map safe for concurrent runtime updates.
		this.configs = new ConcurrentHashMap<>(configs);
		this.keyDeserializerSupplier = keyDeserializerSupplier(keyDeserializerSupplier);
		this.valueDeserializerSupplier = valueDeserializerSupplier(valueDeserializerSupplier);
	}

	// Wrap the supplier so that every deserializer instance it produces is
	// configure()d with this factory's config map (isKey = true) before use.
	private Supplier<Deserializer<K>> keyDeserializerSupplier(
			@Nullable Supplier<Deserializer<K>> keyDeserializerSupplier) {

		return keyDeserializerSupplier == null
				? () -> null
				: () -> {
					Deserializer<K> deserializer = keyDeserializerSupplier.get();
					if (deserializer != null) {
						deserializer.configure(this.configs, true);
					}
					return deserializer;
				};
	}

	// Same wrapping as keyDeserializerSupplier(), but with isKey = false.
	private Supplier<Deserializer<V>> valueDeserializerSupplier(
			@Nullable Supplier<Deserializer<V>> valueDeserializerSupplier) {

		return valueDeserializerSupplier == null
				? () -> null
				: () -> {
					Deserializer<V> deserializer = valueDeserializerSupplier.get();
					if (deserializer != null) {
						deserializer.configure(this.configs, false);
					}
					return deserializer;
				};
	}

	@Override
	public void setBeanName(String name) {
		this.beanName = name;
	}

	/**
	 * Set the key deserializer.
	 * @param keyDeserializer the deserializer.
	 */
	public void setKeyDeserializer(@Nullable Deserializer<K> keyDeserializer) {
		this.keyDeserializerSupplier = keyDeserializerSupplier(() -> keyDeserializer);
	}

	/**
	 * Set the value deserializer.
	 * @param valueDeserializer the value deserializer.
	 */
	public void setValueDeserializer(@Nullable Deserializer<V> valueDeserializer) {
		this.valueDeserializerSupplier = valueDeserializerSupplier(() -> valueDeserializer);
	}

	/**
	 * Set a supplier to supply instances of the key deserializer.
	 * @param keyDeserializerSupplier the supplier.
	 * @since 2.8
	 */
	public void setKeyDeserializerSupplier(Supplier<Deserializer<K>> keyDeserializerSupplier) {
		this.keyDeserializerSupplier = keyDeserializerSupplier(keyDeserializerSupplier);
	}

	/**
	 * Set a supplier to supply instances of the value deserializer.
	 * @param valueDeserializerSupplier the supplier.
	 * @since 2.8
	 */
	public void setValueDeserializerSupplier(Supplier<Deserializer<V>> valueDeserializerSupplier) {
		this.valueDeserializerSupplier = valueDeserializerSupplier(valueDeserializerSupplier);
	}

	@Override
	public Map<String, Object> getConfigurationProperties() {
		// Work on a copy so checkBootstrap() cannot mutate the live config map,
		// then return an unmodifiable view of the copy.
		Map<String, Object> configs2 = new HashMap<>(this.configs);
		checkBootstrap(configs2);
		return Collections.unmodifiableMap(configs2);
	}

	@Override
	public Deserializer<K> getKeyDeserializer() {
		return this.keyDeserializerSupplier.get();
	}

	@Override
	public Deserializer<V> getValueDeserializer() {
		return this.valueDeserializerSupplier.get();
	}

	/**
	 * Get the current list of listeners (unmodifiable view).
	 * @return the listeners.
	 * @since 2.5
	 */
	@Override
	public List<Listener<K, V>> getListeners() {
		return Collections.unmodifiableList(this.listeners);
	}

	@Override
	public List<ConsumerPostProcessor<K, V>> getPostProcessors() {
		return Collections.unmodifiableList(this.postProcessors);
	}

	/**
	 * Add a listener.
	 * @param listener the listener.
	 * @since 2.5
	 */
	@Override
	public void addListener(Listener<K, V> listener) {
		Assert.notNull(listener, "'listener' cannot be null");
		this.listeners.add(listener);
	}

	/**
	 * Add a listener at a specific index; an out-of-range index appends instead
	 * of throwing.
	 * @param index the index (list position).
	 * @param listener the listener.
	 * @since 2.5
	 */
	@Override
	public void addListener(int index, Listener<K, V> listener) {
		Assert.notNull(listener, "'listener' cannot be null");
		if (index >= this.listeners.size()) {
			this.listeners.add(listener);
		}
		else {
			this.listeners.add(index, listener);
		}
	}

	@Override
	public void addPostProcessor(ConsumerPostProcessor<K, V> postProcessor) {
		Assert.notNull(postProcessor, "'postProcessor' cannot be null");
		this.postProcessors.add(postProcessor);
	}

	@Override
	public boolean removePostProcessor(ConsumerPostProcessor<K, V> postProcessor) {
		return this.postProcessors.remove(postProcessor);
	}

	/**
	 * Remove a listener.
	 * @param listener the listener.
	 * @return true if removed.
	 * @since 2.5
	 */
	@Override
	public boolean removeListener(Listener<K, V> listener) {
		return this.listeners.remove(listener);
	}

	@Override
	public void updateConfigs(Map<String, Object> updates) {
		this.configs.putAll(updates);
	}

	@Override
	public void removeConfig(String configKey) {
		this.configs.remove(configKey);
	}

	@Override
	public Consumer<K, V> createConsumer(@Nullable String groupId, @Nullable String clientIdPrefix,
			@Nullable String clientIdSuffix) {

		return createKafkaConsumer(groupId, clientIdPrefix, clientIdSuffix, null);
	}

	@Override
	public Consumer<K, V> createConsumer(@Nullable String groupId, @Nullable String clientIdPrefix,
			@Nullable final String clientIdSuffixArg, @Nullable Properties properties) {

		return createKafkaConsumer(groupId, clientIdPrefix, clientIdSuffixArg, properties);
	}

	/**
	 * Create a consumer, deciding whether the base configuration can be used as-is
	 * or must be adjusted (group id override, client.id prefix/suffix, per-call
	 * property overrides).
	 */
	protected Consumer<K, V> createKafkaConsumer(@Nullable String groupId, @Nullable String clientIdPrefixArg,
			@Nullable String clientIdSuffixArg, @Nullable Properties properties) {

		// An explicit prefix always wins over any configured client.id.
		boolean overrideClientIdPrefix = StringUtils.hasText(clientIdPrefixArg);
		String clientIdPrefix = clientIdPrefixArg;
		String clientIdSuffix = clientIdSuffixArg;
		if (clientIdPrefix == null) {
			clientIdPrefix = "";
		}
		if (clientIdSuffix == null) {
			clientIdSuffix = "";
		}
		// The client.id must be rewritten when a suffix is to be appended to a
		// configured client.id, or when the caller supplied an explicit prefix.
		boolean shouldModifyClientId = (this.configs.containsKey(ConsumerConfig.CLIENT_ID_CONFIG)
				&& StringUtils.hasText(clientIdSuffix)) || overrideClientIdPrefix;
		if (groupId == null
				&& (properties == null || properties.stringPropertyNames().size() == 0)
				&& !shouldModifyClientId) {
			// Fast path: nothing to adjust, use the base configuration directly.
			return createKafkaConsumer(new HashMap<>(this.configs));
		}
		else {
			return createConsumerWithAdjustedProperties(groupId, clientIdPrefix, properties, overrideClientIdPrefix,
					clientIdSuffix, shouldModifyClientId);
		}
	}

	// Build a per-call configuration map: base configs, then group id, then the
	// (possibly rebuilt) client.id, then per-call property overrides.
	private Consumer<K, V> createConsumerWithAdjustedProperties(@Nullable String groupId, String clientIdPrefix,
			@Nullable Properties properties, boolean overrideClientIdPrefix, String clientIdSuffix,
			boolean shouldModifyClientId) {

		Map<String, Object> modifiedConfigs = new HashMap<>(this.configs);
		if (groupId != null) {
			modifiedConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
		}
		if (shouldModifyClientId) {
			// Either "<explicit prefix><suffix>" or "<configured client.id><suffix>".
			modifiedConfigs.put(ConsumerConfig.CLIENT_ID_CONFIG,
					(overrideClientIdPrefix ? clientIdPrefix
							: modifiedConfigs.get(ConsumerConfig.CLIENT_ID_CONFIG)) + clientIdSuffix);
		}
		if (properties != null) {
			Set<String> stringPropertyNames = properties.stringPropertyNames(); // to get any nested default Properties
			// First pass: String-valued properties (including defaults), except the
			// client.id / group.id keys which were already resolved above.
			stringPropertyNames
					.stream()
					.filter(name -> !name.equals(ConsumerConfig.CLIENT_ID_CONFIG)
							&& !name.equals(ConsumerConfig.GROUP_ID_CONFIG))
					.forEach(name -> modifiedConfigs.put(name, properties.getProperty(name)));
			// Second pass: String-keyed entries with non-String values, which
			// stringPropertyNames() does not report.
			properties.entrySet().stream()
					.filter(entry -> !entry.getKey().equals(ConsumerConfig.CLIENT_ID_CONFIG)
							&& !entry.getKey().equals(ConsumerConfig.GROUP_ID_CONFIG)
							&& !stringPropertyNames.contains(entry.getKey())
							&& entry.getKey() instanceof String)
					.forEach(entry -> modifiedConfigs.put((String) entry.getKey(), entry.getValue()));
			checkInaccessible(properties, modifiedConfigs);
		}
		return createKafkaConsumer(modifiedConfigs);
	}

	// Log (at error level) any property names that could not be transferred into the
	// consumer configuration — i.e. non-String-valued entries hidden in nested
	// default Properties, which neither copy pass above can read.
	private void checkInaccessible(Properties properties, Map<String, Object> modifiedConfigs) {
		List<Object> inaccessible = null;
		for (Enumeration<?> propertyNames = properties.propertyNames(); propertyNames.hasMoreElements(); ) {
			Object nextElement = propertyNames.nextElement();
			if (!modifiedConfigs.containsKey(nextElement)) {
				if (inaccessible == null) {
					inaccessible = new ArrayList<>();
				}
				inaccessible.add(nextElement);
			}
		}
		if (inaccessible != null) {
			LOGGER.error("Non-String-valued default properties are inaccessible; use String values or "
					+ "make them explicit properties instead of defaults: "
					+ inaccessible);
		}
	}

	/**
	 * Create the consumer from the final configuration; when listeners are present,
	 * wrap it in a proxy that notifies them on {@code close()}, then apply any
	 * post processors.
	 */
	@SuppressWarnings("resource")
	protected Consumer<K, V> createKafkaConsumer(Map<String, Object> configProps) {
		checkBootstrap(configProps);
		Consumer<K, V> kafkaConsumer = createRawConsumer(configProps);
		if (this.listeners.size() > 0) {
			// Derive the client-id from the consumer's metrics tags; "unknown" when
			// no metrics are exposed.
			Map<MetricName, ? extends Metric> metrics = kafkaConsumer.metrics();
			Iterator<MetricName> metricIterator = metrics.keySet().iterator();
			String clientId;
			if (metricIterator.hasNext()) {
				clientId = metricIterator.next().tags().get("client-id");
			}
			else {
				clientId = "unknown";
			}
			String id = this.beanName + "." + clientId;
			kafkaConsumer = createProxy(kafkaConsumer, id);
			for (Listener<K, V> listener : this.listeners) {
				listener.consumerAdded(id, kafkaConsumer);
			}
		}
		for (ConsumerPostProcessor<K, V> pp : this.postProcessors) {
			kafkaConsumer = pp.apply(kafkaConsumer);
		}
		return kafkaConsumer;
	}

	/**
	 * Create a Consumer.
	 * @param configProps the configuration properties.
	 * @return the consumer.
	 * @since 2.5
	 */
	protected Consumer<K, V> createRawConsumer(Map<String, Object> configProps) {
		return new KafkaConsumer<>(configProps, this.keyDeserializerSupplier.get(),
				this.valueDeserializerSupplier.get());
	}

	// Wrap the consumer in a proxy whose "close" methods notify all listeners
	// (consumerRemoved) before delegating to the real close().
	@SuppressWarnings("unchecked")
	private Consumer<K, V> createProxy(Consumer<K, V> kafkaConsumer, String id) {
		ProxyFactory pf = new ProxyFactory(kafkaConsumer);
		Advice advice = new MethodInterceptor() {

			@Override
			public Object invoke(MethodInvocation invocation) throws Throwable {
				DefaultKafkaConsumerFactory.this.listeners.forEach(listener ->
						listener.consumerRemoved(id, kafkaConsumer));
				return invocation.proceed();
			}

		};
		NameMatchMethodPointcutAdvisor advisor = new NameMatchMethodPointcutAdvisor(advice);
		advisor.addMethodName("close");
		pf.addAdvisor(advisor);
		return (Consumer<K, V>) pf.getProxy();
	}

	@Override
	public boolean isAutoCommit() {
		// When the property is absent (or of an unexpected type), auto commit is
		// assumed enabled; a String value is parsed as a boolean.
		Object auto = this.configs.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
		return auto instanceof Boolean ? (Boolean) auto
				: auto instanceof String ? Boolean.valueOf((String) auto) : true;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/DefaultKafkaProducerFactory.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiPredicate;
import java.util.function.Supplier;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextStoppedEvent;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.support.TransactionSupport;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* The {@link ProducerFactory} implementation for a {@code singleton} shared {@link Producer} instance.
* <p>
* This implementation will return the same {@link Producer} instance (if transactions are
* not enabled) for the provided {@link Map} {@code configs} and optional {@link Serializer}
* implementations on each {@link #createProducer()} invocation.
* <p>
* If you are using {@link Serializer}s that have no-arg constructors and require no setup, then simplest to
* specify {@link Serializer} classes against {@link ProducerConfig#KEY_SERIALIZER_CLASS_CONFIG} and
* {@link ProducerConfig#VALUE_SERIALIZER_CLASS_CONFIG} keys in the {@code configs} passed to the
* {@link DefaultKafkaProducerFactory} constructor.
* <p>
* If that is not possible, but you are sure that at least one of the following is true:
* <ul>
* <li>only one {@link Producer} will use the {@link Serializer}s</li>
* <li>you are using {@link Serializer}s that may be shared between {@link Producer} instances (and specifically
* that their close() method is a no-op)</li>
* <li>you are certain that there is no risk of any single {@link Producer} being closed while other
* {@link Producer} instances with the same {@link Serializer}s are in use</li>
* </ul>
* then you can pass in {@link Serializer} instances for one or both of the key and value serializers.
* <p>
* If none of the above is true then you may provide a {@link Supplier} function for one or both {@link Serializer}s
* which will be used to obtain {@link Serializer}(s) each time a {@link Producer} is created by the factory.
* <p>
* The {@link Producer} is wrapped and the underlying {@link KafkaProducer} instance is
* not actually closed when {@link Producer#close()} is invoked. The {@link KafkaProducer}
* is physically closed when {@link DisposableBean#destroy()} is invoked or when the
* application context publishes a {@link ContextStoppedEvent}. You can also invoke
* {@link #reset()}.
* <p>
* Setting {@link #setTransactionIdPrefix(String)} enables transactions; in which case, a
* cache of producers is maintained; closing a producer returns it to the cache. The
* producers are closed and the cache is cleared when the factory is destroyed, the
* application context stopped, or the {@link #reset()} method is called.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Murali Reddy
* @author Nakul Mishra
* @author Artem Bilan
* @author Chris Gilbert
* @author Thomas Strauß
*/
public class DefaultKafkaProducerFactory<K, V> extends KafkaResourceFactory
implements ProducerFactory<K, V>, ApplicationContextAware,
BeanNameAware, ApplicationListener<ContextStoppedEvent>, DisposableBean {
private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(DefaultKafkaProducerFactory.class));
private final Map<String, Object> configs;
private final AtomicInteger transactionIdSuffix = new AtomicInteger();
private final Map<String, BlockingQueue<CloseSafeProducer<K, V>>> cache = new ConcurrentHashMap<>();
private final Map<String, CloseSafeProducer<K, V>> consumerProducers = new HashMap<>();
private final ThreadLocal<CloseSafeProducer<K, V>> threadBoundProducers = new ThreadLocal<>();
private final AtomicInteger epoch = new AtomicInteger();
private final AtomicInteger clientIdCounter = new AtomicInteger();
private final List<Listener<K, V>> listeners = new ArrayList<>();
private final List<ProducerPostProcessor<K, V>> postProcessors = new ArrayList<>();
private Supplier<Serializer<K>> keySerializerSupplier;
private Supplier<Serializer<V>> valueSerializerSupplier;
private Supplier<Serializer<K>> rawKeySerializerSupplier;
private Supplier<Serializer<V>> rawValueSerializerSupplier;
private Duration physicalCloseTimeout = DEFAULT_PHYSICAL_CLOSE_TIMEOUT;
private ApplicationContext applicationContext;
private String beanName = "not.managed.by.Spring";
private boolean producerPerConsumerPartition = true;
private boolean producerPerThread;
private long maxAge;
private volatile String transactionIdPrefix;
private volatile String clientIdPrefix;
private volatile CloseSafeProducer<K, V> producer;
/**
 * Construct a factory with the provided configuration and no explicit
 * serializers; the suppliers yield {@code null}, so the {@link Serializer}
 * classes must be present in the {@code configs} map.
 * @param configs the configuration.
 */
public DefaultKafkaProducerFactory(Map<String, Object> configs) {
	this(configs, () -> null, () -> null);
}
/**
 * Construct a factory with the provided configuration and {@link Serializer}s.
 * Also configures a {@link #transactionIdPrefix} as a value from the
 * {@link ProducerConfig#TRANSACTIONAL_ID_CONFIG} if provided.
 * This config is going to be overridden with a suffix for target {@link Producer} instance.
 * The serializers' {@code configure()} methods will be called with the
 * configuration map.
 * @param configs the configuration.
 * @param keySerializer the key {@link Serializer}; {@code null} to rely on configs.
 * @param valueSerializer the value {@link Serializer}; {@code null} to rely on configs.
 */
public DefaultKafkaProducerFactory(Map<String, Object> configs,
@Nullable Serializer<K> keySerializer,
@Nullable Serializer<V> valueSerializer) {
	// Wrap the fixed instances in suppliers and delegate to the supplier-based ctor.
	this(configs, () -> keySerializer, () -> valueSerializer);
}
/**
 * Construct a factory with the provided configuration and {@link Serializer} Suppliers.
 * Also configures a {@link #transactionIdPrefix} as a value from the
 * {@link ProducerConfig#TRANSACTIONAL_ID_CONFIG} if provided.
 * This config is going to be overridden with a suffix for target {@link Producer} instance.
 * When the suppliers are invoked to get an instance, the serializers'
 * {@code configure()} methods will be called with the configuration map.
 * @param configs the configuration.
 * @param keySerializerSupplier the key {@link Serializer} supplier function.
 * @param valueSerializerSupplier the value {@link Serializer} supplier function.
 * @since 2.3
 */
public DefaultKafkaProducerFactory(Map<String, Object> configs,
@Nullable Supplier<Serializer<K>> keySerializerSupplier,
@Nullable Supplier<Serializer<V>> valueSerializerSupplier) {
	// Copy into a ConcurrentHashMap so updateConfigs()/removeConfig() are thread-safe.
	this.configs = new ConcurrentHashMap<>(configs);
	this.keySerializerSupplier = keySerializerSupplier(keySerializerSupplier);
	this.valueSerializerSupplier = valueSerializerSupplier(valueSerializerSupplier);
	// A configured client.id becomes a prefix; per-producer ids get a counter suffix.
	if (this.clientIdPrefix == null && configs.get(ProducerConfig.CLIENT_ID_CONFIG) instanceof String) {
		this.clientIdPrefix = (String) configs.get(ProducerConfig.CLIENT_ID_CONFIG);
	}
	// transactional.id is lifted out of the configs and kept only as the prefix;
	// each transactional producer gets prefix + suffix at creation time.
	String txId = (String) this.configs.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
	if (StringUtils.hasText(txId)) {
		setTransactionIdPrefix(txId);
		this.configs.remove(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
	}
}
/**
 * Record the raw supplier and return a wrapper that configures each supplied
 * key serializer with this factory's configs before handing it out.
 * @param keySerializerSupplier the raw supplier; may be {@code null}.
 * @return a null-safe, configuring supplier.
 */
private Supplier<Serializer<K>> keySerializerSupplier(@Nullable Supplier<Serializer<K>> keySerializerSupplier) {
	this.rawKeySerializerSupplier = keySerializerSupplier;
	if (keySerializerSupplier == null) {
		return () -> null;
	}
	return () -> {
		Serializer<K> supplied = keySerializerSupplier.get();
		if (supplied != null) {
			supplied.configure(this.configs, true);
		}
		return supplied;
	};
}
/**
 * Record the raw supplier and return a wrapper that configures each supplied
 * value serializer with this factory's configs before handing it out.
 * @param valueSerializerSupplier the raw supplier; may be {@code null}.
 * @return a null-safe, configuring supplier.
 */
private Supplier<Serializer<V>> valueSerializerSupplier(@Nullable Supplier<Serializer<V>> valueSerializerSupplier) {
	this.rawValueSerializerSupplier = valueSerializerSupplier;
	if (valueSerializerSupplier == null) {
		return () -> null;
	}
	return () -> {
		Serializer<V> supplied = valueSerializerSupplier.get();
		if (supplied != null) {
			supplied.configure(this.configs, false);
		}
		return supplied;
	};
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
	// Retained to match ContextStoppedEvents against our own context in onApplicationEvent().
	this.applicationContext = applicationContext;
}
@Override
public void setBeanName(String name) {
	// Used as part of each producer's clientId for listener notifications/logging.
	this.beanName = name;
}
/**
 * Set a key serializer; it will be {@code configure()}d with this factory's
 * configs the first time it is obtained.
 * @param keySerializer the key serializer.
 */
public void setKeySerializer(@Nullable Serializer<K> keySerializer) {
	this.keySerializerSupplier = keySerializerSupplier(() -> keySerializer);
}
/**
 * Set a value serializer; it will be {@code configure()}d with this factory's
 * configs the first time it is obtained.
 * @param valueSerializer the value serializer.
 */
public void setValueSerializer(@Nullable Serializer<V> valueSerializer) {
	this.valueSerializerSupplier = valueSerializerSupplier(() -> valueSerializer);
}
/**
 * Set a supplier to supply instances of the key serializer. Supplied
 * serializers are {@code configure()}d with this factory's configs, and the
 * raw supplier is recorded for {@link #getKeySerializerSupplier()},
 * consistent with {@link #setKeySerializer(Serializer)} and the constructors.
 * @param keySerializerSupplier the supplier.
 * @since 2.8
 */
public void setKeySerializerSupplier(Supplier<Serializer<K>> keySerializerSupplier) {
	// Wrap via the helper so configure() is applied and rawKeySerializerSupplier
	// does not go stale (previously this assigned the raw supplier directly).
	this.keySerializerSupplier = keySerializerSupplier(keySerializerSupplier);
}
/**
 * Set a supplier to supply instances of the value serializer. Supplied
 * serializers are {@code configure()}d with this factory's configs, and the
 * raw supplier is recorded for {@link #getValueSerializerSupplier()},
 * consistent with {@link #setValueSerializer(Serializer)} and the constructors.
 * @param valueSerializerSupplier the supplier.
 * @since 2.8
 */
public void setValueSerializerSupplier(Supplier<Serializer<V>> valueSerializerSupplier) {
	// Wrap via the helper so configure() is applied and rawValueSerializerSupplier
	// does not go stale (previously this assigned the raw supplier directly).
	this.valueSerializerSupplier = valueSerializerSupplier(valueSerializerSupplier);
}
/**
 * The time to wait when physically closing the producer via the factory rather than
 * closing the producer itself (when {@link #reset()}, {@link #destroy()
 * #closeProducerFor(String)}, or {@link #closeThreadBoundProducer()} are invoked).
 * Specified in seconds; default {@link #DEFAULT_PHYSICAL_CLOSE_TIMEOUT}.
 * @param physicalCloseTimeout the timeout in seconds.
 * @since 1.0.7
 */
public void setPhysicalCloseTimeout(int physicalCloseTimeout) {
	this.physicalCloseTimeout = Duration.ofSeconds(physicalCloseTimeout);
}
/**
 * Get the physical close timeout.
 * @return the timeout.
 * @since 2.5
 */
@Override
public Duration getPhysicalCloseTimeout() {
	return this.physicalCloseTimeout;
}
/**
 * Set a prefix for the {@link ProducerConfig#TRANSACTIONAL_ID_CONFIG} config. By
 * default a {@link ProducerConfig#TRANSACTIONAL_ID_CONFIG} value from configs is used
 * as a prefix in the target producer configs. Setting a prefix makes this
 * factory {@link #transactionCapable()} and forces idempotence on.
 * @param transactionIdPrefix the prefix.
 * @since 1.3
 */
public final void setTransactionIdPrefix(String transactionIdPrefix) {
	Assert.notNull(transactionIdPrefix, "'transactionIdPrefix' cannot be null");
	this.transactionIdPrefix = transactionIdPrefix;
	// Transactions require idempotence; warn-level handling is in enableIdempotentBehaviour().
	enableIdempotentBehaviour();
}
@Override
public @Nullable String getTransactionIdPrefix() {
	// Null when the factory is not transactional.
	return this.transactionIdPrefix;
}
/**
 * Set to true to create a producer per thread instead of singleton that is shared by
 * all clients. Clients <b>must</b> call {@link #closeThreadBoundProducer()} to
 * physically close the producer when it is no longer needed. These producers will not
 * be closed by {@link #destroy()} or {@link #reset()}.
 * @param producerPerThread true for a producer per thread.
 * @since 2.3
 * @see #closeThreadBoundProducer()
 */
public void setProducerPerThread(boolean producerPerThread) {
	this.producerPerThread = producerPerThread;
}
@Override
public boolean isProducerPerThread() {
	return this.producerPerThread;
}
/**
 * Set to false to revert to the previous behavior of a simple incrementing
 * transactional.id suffix for each producer instead of maintaining a producer
 * for each group/topic/partition.
 * @param producerPerConsumerPartition false to revert.
 * @since 1.3.7
 */
public void setProducerPerConsumerPartition(boolean producerPerConsumerPartition) {
	this.producerPerConsumerPartition = producerPerConsumerPartition;
}
/**
 * Return the producerPerConsumerPartition.
 * @return the producerPerConsumerPartition.
 * @since 1.3.8
 */
@Override
public boolean isProducerPerConsumerPartition() {
	return this.producerPerConsumerPartition;
}
@Override
@Nullable
public Serializer<K> getKeySerializer() {
	// Invokes the (configuring) supplier; may create a new instance per call.
	return this.keySerializerSupplier.get();
}
@Override
@Nullable
public Serializer<V> getValueSerializer() {
	// Invokes the (configuring) supplier; may create a new instance per call.
	return this.valueSerializerSupplier.get();
}
@Override
public Supplier<Serializer<K>> getKeySerializerSupplier() {
	// The raw (unwrapped) supplier, as originally provided by the user.
	return this.rawKeySerializerSupplier;
}
@Override
public Supplier<Serializer<V>> getValueSerializerSupplier() {
	// The raw (unwrapped) supplier, as originally provided by the user.
	return this.rawValueSerializerSupplier;
}
/**
 * Return an unmodifiable snapshot of the configuration map for this factory,
 * with bootstrap servers resolved. Useful for cloning to make a similar factory.
 * @return the configs.
 * @since 1.3
 */
@Override
public Map<String, Object> getConfigurationProperties() {
	Map<String, Object> snapshot = new HashMap<>(this.configs);
	checkBootstrap(snapshot);
	return Collections.unmodifiableMap(snapshot);
}
/**
 * Get the current list of listeners (unmodifiable view).
 * @return the listeners.
 * @since 2.5
 */
@Override
public List<Listener<K, V>> getListeners() {
	return Collections.unmodifiableList(this.listeners);
}
/**
 * Get the current list of producer post processors (unmodifiable view).
 */
@Override
public List<ProducerPostProcessor<K, V>> getPostProcessors() {
	return Collections.unmodifiableList(this.postProcessors);
}
/**
 * Set the maximum age for a producer; useful when using transactions and the broker
 * might expire a {@code transactional.id} due to inactivity.
 * @param maxAge the maxAge to set
 * @since 2.5.8
 */
public void setMaxAge(Duration maxAge) {
	// Stored in millis; see expire() which compares against the producer's created timestamp.
	this.maxAge = maxAge.toMillis();
}
/**
 * Copy properties of the instance and the given properties to create a new producer factory.
 * <p>If the {@link org.springframework.kafka.core.DefaultKafkaProducerFactory} makes a
 * copy of itself, the transaction id prefix is recovered from the properties. If
 * you want to change the ID config, add a new
 * {@link org.apache.kafka.clients.producer.ProducerConfig#TRANSACTIONAL_ID_CONFIG}
 * key to the override config.</p>
 * @param overrideProperties the properties to be applied to the new factory
 * @return {@link org.springframework.kafka.core.DefaultKafkaProducerFactory} with
 * properties applied
 */
@Override
public ProducerFactory<K, V> copyWithConfigurationOverride(Map<String, Object> overrideProperties) {
	Map<String, Object> producerProperties = new HashMap<>(getConfigurationProperties());
	producerProperties.putAll(overrideProperties);
	// Re-insert this factory's tx id (removed from configs by the ctor) unless overridden.
	producerProperties = ensureExistingTransactionIdPrefixInProperties(producerProperties);
	DefaultKafkaProducerFactory<K, V> newFactory =
	new DefaultKafkaProducerFactory<>(producerProperties,
	getKeySerializerSupplier(),
	getValueSerializerSupplier());
	newFactory.setPhysicalCloseTimeout((int) getPhysicalCloseTimeout().getSeconds());
	newFactory.setProducerPerConsumerPartition(isProducerPerConsumerPartition());
	newFactory.setProducerPerThread(isProducerPerThread());
	// Carry over post processors and listeners so the copy behaves like the template.
	for (ProducerPostProcessor<K, V> templatePostProcessor : getPostProcessors()) {
		newFactory.addPostProcessor(templatePostProcessor);
	}
	for (ProducerFactory.Listener<K, V> templateListener : getListeners()) {
		newFactory.addListener(templateListener);
	}
	return newFactory;
}
/**
 * Ensure that the returned properties contain a transaction id when this
 * factory has one. The constructor removes the
 * {@code transactional.id} entry from the config map and keeps it only as a
 * local prefix, so a faithful copy must have it re-inserted unless the
 * incoming properties already carry their own value.
 * @param producerProperties the properties to be used for the new factory
 * @return the producerProperties or a copy with the transaction ID set
 */
private Map<String, Object> ensureExistingTransactionIdPrefixInProperties(Map<String, Object> producerProperties) {
	String txIdPrefix = getTransactionIdPrefix();
	boolean alreadyPresent = producerProperties.containsKey(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
	if (!StringUtils.hasText(txIdPrefix) || alreadyPresent) {
		return producerProperties;
	}
	Map<String, Object> withTxnId = new HashMap<>(producerProperties);
	withTxnId.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, txIdPrefix);
	return withTxnId;
}
/**
 * Add a listener (appended to the end of the list).
 * @param listener the listener.
 * @since 2.5
 */
@Override
public void addListener(Listener<K, V> listener) {
	Assert.notNull(listener, "'listener' cannot be null");
	this.listeners.add(listener);
}
/**
 * Add a listener at a specific index; an index beyond the end of the list
 * appends, matching {@link #addListener(Listener)}.
 * @param index the index (list position).
 * @param listener the listener.
 * @since 2.5
 */
@Override
public void addListener(int index, Listener<K, V> listener) {
	Assert.notNull(listener, "'listener' cannot be null");
	// Clamp to size so an out-of-range index appends instead of throwing.
	int insertionPoint = Math.min(index, this.listeners.size());
	this.listeners.add(insertionPoint, listener);
}
/**
 * Remove a listener.
 * @param listener the listener.
 * @return true if removed.
 * @since 2.5
 */
@Override
public boolean removeListener(Listener<K, V> listener) {
	return this.listeners.remove(listener);
}
/**
 * Add a post processor applied to each raw producer created by this factory.
 */
@Override
public void addPostProcessor(ProducerPostProcessor<K, V> postProcessor) {
	Assert.notNull(postProcessor, "'postProcessor' cannot be null");
	this.postProcessors.add(postProcessor);
}
/**
 * Remove a post processor.
 * @return true if removed.
 */
@Override
public boolean removePostProcessor(ProducerPostProcessor<K, V> postProcessor) {
	return this.postProcessors.remove(postProcessor);
}
/**
 * Update the factory's configuration. {@code transactional.id} and
 * {@code client.id} are diverted to the corresponding prefix fields (the
 * transactional capability of the factory cannot be toggled); all other
 * entries are written straight into the config map used for new producers.
 * @param updates the entries to apply.
 * @throws IllegalArgumentException if the tx/client id values are not Strings,
 * or a tx id update would add or remove transactional capability.
 */
@Override
public void updateConfigs(Map<String, Object> updates) {
	updates.forEach((key, value) -> {
		if (ProducerConfig.TRANSACTIONAL_ID_CONFIG.equals(key)) {
			// Report the offending value's class (previously this logged the
			// Map.Entry implementation class, which was useless for diagnosis).
			Assert.isTrue(value instanceof String, () -> "'" + ProducerConfig.TRANSACTIONAL_ID_CONFIG
					+ "' must be a String, not a " + (value == null ? "null" : value.getClass().getName()));
			Assert.isTrue(this.transactionIdPrefix != null
					? value != null
					: value == null,
					"Cannot change transactional capability");
			this.transactionIdPrefix = (String) value;
		}
		else if (ProducerConfig.CLIENT_ID_CONFIG.equals(key)) {
			Assert.isTrue(value instanceof String, () -> "'" + ProducerConfig.CLIENT_ID_CONFIG
					+ "' must be a String, not a " + (value == null ? "null" : value.getClass().getName()));
			this.clientIdPrefix = (String) value;
		}
		else {
			this.configs.put(key, value);
		}
	});
}
/**
 * Remove the specified key from the configuration map.
 */
@Override
public void removeConfig(String configKey) {
	this.configs.remove(configKey);
}
/**
 * When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream.
 * Called when a transaction id prefix is set; only adds the config if absent,
 * and logs when the user explicitly disabled idempotence.
 */
private void enableIdempotentBehaviour() {
	Object previousValue = this.configs.putIfAbsent(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
	if (Boolean.FALSE.equals(previousValue)) {
		LOGGER.debug(() -> "The '" + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG
				+ "' is set to false, may result in duplicate messages");
	}
}
/**
 * @return true when a transaction id prefix has been configured.
 */
@Override
public boolean transactionCapable() {
	return this.transactionIdPrefix != null;
}
/**
 * Physically close the shared producer, all cached transactional producers and
 * all consumer-partition producers, then bump the epoch so any surviving
 * (e.g. thread-bound) producers are recreated on next use.
 * Thread-bound producers themselves are NOT closed here (see class javadoc).
 */
@SuppressWarnings("resource")
@Override
public void destroy() {
	CloseSafeProducer<K, V> producerToClose;
	// Detach the singleton under the factory lock, close it outside the lock.
	synchronized (this) {
		producerToClose = this.producer;
		this.producer = null;
	}
	if (producerToClose != null) {
		producerToClose.closeDelegate(this.physicalCloseTimeout, this.listeners);
	}
	// Drain every per-prefix cache of transactional producers.
	this.cache.values().forEach(queue -> {
		CloseSafeProducer<K, V> next = queue.poll();
		while (next != null) {
			try {
				next.closeDelegate(this.physicalCloseTimeout, this.listeners);
			}
			catch (Exception e) {
				LOGGER.error(e, "Exception while closing producer");
			}
			next = queue.poll();
		}
	});
	// Close producers keyed by consumer group/topic/partition suffix.
	synchronized (this.consumerProducers) {
		this.consumerProducers.forEach(
				(k, v) -> v.closeDelegate(this.physicalCloseTimeout, this.listeners));
		this.consumerProducers.clear();
	}
	// Invalidate producers created before this point (epoch is checked on reuse).
	this.epoch.incrementAndGet();
}
@Override
public void onApplicationEvent(ContextStoppedEvent event) {
	// Only react to our own context stopping, not to child/parent contexts.
	if (event.getApplicationContext().equals(this.applicationContext)) {
		reset();
	}
}
/**
 * Close the {@link Producer}(s) and clear the cache of transactional
 * {@link Producer}(s). Exceptions from {@link #destroy()} are logged, not
 * propagated, so reset is safe to call from lifecycle callbacks.
 * @since 2.2
 */
@Override
public void reset() {
	try {
		destroy();
	}
	catch (Exception e) {
		LOGGER.error(e, "Exception while closing producer");
	}
}
/**
 * Create a producer using the factory's configured transaction id prefix (if any).
 */
@Override
public Producer<K, V> createProducer() {
	return createProducer(this.transactionIdPrefix);
}
/**
 * Create a producer with the given tx id prefix; a null argument falls back
 * to the factory's configured prefix.
 */
@Override
public Producer<K, V> createProducer(@Nullable String txIdPrefixArg) {
	String txIdPrefix = txIdPrefixArg == null ? this.transactionIdPrefix : txIdPrefixArg;
	return doCreateProducer(txIdPrefix);
}
/**
 * Create a producer with no transaction id, even if the factory is transactional.
 */
@Override
public Producer<K, V> createNonTransactionalProducer() {
	return doCreateProducer(null);
}
/**
 * Route to the right producer strategy: transactional (per-partition or
 * cached), thread-bound, or the shared singleton.
 * @param txIdPrefix the tx id prefix, or null for a non-transactional producer.
 */
private Producer<K, V> doCreateProducer(@Nullable String txIdPrefix) {
	if (txIdPrefix != null) {
		if (this.producerPerConsumerPartition) {
			return createTransactionalProducerForPartition(txIdPrefix);
		}
		else {
			return createTransactionalProducer(txIdPrefix);
		}
	}
	if (this.producerPerThread) {
		return getOrCreateThreadBoundProducer();
	}
	// Shared singleton path: guard creation/expiry with the factory lock.
	synchronized (this) {
		// Recreate if the current singleton exceeded maxAge (expire() closes it).
		if (this.producer != null && expire(this.producer)) {
			this.producer = null;
		}
		if (this.producer == null) {
			this.producer = new CloseSafeProducer<>(createKafkaProducer(), this::removeProducer,
					this.physicalCloseTimeout, this.beanName, this.epoch.get());
			this.listeners.forEach(listener -> listener.producerAdded(this.producer.clientId, this.producer));
		}
		return this.producer;
	}
}
/**
 * Return this thread's producer, replacing it first if it is stale
 * (created before the last {@link #destroy()} — epoch mismatch — or past maxAge).
 */
private Producer<K, V> getOrCreateThreadBoundProducer() {
	CloseSafeProducer<K, V> tlProducer = this.threadBoundProducers.get();
	if (tlProducer != null && (this.epoch.get() != tlProducer.epoch || expire(tlProducer))) {
		closeThreadBoundProducer();
		tlProducer = null;
	}
	if (tlProducer == null) {
		tlProducer = new CloseSafeProducer<>(createKafkaProducer(), this::removeProducer,
				this.physicalCloseTimeout, this.beanName, this.epoch.get());
		for (Listener<K, V> listener : this.listeners) {
			listener.producerAdded(tlProducer.clientId, tlProducer);
		}
		this.threadBoundProducers.set(tlProducer);
	}
	return tlProducer;
}
/**
 * Subclasses must return a raw producer which will be wrapped in a {@link CloseSafeProducer}.
 * @return the producer.
 */
protected Producer<K, V> createKafkaProducer() {
	return createRawProducer(getProducerConfigs());
}
/**
 * Convenience overload using the factory's configured tx id prefix.
 */
protected Producer<K, V> createTransactionalProducerForPartition() {
	return createTransactionalProducerForPartition(this.transactionIdPrefix);
}
/**
 * Return (or create) the transactional producer bound to the current
 * transaction suffix (group/topic/partition, from {@link TransactionSupport}).
 * Falls back to the generic cached-producer path when no suffix is bound.
 * @param txIdPrefix the transaction id prefix.
 */
protected Producer<K, V> createTransactionalProducerForPartition(String txIdPrefix) {
	String suffix = TransactionSupport.getTransactionIdSuffix();
	if (suffix == null) {
		return createTransactionalProducer(txIdPrefix);
	}
	else {
		synchronized (this.consumerProducers) {
			CloseSafeProducer<K, V> consumerProducer = this.consumerProducers.get(suffix);
			// Replace a missing or expired entry; expire() closes the stale producer.
			if (consumerProducer == null || expire(consumerProducer)) {
				CloseSafeProducer<K, V> newProducer = doCreateTxProducer(txIdPrefix, suffix,
						this::removeConsumerProducer);
				this.consumerProducers.put(suffix, newProducer);
				return newProducer;
			}
			else {
				return consumerProducer;
			}
		}
	}
}
/**
 * Close-callback for consumer-partition producers: when the wrapper was
 * marked closed (e.g. after a send/transaction failure), evict it from the
 * map and physically close the delegate.
 * @return true if the producer was in a closed state (regardless of whether
 * it was still present in the map); false to return it to normal use.
 */
private boolean removeConsumerProducer(CloseSafeProducer<K, V> producerToRemove, Duration timeout) {
	if (producerToRemove.closed) {
		synchronized (this.consumerProducers) {
			Iterator<Entry<String, CloseSafeProducer<K, V>>> iterator = this.consumerProducers.entrySet().iterator();
			while (iterator.hasNext()) {
				if (iterator.next().getValue().equals(producerToRemove)) {
					iterator.remove();
					producerToRemove.closeDelegate(timeout, this.listeners);
					return true;
				}
			}
		}
		return true;
	}
	return false;
}
/**
 * Remove the single shared producer and a thread-bound instance if present.
 * Invoked as the close-callback of non-transactional wrappers.
 * @param producerToRemove the producer.
 * @param timeout the close timeout.
 * @return true if the wrapper was marked closed (delegate physically closed).
 * @since 2.2.13
 */
protected final synchronized boolean removeProducer(CloseSafeProducer<K, V> producerToRemove, Duration timeout) {
	if (producerToRemove.closed) {
		// Only physically close when it is the current singleton; a stale
		// reference just gets dropped.
		if (producerToRemove.equals(this.producer)) {
			this.producer = null;
			producerToRemove.closeDelegate(timeout, this.listeners);
		}
		this.threadBoundProducers.remove();
		return true;
	}
	else {
		return false;
	}
}
/**
 * Subclasses must return a producer from the {@link #getCache()} or a
 * new raw producer wrapped in a {@link CloseSafeProducer}.
 * @return the producer - cannot be null.
 * @since 1.3
 */
protected Producer<K, V> createTransactionalProducer() {
	return createTransactionalProducer(this.transactionIdPrefix);
}
/**
 * Take a producer from the per-prefix cache, discarding expired entries;
 * create a new one (with the next incrementing suffix) when the cache is empty.
 * @param txIdPrefix the transaction id prefix.
 */
protected Producer<K, V> createTransactionalProducer(String txIdPrefix) {
	BlockingQueue<CloseSafeProducer<K, V>> queue = getCache(txIdPrefix);
	Assert.notNull(queue, () -> "No cache found for " + txIdPrefix);
	CloseSafeProducer<K, V> cachedProducer = queue.poll();
	// Skip (and close, via expire()) aged-out producers until a live one is found.
	while (cachedProducer != null) {
		if (expire(cachedProducer)) {
			cachedProducer = queue.poll();
		}
		else {
			break;
		}
	}
	if (cachedProducer == null) {
		return doCreateTxProducer(txIdPrefix, "" + this.transactionIdSuffix.getAndIncrement(), this::cacheReturner);
	}
	else {
		return cachedProducer;
	}
}
/**
 * Check whether the given producer exceeded the configured max age; if so,
 * physically close it. A non-positive maxAge disables expiry.
 * @param candidate the producer to check (renamed from {@code producer} to
 * avoid shadowing the field of the same name).
 * @return true if the producer was expired and closed.
 */
private boolean expire(CloseSafeProducer<K, V> candidate) {
	if (this.maxAge <= 0) {
		return false;
	}
	long age = System.currentTimeMillis() - candidate.created;
	if (age <= this.maxAge) {
		return false;
	}
	candidate.closeDelegate(this.physicalCloseTimeout, this.listeners);
	return true;
}
/**
 * Close-callback for cached transactional producers: a failed (closed) wrapper
 * is physically closed; otherwise the producer is returned to its cache.
 * A producer from a previous epoch, or one that no longer fits in the cache,
 * is physically closed instead of being cached.
 * @return true if the delegate was physically closed; false if returned to cache.
 */
boolean cacheReturner(CloseSafeProducer<K, V> producerToRemove, Duration timeout) {
	if (producerToRemove.closed) {
		producerToRemove.closeDelegate(timeout, this.listeners);
		return true;
	}
	else {
		synchronized (this.cache) {
			BlockingQueue<CloseSafeProducer<K, V>> txIdCache = getCache(producerToRemove.txIdPrefix);
			if (producerToRemove.epoch != this.epoch.get()
					|| (txIdCache != null && !txIdCache.contains(producerToRemove)
					&& !txIdCache.offer(producerToRemove))) {
				producerToRemove.closeDelegate(timeout, this.listeners);
				return true;
			}
		}
		return false;
	}
}
/**
 * Create, init-transactions and wrap a transactional producer whose
 * {@code transactional.id} is {@code prefix + suffix}.
 * On {@code initTransactions()} failure the raw producer is closed before
 * rethrowing, so no producer leaks.
 * @param remover the callback invoked when the wrapper is closed.
 */
private CloseSafeProducer<K, V> doCreateTxProducer(String prefix, String suffix,
BiPredicate<CloseSafeProducer<K, V>, Duration> remover) {
	Producer<K, V> newProducer = createRawProducer(getTxProducerConfigs(prefix + suffix));
	try {
		newProducer.initTransactions();
	}
	catch (RuntimeException ex) {
		try {
			newProducer.close(this.physicalCloseTimeout);
		}
		catch (RuntimeException ex2) {
			// Surface the original failure; the close failure rides along as suppressed.
			KafkaException newEx = new KafkaException("initTransactions() failed and then close() failed", ex);
			newEx.addSuppressed(ex2);
			throw newEx; // NOSONAR - lost stack trace
		}
		throw new KafkaException("initTransactions() failed", ex);
	}
	CloseSafeProducer<K, V> closeSafeProducer =
	new CloseSafeProducer<>(newProducer, remover, prefix, this.physicalCloseTimeout, this.beanName,
	this.epoch.get());
	this.listeners.forEach(listener -> listener.producerAdded(closeSafeProducer.clientId, closeSafeProducer));
	return closeSafeProducer;
}
/**
 * Create the underlying {@link KafkaProducer} and run it through any
 * registered post processors (each may wrap or replace the instance).
 * @param rawConfigs the fully-resolved producer configs.
 */
protected Producer<K, V> createRawProducer(Map<String, Object> rawConfigs) {
	Producer<K, V> kafkaProducer =
	new KafkaProducer<>(rawConfigs, this.keySerializerSupplier.get(), this.valueSerializerSupplier.get());
	for (ProducerPostProcessor<K, V> pp : this.postProcessors) {
		kafkaProducer = pp.apply(kafkaProducer);
	}
	return kafkaProducer;
}
/**
 * The cache for the factory's configured tx id prefix; null when not transactional.
 */
@Nullable
protected BlockingQueue<CloseSafeProducer<K, V>> getCache() {
	return getCache(this.transactionIdPrefix);
}
/**
 * Get (creating if necessary) the producer cache for the given tx id prefix.
 * @param txIdPrefix the prefix; null returns null (non-transactional).
 */
@Nullable
protected BlockingQueue<CloseSafeProducer<K, V>> getCache(String txIdPrefix) {
	if (txIdPrefix == null) {
		return null;
	}
	return this.cache.computeIfAbsent(txIdPrefix, txId -> new LinkedBlockingQueue<>());
}
/**
 * Physically close the producer bound to the given consumer group/topic/partition
 * suffix. No-op unless producer-per-consumer-partition mode is active.
 */
@Override
public void closeProducerFor(String suffix) {
	if (this.producerPerConsumerPartition) {
		synchronized (this.consumerProducers) {
			CloseSafeProducer<K, V> removed = this.consumerProducers.remove(suffix);
			if (removed != null) {
				removed.closeDelegate(this.physicalCloseTimeout, this.listeners);
			}
		}
	}
}
/**
 * When using {@link #setProducerPerThread(boolean)} (true), call this method to close
 * and release this thread's producer. Thread bound producers are <b>not</b> closed by
 * {@link #destroy()} or {@link #reset()} methods.
 * @since 2.3
 * @see #setProducerPerThread(boolean)
 */
@Override
public void closeThreadBoundProducer() {
	CloseSafeProducer<K, V> tlProducer = this.threadBoundProducers.get();
	if (tlProducer != null) {
		// Detach from the ThreadLocal first, then physically close.
		this.threadBoundProducers.remove();
		tlProducer.closeDelegate(this.physicalCloseTimeout, this.listeners);
	}
}
/**
 * Return the configuration of a producer: a copy of the factory configs with
 * bootstrap servers resolved and, when a client id prefix is set, a unique
 * {@code client.id} of the form {@code prefix-N}.
 * @return the configuration of a producer.
 * @since 2.8.3
 * @see #createKafkaProducer()
 */
protected Map<String, Object> getProducerConfigs() {
	final Map<String, Object> newProducerConfigs = new HashMap<>(this.configs);
	checkBootstrap(newProducerConfigs);
	if (this.clientIdPrefix != null) {
		newProducerConfigs.put(ProducerConfig.CLIENT_ID_CONFIG,
				this.clientIdPrefix + "-" + this.clientIdCounter.incrementAndGet());
	}
	return newProducerConfigs;
}
/**
 * Return the configuration of a transactional producer: the standard producer
 * configs plus the full {@code transactional.id}.
 * @param transactionId the transactionId (prefix + suffix).
 * @return the configuration of a transactional producer.
 * @since 2.8.3
 * @see #doCreateTxProducer(String, String, BiPredicate)
 */
protected Map<String, Object> getTxProducerConfigs(String transactionId) {
	final Map<String, Object> newProducerConfigs = getProducerConfigs();
	newProducerConfigs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionId);
	return newProducerConfigs;
}
/**
 * A wrapper class for the delegate. {@code close()} does not physically close
 * the delegate; it invokes the factory-supplied {@code removeProducer}
 * callback, which decides whether to cache or physically close it
 * ({@link #closeDelegate(Duration, List)}). The wrapper also records
 * transaction failures so a failed producer is evicted rather than reused.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 */
protected static class CloseSafeProducer<K, V> implements Producer<K, V> {
	// Zero timeout used after a transaction timeout: don't wait on a broken producer.
	private static final Duration CLOSE_TIMEOUT_AFTER_TX_TIMEOUT = Duration.ofMillis(0);
	private final Producer<K, V> delegate;
	// Factory callback deciding the fate of this wrapper on close().
	private final BiPredicate<CloseSafeProducer<K, V>, Duration> removeProducer;
	final String txIdPrefix; // NOSONAR
	final long created; // NOSONAR
	private final Duration closeTimeout;
	final String clientId; // NOSONAR
	final int epoch; // NOSONAR
	// Set when a tx operation or send fails; consulted by abortTransaction()/close().
	private volatile Exception producerFailed;
	volatile boolean closed; // NOSONAR
	CloseSafeProducer(Producer<K, V> delegate,
	BiPredicate<CloseSafeProducer<K, V>, Duration> removeConsumerProducer, Duration closeTimeout,
	String factoryName, int epoch) {
		this(delegate, removeConsumerProducer, null, closeTimeout, factoryName, epoch);
	}
	CloseSafeProducer(Producer<K, V> delegate,
	BiPredicate<CloseSafeProducer<K, V>, Duration> removeProducer, @Nullable String txIdPrefix,
	Duration closeTimeout, String factoryName, int epoch) {
		Assert.isTrue(!(delegate instanceof CloseSafeProducer), "Cannot double-wrap a producer");
		this.delegate = delegate;
		this.removeProducer = removeProducer;
		this.txIdPrefix = txIdPrefix;
		this.closeTimeout = closeTimeout;
		// Derive the client id from the delegate's metric tags; "unknown" if no metrics.
		Map<MetricName, ? extends Metric> metrics = delegate.metrics();
		Iterator<MetricName> metricIterator = metrics.keySet().iterator();
		String id;
		if (metricIterator.hasNext()) {
			id = metricIterator.next().tags().get("client-id");
		}
		else {
			id = "unknown";
		}
		this.clientId = factoryName + "." + id;
		this.created = System.currentTimeMillis();
		this.epoch = epoch;
		LOGGER.debug(() -> "Created new Producer: " + this);
	}
	Producer<K, V> getDelegate() {
		return this.delegate;
	}
	@Override
	public Future<RecordMetadata> send(ProducerRecord<K, V> record) {
		LOGGER.trace(() -> toString() + " send(" + record + ")");
		return this.delegate.send(record);
	}
	@Override
	public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
		LOGGER.trace(() -> toString() + " send(" + record + ")");
		// Intercept the completion so an OutOfOrderSequenceException marks this
		// producer failed and closes it before the user callback runs.
		return this.delegate.send(record, new Callback() {
			@Override
			public void onCompletion(RecordMetadata metadata, Exception exception) {
				if (exception instanceof OutOfOrderSequenceException) {
					CloseSafeProducer.this.producerFailed = exception;
					close(CloseSafeProducer.this.closeTimeout);
				}
				callback.onCompletion(metadata, exception);
			}
		});
	}
	@Override
	public void flush() {
		LOGGER.trace(() -> toString() + " flush()");
		this.delegate.flush();
	}
	@Override
	public List<PartitionInfo> partitionsFor(String topic) {
		return this.delegate.partitionsFor(topic);
	}
	@Override
	public Map<MetricName, ? extends Metric> metrics() {
		return this.delegate.metrics();
	}
	@Override
	public void initTransactions() {
		this.delegate.initTransactions();
	}
	@Override
	public void beginTransaction() throws ProducerFencedException {
		LOGGER.debug(() -> toString() + " beginTransaction()");
		try {
			this.delegate.beginTransaction();
		}
		catch (RuntimeException e) {
			// Record the failure so this producer is evicted instead of reused.
			LOGGER.error(e, () -> "beginTransaction failed: " + this);
			this.producerFailed = e;
			throw e;
		}
	}
	@SuppressWarnings("deprecation")
	@Override
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId)
	throws ProducerFencedException {
		LOGGER.trace(() -> toString() + " sendOffsetsToTransaction(" + offsets + ", " + consumerGroupId + ")");
		this.delegate.sendOffsetsToTransaction(offsets, consumerGroupId);
	}
	@Override
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
	ConsumerGroupMetadata groupMetadata) throws ProducerFencedException {
		LOGGER.trace(() -> toString() + " sendOffsetsToTransaction(" + offsets + ", " + groupMetadata + ")");
		this.delegate.sendOffsetsToTransaction(offsets, groupMetadata);
	}
	@Override
	public void commitTransaction() throws ProducerFencedException {
		LOGGER.debug(() -> toString() + " commitTransaction()");
		try {
			this.delegate.commitTransaction();
		}
		catch (RuntimeException e) {
			LOGGER.error(e, () -> "commitTransaction failed: " + this);
			this.producerFailed = e;
			throw e;
		}
	}
	@Override
	public void abortTransaction() throws ProducerFencedException {
		LOGGER.debug(() -> toString() + " abortTransaction()");
		// After a prior failure the broker-side tx is already doomed; skip the abort.
		if (this.producerFailed != null) {
			LOGGER.debug(() -> "abortTransaction ignored - previous txFailed: " + this.producerFailed.getMessage()
					+ ": " + this);
		}
		else {
			try {
				this.delegate.abortTransaction();
			}
			catch (RuntimeException e) {
				LOGGER.error(e, () -> "Abort failed: " + this);
				this.producerFailed = e;
				throw e;
			}
		}
	}
	@Override
	public void close() {
		close(null);
	}
	@Override
	public void close(@Nullable Duration timeout) {
		LOGGER.trace(() -> toString() + " close(" + (timeout == null ? "null" : timeout) + ")");
		if (!this.closed) {
			if (this.producerFailed != null) {
				// Failed producer: mark closed and let the factory callback dispose of it;
				// after a TimeoutException don't wait at all on close.
				LOGGER.warn(() -> "Error during some operation; producer removed from cache: " + this);
				this.closed = true;
				this.removeProducer.test(this, this.producerFailed instanceof TimeoutException
						? CLOSE_TIMEOUT_AFTER_TX_TIMEOUT
						: timeout);
			}
			else {
				// Healthy producer: the callback returns true only when it physically closed it.
				this.closed = this.removeProducer.test(this, timeout);
			}
		}
	}
	// Physically close the underlying producer and notify listeners; called by the factory.
	void closeDelegate(Duration timeout, List<Listener<K, V>> listeners) {
		this.delegate.close(timeout == null ? this.closeTimeout : timeout);
		listeners.forEach(listener -> listener.producerRemoved(this.clientId, this));
		this.closed = true;
	}
	@Override
	public String toString() {
		return "CloseSafeProducer [delegate=" + this.delegate + "]";
	}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaAdmin.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.errors.InvalidPartitionsException;
import org.apache.kafka.common.errors.TopicExistsException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.SmartInitializingSingleton;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
/**
 * An admin that delegates to an {@link AdminClient} to create topics defined
 * in the application context.
 * <p>
 * All {@link NewTopic} beans (including those wrapped in {@link NewTopics} beans)
 * found in the application context are checked against the broker when the context
 * is initialized; missing topics are created and partition counts increased as
 * needed (see {@link #initialize()}).
 *
 * @author Gary Russell
 * @author Artem Bilan
 *
 * @since 1.3
 */
public class KafkaAdmin extends KafkaResourceFactory
        implements ApplicationContextAware, SmartInitializingSingleton, KafkaAdminOperations {

    /**
     * The default close timeout duration as 10 seconds.
     */
    public static final Duration DEFAULT_CLOSE_TIMEOUT = Duration.ofSeconds(10);

    // Default per-operation timeout, in seconds.
    private static final int DEFAULT_OPERATION_TIMEOUT = 30;

    private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(KafkaAdmin.class));

    // Defensive copy of the AdminClient configuration supplied at construction.
    private final Map<String, Object> configs;

    private ApplicationContext applicationContext;

    private Duration closeTimeout = DEFAULT_CLOSE_TIMEOUT;

    // Timeout, in seconds, applied to individual admin operations (describe/create).
    private int operationTimeout = DEFAULT_OPERATION_TIMEOUT;

    private boolean fatalIfBrokerNotAvailable;

    private boolean autoCreate = true;

    // True only while initialize() runs as part of context startup
    // (afterSingletonsInstantiated()); together with fatalIfBrokerNotAvailable
    // it controls whether broker failures are fatal or merely logged.
    private boolean initializingContext;

    /**
     * Create an instance with an {@link AdminClient} based on the supplied
     * configuration.
     * @param config the configuration for the {@link AdminClient}.
     */
    public KafkaAdmin(Map<String, Object> config) {
        this.configs = new HashMap<>(config);
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    /**
     * Set the close timeout in seconds. Defaults to {@link #DEFAULT_CLOSE_TIMEOUT} seconds.
     * @param closeTimeout the timeout.
     */
    public void setCloseTimeout(int closeTimeout) {
        this.closeTimeout = Duration.ofSeconds(closeTimeout);
    }

    /**
     * Set the operation timeout in seconds. Defaults to {@value #DEFAULT_OPERATION_TIMEOUT} seconds.
     * @param operationTimeout the timeout.
     */
    public void setOperationTimeout(int operationTimeout) {
        this.operationTimeout = operationTimeout;
    }

    /**
     * Set to true if you want the application context to fail to load if we are unable
     * to connect to the broker during initialization, to check/add topics.
     * @param fatalIfBrokerNotAvailable true to fail.
     */
    public void setFatalIfBrokerNotAvailable(boolean fatalIfBrokerNotAvailable) {
        this.fatalIfBrokerNotAvailable = fatalIfBrokerNotAvailable;
    }

    /**
     * Set to false to suppress auto creation of topics during context initialization.
     * @param autoCreate boolean flag to indicate creating topics or not during context initialization
     * @see #initialize()
     */
    public void setAutoCreate(boolean autoCreate) {
        this.autoCreate = autoCreate;
    }

    @Override
    public Map<String, Object> getConfigurationProperties() {
        Map<String, Object> configs2 = new HashMap<>(this.configs);
        // Apply any bootstrap-servers override from the supplier (KafkaResourceFactory).
        checkBootstrap(configs2);
        return Collections.unmodifiableMap(configs2);
    }

    @Override
    public void afterSingletonsInstantiated() {
        this.initializingContext = true;
        if (this.autoCreate) {
            initialize();
        }
    }

    /**
     * Call this method to check/add topics; this might be needed if the broker was not
     * available when the application context was initialized, and
     * {@link #setFatalIfBrokerNotAvailable(boolean) fatalIfBrokerNotAvailable} is false,
     * or {@link #setAutoCreate(boolean) autoCreate} was set to false.
     * @return true if successful.
     * @see #setFatalIfBrokerNotAvailable(boolean)
     * @see #setAutoCreate(boolean)
     */
    public final boolean initialize() {
        // Gather topics from stand-alone NewTopic beans and NewTopics wrapper beans.
        Collection<NewTopic> newTopics = new ArrayList<>(
                this.applicationContext.getBeansOfType(NewTopic.class, false, false).values());
        Collection<NewTopics> wrappers = this.applicationContext.getBeansOfType(NewTopics.class, false, false).values();
        wrappers.forEach(wrapper -> newTopics.addAll(wrapper.getNewTopics()));
        if (newTopics.size() > 0) {
            AdminClient adminClient = null;
            try {
                adminClient = createAdmin();
            }
            catch (Exception e) {
                // During context startup, failures are fatal only when configured so;
                // explicit (post-startup) calls always propagate the failure.
                if (!this.initializingContext || this.fatalIfBrokerNotAvailable) {
                    throw new IllegalStateException("Could not create admin", e);
                }
                else {
                    LOGGER.error(e, "Could not create admin");
                }
            }
            if (adminClient != null) {
                try {
                    addOrModifyTopicsIfNeeded(adminClient, newTopics);
                    return true;
                }
                catch (Exception e) {
                    if (!this.initializingContext || this.fatalIfBrokerNotAvailable) {
                        throw new IllegalStateException("Could not configure topics", e);
                    }
                    else {
                        LOGGER.error(e, "Could not configure topics");
                    }
                }
                finally {
                    // Any subsequent call to initialize() is no longer part of
                    // context startup; always release the admin client.
                    this.initializingContext = false;
                    adminClient.close(this.closeTimeout);
                }
            }
        }
        this.initializingContext = false;
        return false;
    }

    @Override
    public void createOrModifyTopics(NewTopic... topics) {
        try (AdminClient client = createAdmin()) {
            addOrModifyTopicsIfNeeded(client, Arrays.asList(topics));
        }
    }

    @Override
    public Map<String, TopicDescription> describeTopics(String... topicNames) {
        try (AdminClient admin = createAdmin()) {
            Map<String, TopicDescription> results = new HashMap<>();
            DescribeTopicsResult topics = admin.describeTopics(Arrays.asList(topicNames));
            try {
                results.putAll(topics.all().get(this.operationTimeout, TimeUnit.SECONDS));
                return results;
            }
            catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new KafkaException("Interrupted while getting topic descriptions", ie);
            }
            catch (TimeoutException | ExecutionException ex) {
                throw new KafkaException("Failed to obtain topic descriptions", ex);
            }
        }
    }

    // Create a new AdminClient from the configured properties, applying any
    // bootstrap-servers override first.
    private AdminClient createAdmin() {
        Map<String, Object> configs2 = new HashMap<>(this.configs);
        checkBootstrap(configs2);
        return AdminClient.create(configs2);
    }

    // Describe the requested topics, then create the missing ones and increase
    // partition counts where the broker has fewer than requested.
    private void addOrModifyTopicsIfNeeded(AdminClient adminClient, Collection<NewTopic> topics) {
        if (topics.size() > 0) {
            // Last definition wins when the same topic name appears more than once.
            Map<String, NewTopic> topicNameToTopic = new HashMap<>();
            topics.forEach(t -> topicNameToTopic.compute(t.name(), (k, v) -> t));
            DescribeTopicsResult topicInfo = adminClient
                    .describeTopics(topics.stream()
                            .map(NewTopic::name)
                            .collect(Collectors.toList()));
            List<NewTopic> topicsToAdd = new ArrayList<>();
            Map<String, NewPartitions> topicsToModify = checkPartitions(topicNameToTopic, topicInfo, topicsToAdd);
            if (topicsToAdd.size() > 0) {
                addTopics(adminClient, topicsToAdd);
            }
            if (topicsToModify.size() > 0) {
                modifyTopics(adminClient, topicsToModify);
            }
        }
    }

    /**
     * Compare the requested topics with the broker state. Topics whose describe
     * future fails with an {@link ExecutionException} are assumed not to exist and
     * are added to {@code topicsToAdd}; existing topics with fewer partitions than
     * requested are returned in the map so their partition count can be increased.
     * @param topicNameToTopic the requested topics keyed by name.
     * @param topicInfo the describe result for all requested topic names.
     * @param topicsToAdd output list of topics that need to be created.
     * @return the topics whose partition count should be increased.
     */
    private Map<String, NewPartitions> checkPartitions(Map<String, NewTopic> topicNameToTopic,
            DescribeTopicsResult topicInfo, List<NewTopic> topicsToAdd) {
        Map<String, NewPartitions> topicsToModify = new HashMap<>();
        topicInfo.values().forEach((n, f) -> {
            NewTopic topic = topicNameToTopic.get(n);
            try {
                TopicDescription topicDescription = f.get(this.operationTimeout, TimeUnit.SECONDS);
                if (topic.numPartitions() >= 0 && topic.numPartitions() < topicDescription.partitions().size()) {
                    // The existing topic has MORE partitions than requested; a
                    // partition count cannot be reduced, so just log the mismatch.
                    // (numPartitions() < 0 means "use the broker default" - skip.)
                    LOGGER.info(() -> String.format(
                            "Topic '%s' exists but has a different partition count: %d not %d", n,
                            topicDescription.partitions().size(), topic.numPartitions()));
                }
                else if (topic.numPartitions() > topicDescription.partitions().size()) {
                    LOGGER.info(() -> String.format(
                            "Topic '%s' exists but has a different partition count: %d not %d, increasing "
                                    + "if the broker supports it", n,
                            topicDescription.partitions().size(), topic.numPartitions()));
                    topicsToModify.put(n, NewPartitions.increaseTo(topic.numPartitions()));
                }
            }
            catch (@SuppressWarnings("unused") InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            catch (TimeoutException e) {
                throw new KafkaException("Timed out waiting to get existing topics", e);
            }
            catch (@SuppressWarnings("unused") ExecutionException e) {
                // Describe failed; assume the topic does not exist and create it.
                topicsToAdd.add(topic);
            }
        });
        return topicsToModify;
    }

    // Create the topics, tolerating TopicExistsException (possible race with
    // another application instance creating the same topics).
    private void addTopics(AdminClient adminClient, List<NewTopic> topicsToAdd) {
        CreateTopicsResult topicResults = adminClient.createTopics(topicsToAdd);
        try {
            topicResults.all().get(this.operationTimeout, TimeUnit.SECONDS);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOGGER.error(e, "Interrupted while waiting for topic creation results");
        }
        catch (TimeoutException e) {
            throw new KafkaException("Timed out waiting for create topics results", e);
        }
        catch (ExecutionException e) {
            if (e.getCause() instanceof TopicExistsException) { // Possible race with another app instance
                LOGGER.debug(e.getCause(), "Failed to create topics");
            }
            else {
                LOGGER.error(e.getCause(), "Failed to create topics");
                throw new KafkaException("Failed to create topics", e.getCause()); // NOSONAR
            }
        }
    }

    // Increase partition counts, tolerating InvalidPartitionsException (race with
    // another instance) and UnsupportedVersionException (broker too old - logged
    // but not rethrown).
    private void modifyTopics(AdminClient adminClient, Map<String, NewPartitions> topicsToModify) {
        CreatePartitionsResult partitionsResult = adminClient.createPartitions(topicsToModify);
        try {
            partitionsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOGGER.error(e, "Interrupted while waiting for partition creation results");
        }
        catch (TimeoutException e) {
            throw new KafkaException("Timed out waiting for create partitions results", e);
        }
        catch (ExecutionException e) {
            if (e.getCause() instanceof InvalidPartitionsException) { // Possible race with another app instance
                LOGGER.debug(e.getCause(), "Failed to create partitions");
            }
            else {
                LOGGER.error(e.getCause(), "Failed to create partitions");
                if (!(e.getCause() instanceof UnsupportedVersionException)) {
                    throw new KafkaException("Failed to create partitions", e.getCause()); // NOSONAR
                }
            }
        }
    }

    /**
     * Wrapper for a collection of {@link NewTopic} to facilitate declaring multiple
     * topics as a single bean.
     *
     * @since 2.7
     *
     */
    public static class NewTopics {

        private final Collection<NewTopic> newTopics = new ArrayList<>();

        /**
         * Construct an instance with the {@link NewTopic}s.
         * @param newTopics the topics.
         */
        public NewTopics(NewTopic... newTopics) {
            this.newTopics.addAll(Arrays.asList(newTopics));
        }

        Collection<NewTopic> getNewTopics() {
            return this.newTopics;
        }

    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaAdminOperations.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.Map;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
/**
 * Provides a number of convenience methods wrapping {@code AdminClient}.
 *
 * @author Gary Russell
 * @since 2.7
 *
 */
public interface KafkaAdminOperations {

    /**
     * Get an unmodifiable copy of this admin's configuration.
     * @return the configuration map.
     */
    Map<String, Object> getConfigurationProperties();

    /**
     * Create topics if they don't exist or increase the number of partitions if needed.
     * Existing topics with more partitions than requested are left unchanged.
     * @param topics the topics.
     */
    void createOrModifyTopics(NewTopic... topics);

    /**
     * Obtain {@link TopicDescription}s for these topics.
     * Implementations may throw {@code org.springframework.kafka.KafkaException}
     * if the descriptions cannot be obtained.
     * @param topicNames the topic names.
     * @return a map of name:topicDescription.
     */
    Map<String, TopicDescription> describeTopics(String... topicNames);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaFailureCallback.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import org.springframework.util.concurrent.FailureCallback;
/**
 * An enhanced {@link FailureCallback} for reporting
 * {@link KafkaProducerException}s.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 * @since 2.5
 *
 */
@FunctionalInterface
public interface KafkaFailureCallback<K, V> extends FailureCallback {

    /**
     * Narrow the throwable and delegate to
     * {@link #onFailure(KafkaProducerException)}; the framework completes these
     * callbacks exclusively with {@link KafkaProducerException} instances.
     * @param ex the exception.
     */
    @Override
    default void onFailure(Throwable ex) {
        KafkaProducerException producerException = (KafkaProducerException) ex; // NOSONAR (unchecked cast)
        onFailure(producerException);
    }

    /**
     * Called when the send fails.
     * @param ex the exception.
     */
    void onFailure(KafkaProducerException ex);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaOperations.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.util.concurrent.ListenableFuture;
/**
 * The basic Kafka operations contract returning {@link ListenableFuture}s.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * If the Kafka topic is set with {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME CreateTime}
 * all send operations will use the user provided time if provided, else
 * {@link org.apache.kafka.clients.producer.KafkaProducer} will generate one
 *
 * If the topic is set with {@link org.apache.kafka.common.record.TimestampType#LOG_APPEND_TIME LogAppendTime}
 * then the user provided timestamp will be ignored and instead will be the
 * Kafka broker local time when the message is appended
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 * @author Biju Kunjummen
 */
public interface KafkaOperations<K, V> {

    /**
     * Default timeout for {@link #receive(String, int, long)}.
     */
    Duration DEFAULT_POLL_TIMEOUT = Duration.ofSeconds(5);

    /**
     * Send the data to the default topic with no key or partition.
     * @param data The data.
     * @return a Future for the {@link SendResult}.
     */
    ListenableFuture<SendResult<K, V>> sendDefault(V data);

    /**
     * Send the data to the default topic with the provided key and no partition.
     * @param key the key.
     * @param data The data.
     * @return a Future for the {@link SendResult}.
     */
    ListenableFuture<SendResult<K, V>> sendDefault(K key, V data);

    /**
     * Send the data to the default topic with the provided key and partition.
     * @param partition the partition.
     * @param key the key.
     * @param data the data.
     * @return a Future for the {@link SendResult}.
     */
    ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, V data);

    /**
     * Send the data to the default topic with the provided key and partition.
     * @param partition the partition.
     * @param timestamp the timestamp of the record.
     * @param key the key.
     * @param data the data.
     * @return a Future for the {@link SendResult}.
     * @since 1.3
     */
    ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, Long timestamp, K key, V data);

    /**
     * Send the data to the provided topic with no key or partition.
     * @param topic the topic.
     * @param data The data.
     * @return a Future for the {@link SendResult}.
     */
    ListenableFuture<SendResult<K, V>> send(String topic, V data);

    /**
     * Send the data to the provided topic with the provided key and no partition.
     * @param topic the topic.
     * @param key the key.
     * @param data The data.
     * @return a Future for the {@link SendResult}.
     */
    ListenableFuture<SendResult<K, V>> send(String topic, K key, V data);

    /**
     * Send the data to the provided topic with the provided key and partition.
     * @param topic the topic.
     * @param partition the partition.
     * @param key the key.
     * @param data the data.
     * @return a Future for the {@link SendResult}.
     */
    ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, K key, V data);

    /**
     * Send the data to the provided topic with the provided key and partition.
     * @param topic the topic.
     * @param partition the partition.
     * @param timestamp the timestamp of the record.
     * @param key the key.
     * @param data the data.
     * @return a Future for the {@link SendResult}.
     * @since 1.3
     */
    ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, Long timestamp, K key, V data);

    /**
     * Send the provided {@link ProducerRecord}.
     * @param record the record.
     * @return a Future for the {@link SendResult}.
     * @since 1.3
     */
    ListenableFuture<SendResult<K, V>> send(ProducerRecord<K, V> record);

    /**
     * Send a message with routing information in message headers. The message payload
     * may be converted before sending.
     * @param message the message to send.
     * @return a Future for the {@link SendResult}.
     * @see org.springframework.kafka.support.KafkaHeaders#TOPIC
     * @see org.springframework.kafka.support.KafkaHeaders#PARTITION_ID
     * @see org.springframework.kafka.support.KafkaHeaders#MESSAGE_KEY
     */
    ListenableFuture<SendResult<K, V>> send(Message<?> message);

    /**
     * See {@link Producer#partitionsFor(String)}.
     * @param topic the topic.
     * @return the partition info.
     * @since 1.1
     */
    List<PartitionInfo> partitionsFor(String topic);

    /**
     * See {@link Producer#metrics()}.
     * @return the metrics.
     * @since 1.1
     */
    Map<MetricName, ? extends Metric> metrics();

    /**
     * Execute some arbitrary operation(s) on the producer and return the result.
     * @param callback the callback.
     * @param <T> the result type.
     * @return the result.
     * @since 1.1
     */
    @Nullable
    <T> T execute(ProducerCallback<K, V, T> callback);

    /**
     * Execute some arbitrary operation(s) on the operations and return the result.
     * The operations are invoked within a local transaction and do not participate
     * in a global transaction (if present).
     * @param callback the callback.
     * @param <T> the result type.
     * @return the result.
     * @since 1.1
     */
    @Nullable
    <T> T executeInTransaction(OperationsCallback<K, V, T> callback);

    /**
     * Flush the producer.
     */
    void flush();

    /**
     * When running in a transaction, send the consumer offset(s) to the transaction. The
     * group id is obtained from
     * {@link org.springframework.kafka.support.KafkaUtils#getConsumerGroupId()}. It is
     * not necessary to call this method if the operations are invoked on a listener
     * container thread (and the listener container is configured with a
     * {@link org.springframework.kafka.transaction.KafkaAwareTransactionManager}) since
     * the container will take care of sending the offsets to the transaction.
     * @param offsets The offsets.
     * @since 1.3
     * @deprecated in the 3.0.0 KafkaProducer.
     */
    @Deprecated
    void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets);

    /**
     * When running in a transaction, send the consumer offset(s) to the transaction. It
     * is not necessary to call this method if the operations are invoked on a listener
     * container thread (and the listener container is configured with a
     * {@link org.springframework.kafka.transaction.KafkaAwareTransactionManager}) since
     * the container will take care of sending the offsets to the transaction.
     * @param offsets The offsets.
     * @param consumerGroupId the consumer's group.id.
     * @since 1.3
     * @deprecated in the 3.0.0 KafkaProducer.
     */
    @Deprecated
    void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId);

    /**
     * When running in a transaction, send the consumer offset(s) to the transaction. It
     * is not necessary to call this method if the operations are invoked on a listener
     * container thread (and the listener container is configured with a
     * {@link org.springframework.kafka.transaction.KafkaAwareTransactionManager}) since
     * the container will take care of sending the offsets to the transaction.
     * Use with 2.5 brokers or later.
     * @param offsets The offsets.
     * @param groupMetadata the consumer group metadata.
     * @since 2.5
     * @see Producer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)
     */
    default void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
            ConsumerGroupMetadata groupMetadata) {
        throw new UnsupportedOperationException();
    }

    /**
     * Return true if the implementation supports transactions (has a transaction-capable
     * producer factory).
     * @return true or false.
     * @since 2.3
     */
    boolean isTransactional();

    /**
     * Return true if this template, when transactional, allows non-transactional operations.
     * @return true to allow.
     * @since 2.4.3
     */
    default boolean isAllowNonTransactional() {
        return false;
    }

    /**
     * Return true if the template is currently running in a transaction on the calling
     * thread.
     * @return true if a transaction is running.
     * @since 2.5
     */
    default boolean inTransaction() {
        return false;
    }

    /**
     * Return the producer factory used by this template.
     * @return the factory.
     * @since 2.5
     */
    default ProducerFactory<K, V> getProducerFactory() {
        throw new UnsupportedOperationException("This implementation does not support this operation");
    }

    /**
     * Receive a single record with the default poll timeout (5 seconds).
     * @param topic the topic.
     * @param partition the partition.
     * @param offset the offset.
     * @return the record or null.
     * @since 2.8
     * @see #DEFAULT_POLL_TIMEOUT
     */
    @Nullable
    default ConsumerRecord<K, V> receive(String topic, int partition, long offset) {
        return receive(topic, partition, offset, DEFAULT_POLL_TIMEOUT);
    }

    /**
     * Receive a single record.
     * @param topic the topic.
     * @param partition the partition.
     * @param offset the offset.
     * @param pollTimeout the timeout.
     * @return the record or null.
     * @since 2.8
     */
    @Nullable
    ConsumerRecord<K, V> receive(String topic, int partition, long offset, Duration pollTimeout);

    /**
     * Receive multiple records with the default poll timeout (5 seconds). Only
     * absolute, positive offsets are supported.
     * @param requested a collection of record requests (topic/partition/offset).
     * @return the records
     * @since 2.8
     * @see #DEFAULT_POLL_TIMEOUT
     */
    default ConsumerRecords<K, V> receive(Collection<TopicPartitionOffset> requested) {
        return receive(requested, DEFAULT_POLL_TIMEOUT);
    }

    /**
     * Receive multiple records. Only absolute, positive offsets are supported.
     * @param requested a collection of record requests (topic/partition/offset).
     * @param pollTimeout the timeout.
     * @return the records.
     * @since 2.8
     */
    ConsumerRecords<K, V> receive(Collection<TopicPartitionOffset> requested, Duration pollTimeout);

    /**
     * A callback for executing arbitrary operations on the {@link Producer}.
     * @param <K> the key type.
     * @param <V> the value type.
     * @param <T> the return type.
     * @since 1.3
     */
    interface ProducerCallback<K, V, T> {

        T doInKafka(Producer<K, V> producer);

    }

    /**
     * A callback for executing arbitrary operations on the {@link KafkaOperations}.
     * @param <K> the key type.
     * @param <V> the value type.
     * @param <T> the return type.
     * @since 1.3
     */
    interface OperationsCallback<K, V, T> {

        T doInOperations(KafkaOperations<K, V> operations);

    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaProducerException.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.kafka.KafkaException;
/**
 * Exception thrown when producing a record fails; it carries the
 * {@link ProducerRecord} that could not be sent.
 *
 * @author Gary Russell
 *
 */
@SuppressWarnings("serial")
public class KafkaProducerException extends KafkaException {

    // The record whose send failed.
    private final ProducerRecord<?, ?> failedRecord;

    /**
     * Construct an instance with the provided properties.
     * @param failedProducerRecord the producer record.
     * @param message the message.
     * @param cause the cause.
     */
    public KafkaProducerException(ProducerRecord<?, ?> failedProducerRecord, String message, Throwable cause) {
        super(message, cause);
        this.failedRecord = failedProducerRecord;
    }

    /**
     * Return the failed producer record.
     * @param <K> the key type.
     * @param <V> the value type.
     * @return the record.
     * @since 2.5
     */
    @SuppressWarnings("unchecked")
    public <K, V> ProducerRecord<K, V> getFailedProducerRecord() {
        ProducerRecord<K, V> record = (ProducerRecord<K, V>) this.failedRecord;
        return record;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaResourceFactory.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.kafka.clients.CommonClientConfigs;
import org.springframework.lang.Nullable;
/**
 * Base class for consumer/producer/admin creators.
 *
 * @author Gary Russell
 * @since 2.5
 *
 */
public abstract class KafkaResourceFactory {

    private Supplier<String> bootstrapServersSupplier;

    /**
     * Return the bootstrap server list from the configured supplier, or {@code null}
     * when no supplier has been set.
     * @return the bootstrap servers, if available.
     */
    @Nullable
    protected String getBootstrapServers() {
        Supplier<String> supplier = this.bootstrapServersSupplier;
        if (supplier != null) {
            return supplier.get();
        }
        return null;
    }

    /**
     * Set a supplier for the bootstrap server list to override any configured in a
     * subclass.
     * @param bootstrapServersSupplier the supplier.
     */
    public void setBootstrapServersSupplier(Supplier<String> bootstrapServersSupplier) {
        this.bootstrapServersSupplier = bootstrapServersSupplier;
    }

    /**
     * Replace the bootstrap servers property in the supplied configs with the value
     * from {@link #getBootstrapServers()}, when one is available.
     * @param configs the configs.
     */
    protected void checkBootstrap(Map<String, Object> configs) {
        String override = getBootstrapServers();
        if (override != null) {
            configs.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, override);
        }
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaResourceHolder.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import org.apache.kafka.clients.producer.Producer;
import org.springframework.transaction.support.ResourceHolderSupport;
import org.springframework.util.Assert;
/**
 * Kafka resource holder, wrapping a Kafka producer. KafkaTransactionManager binds instances of this
 * class to the thread, for a given Kafka producer factory.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 */
public class KafkaResourceHolder<K, V> extends ResourceHolderSupport {

    private final Producer<K, V> producer;

    private final Duration closeTimeout;

    /**
     * Construct an instance for the producer.
     * @param producer the producer.
     * @param closeTimeout the close timeout.
     */
    public KafkaResourceHolder(Producer<K, V> producer, Duration closeTimeout) {
        Assert.notNull(producer, "'producer' cannot be null");
        Assert.notNull(closeTimeout, "'closeTimeout' cannot be null");
        this.producer = producer;
        this.closeTimeout = closeTimeout;
    }

    /**
     * Return the wrapped producer.
     * @return the producer.
     */
    public Producer<K, V> getProducer() {
        return this.producer;
    }

    /**
     * Commit the producer's current transaction.
     */
    public void commit() {
        this.producer.commitTransaction();
    }

    /**
     * Abort the producer's current transaction.
     */
    public void rollback() {
        this.producer.abortTransaction();
    }

    /**
     * Close the producer, waiting up to the configured close timeout.
     */
    public void close() {
        this.producer.close(this.closeTimeout);
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaSendCallback.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFutureCallback;
/**
* An enhanced {@link ListenableFutureCallback} for reporting
* {@link KafkaProducerException}s.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.5
*
*/
public interface KafkaSendCallback<K, V> extends ListenableFutureCallback<SendResult<K, V>>, KafkaFailureCallback<K, V> {
// Marker interface: combines ListenableFutureCallback's success/failure handling
// with KafkaFailureCallback's typed onFailure(KafkaProducerException) callback,
// so send callbacks can access the failed ProducerRecord from the exception.
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/KafkaTemplate.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextStoppedEvent;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.TransactionSupport;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.kafka.support.micrometer.MicrometerHolder;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.converter.SmartMessageConverter;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import org.springframework.util.Assert;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SettableListenableFuture;
/**
* A template for executing high-level operations. When used with a
* {@link DefaultKafkaProducerFactory}, the template is thread-safe. The producer factory
* and {@link org.apache.kafka.clients.producer.KafkaProducer} ensure this; refer to their
* respective javadocs.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Marius Bogoevici
* @author Gary Russell
* @author Igor Stepanov
* @author Artem Bilan
* @author Biju Kunjummen
* @author Endika Gutierrez
* @author Thomas Strauß
*/
public class KafkaTemplate<K, V> implements KafkaOperations<K, V>, ApplicationContextAware, BeanNameAware,
ApplicationListener<ContextStoppedEvent>, DisposableBean {
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(this.getClass())); //NOSONAR
// Factory used to obtain (possibly transactional) producers for each operation.
private final ProducerFactory<K, V> producerFactory;
// True when this template created its own factory copy from configOverrides;
// lifecycle events (context stopped, destroy()) are then propagated to that copy.
private final boolean customProducerFactory;
// When true, flush() is invoked after every send (see doSend()).
private final boolean autoFlush;
// Cached result of producerFactory.transactionCapable().
private final boolean transactional;
// Producer bound to the calling thread for the duration of executeInTransaction().
private final ThreadLocal<Producer<K, V>> producers = new ThreadLocal<>();
// Extra tags applied to the Micrometer send timer.
private final Map<String, String> micrometerTags = new HashMap<>();
// Used to name the Micrometer timer; overwritten by setBeanName() when a bean.
private String beanName = "kafkaTemplate";
private ApplicationContext applicationContext;
// Converts spring-messaging Messages to/from ProducerRecords.
private RecordMessageConverter messageConverter = new MessagingMessageConverter();
// Topic used by the sendDefault()/send(Message) methods when none is supplied.
private String defaultTopic;
// Notified on send success/failure; the default logs errors only. May be null.
private ProducerListener<K, V> producerListener = new LoggingProducerListener<K, V>();
// Optional override of the factory's transactional id prefix.
private String transactionIdPrefix;
private Duration closeTimeout = ProducerFactoryUtils.DEFAULT_CLOSE_TIMEOUT;
// Permit non-transactional sends even when the factory is transaction-capable.
private boolean allowNonTransactional;
// Tracks whether setMessageConverter() was called (guards setMessagingConverter()).
private boolean converterSet;
// Required only for the receive() operations; otherwise may remain null.
private ConsumerFactory<K, V> consumerFactory;
// Volatile: may be flipped off lazily if MicrometerHolder creation fails.
private volatile boolean micrometerEnabled = true;
// Lazily created on the first send; null until then (see doSend()).
private volatile MicrometerHolder micrometerHolder;
/**
* Create an instance using the supplied producer factory and autoFlush false.
* @param producerFactory the producer factory.
*/
public KafkaTemplate(ProducerFactory<K, V> producerFactory) {
// Delegate: no auto-flush, no configuration overrides.
this(producerFactory, false);
}
/**
* Create an instance using the supplied producer factory and properties, with
* autoFlush false. If the configOverrides is not null or empty, a new
* {@link DefaultKafkaProducerFactory} will be created with merged producer properties
* with the overrides being applied after the supplied factory's properties.
* @param producerFactory the producer factory.
* @param configOverrides producer configuration properties to override.
* @since 2.5
*/
public KafkaTemplate(ProducerFactory<K, V> producerFactory, @Nullable Map<String, Object> configOverrides) {
// Delegate: no auto-flush.
this(producerFactory, false, configOverrides);
}
/**
* Create an instance using the supplied producer factory and autoFlush setting.
* <p>
* Set autoFlush to {@code true} if you have configured the producer's
* {@code linger.ms} to a non-default value and wish send operations on this template
* to occur immediately, regardless of that setting, or if you wish to block until the
* broker has acknowledged receipt according to the producer's {@code acks} property.
* @param producerFactory the producer factory.
* @param autoFlush true to flush after each send.
* @see Producer#flush()
*/
public KafkaTemplate(ProducerFactory<K, V> producerFactory, boolean autoFlush) {
// Delegate: no configuration overrides.
this(producerFactory, autoFlush, null);
}
/**
 * Create an instance using the supplied producer factory and autoFlush setting.
 * <p>
 * Set autoFlush to {@code true} if you have configured the producer's
 * {@code linger.ms} to a non-default value and wish send operations on this template
 * to occur immediately, regardless of that setting, or if you wish to block until the
 * broker has acknowledged receipt according to the producer's {@code acks} property.
 * If the configOverrides is not null or empty, a new
 * {@link ProducerFactory} will be created using
 * {@link org.springframework.kafka.core.ProducerFactory#copyWithConfigurationOverride(java.util.Map)}
 * The factory shall apply the overrides after the supplied factory's properties.
 * The {@link org.springframework.kafka.core.ProducerPostProcessor}s from the
 * original factory are copied over to keep instrumentation alive.
 * Registered {@link org.springframework.kafka.core.ProducerFactory.Listener}s are
 * also added to the new factory. If the factory implementation does not support
 * the copy operation, a generic copy of the ProducerFactory is created which will
 * be of type
 * DefaultKafkaProducerFactory.
 * @param producerFactory the producer factory.
 * @param autoFlush true to flush after each send.
 * @param configOverrides producer configuration properties to override.
 * @since 2.5
 * @see Producer#flush()
 */
public KafkaTemplate(ProducerFactory<K, V> producerFactory, boolean autoFlush,
		@Nullable Map<String, Object> configOverrides) {
	Assert.notNull(producerFactory, "'producerFactory' cannot be null");
	this.autoFlush = autoFlush;
	// Micrometer is on by default only when it is present on the class path.
	this.micrometerEnabled = KafkaUtils.MICROMETER_PRESENT;
	this.customProducerFactory = configOverrides != null && !configOverrides.isEmpty();
	if (this.customProducerFactory) {
		// This template owns the copied factory's lifecycle (see destroy() and
		// onApplicationEvent()); the copy applies the overrides on top of the
		// supplied factory's properties.
		this.producerFactory = producerFactory.copyWithConfigurationOverride(configOverrides);
	}
	else {
		this.producerFactory = producerFactory;
	}
	this.transactional = this.producerFactory.transactionCapable();
}
@Override
public void setBeanName(String name) {
// Bean name is used to name the Micrometer send timer.
this.beanName = name;
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) {
this.applicationContext = applicationContext;
if (this.customProducerFactory) {
// The copied factory is not itself a bean, so propagate the context manually.
// NOTE(review): cast assumes copyWithConfigurationOverride() returned a
// DefaultKafkaProducerFactory, as documented on the constructor.
((DefaultKafkaProducerFactory<K, V>) this.producerFactory).setApplicationContext(applicationContext);
}
}
/**
* The default topic for send methods where a topic is not
* provided.
* @return the topic.
*/
public String getDefaultTopic() {
return this.defaultTopic;
}
/**
* Set the default topic for send methods where a topic is not
* provided.
* @param defaultTopic the topic.
*/
public void setDefaultTopic(String defaultTopic) {
this.defaultTopic = defaultTopic;
}
/**
* Set a {@link ProducerListener} which will be invoked when Kafka acknowledges
* a send operation. By default a {@link LoggingProducerListener} is configured
* which logs errors only.
* @param producerListener the listener; may be {@code null}.
*/
public void setProducerListener(@Nullable ProducerListener<K, V> producerListener) {
this.producerListener = producerListener;
}
/**
* Return the message converter.
* @return the message converter.
*/
public RecordMessageConverter getMessageConverter() {
return this.messageConverter;
}
/**
* Set the message converter to use.
* @param messageConverter the message converter.
*/
public void setMessageConverter(RecordMessageConverter messageConverter) {
Assert.notNull(messageConverter, "'messageConverter' cannot be null");
this.messageConverter = messageConverter;
// Record that a custom converter was supplied; setMessagingConverter() is
// then disallowed because it only applies to the default converter.
this.converterSet = true;
}
/**
* Set the {@link SmartMessageConverter} to use with the default
* {@link MessagingMessageConverter}. Not allowed when a custom
* {@link #setMessageConverter(RecordMessageConverter) messageConverter} is provided.
* @param messageConverter the converter.
* @since 2.7.1
*/
public void setMessagingConverter(SmartMessageConverter messageConverter) {
Assert.isTrue(!this.converterSet, "Cannot set the SmartMessageConverter when setting the messageConverter, "
+ "add the SmartConverter to the message converter instead");
// Safe cast: converterSet is false, so the field still holds the default
// MessagingMessageConverter created at construction.
((MessagingMessageConverter) this.messageConverter).setMessagingConverter(messageConverter);
}
@Override
public boolean isTransactional() {
return this.transactional;
}
/**
* Return the transaction id prefix override, if any.
* @return the prefix, or {@code null} if the factory's prefix is used.
*/
public String getTransactionIdPrefix() {
return this.transactionIdPrefix;
}
/**
* Set a transaction id prefix to override the prefix in the producer factory.
* @param transactionIdPrefix the prefix.
* @since 2.3
*/
public void setTransactionIdPrefix(String transactionIdPrefix) {
this.transactionIdPrefix = transactionIdPrefix;
}
/**
* Set the maximum time to wait when closing a producer; default 5 seconds.
* @param closeTimeout the close timeout.
* @since 2.1.14
*/
public void setCloseTimeout(Duration closeTimeout) {
Assert.notNull(closeTimeout, "'closeTimeout' cannot be null");
this.closeTimeout = closeTimeout;
}
/**
* Set to true to allow a non-transactional send when the template is transactional.
* @param allowNonTransactional true to allow.
* @since 2.4.3
*/
public void setAllowNonTransactional(boolean allowNonTransactional) {
this.allowNonTransactional = allowNonTransactional;
}
@Override
public boolean isAllowNonTransactional() {
return this.allowNonTransactional;
}
/**
* Set to false to disable micrometer timers, if micrometer is on the class path.
* @param micrometerEnabled false to disable.
* @since 2.5
*/
public void setMicrometerEnabled(boolean micrometerEnabled) {
this.micrometerEnabled = micrometerEnabled;
}
/**
* Set additional tags for the Micrometer listener timers.
* @param tags the tags; {@code null} is a no-op.
* @since 2.5
*/
public void setMicrometerTags(Map<String, String> tags) {
if (tags != null) {
this.micrometerTags.putAll(tags);
}
}
/**
* Return the producer factory used by this template.
* @return the factory.
* @since 2.2.5
*/
@Override
public ProducerFactory<K, V> getProducerFactory() {
return this.producerFactory;
}
/**
* Return the producer factory used by this template based on the topic.
* The default implementation returns the only producer factory.
* @param topic the topic.
* @return the factory.
* @since 2.5
*/
protected ProducerFactory<K, V> getProducerFactory(String topic) {
return this.producerFactory;
}
/**
* Set a consumer factory for receive operations.
* @param consumerFactory the consumer factory.
* @since 2.8
*/
public void setConsumerFactory(ConsumerFactory<K, V> consumerFactory) {
this.consumerFactory = consumerFactory;
}
@Override
public void onApplicationEvent(ContextStoppedEvent event) {
if (this.customProducerFactory) {
// Only a factory this template created (and thus owns) receives the event;
// an externally supplied factory manages its own lifecycle.
((DefaultKafkaProducerFactory<K, V>) this.producerFactory).onApplicationEvent(event);
}
}
// The sendDefault() variants delegate to send() with the configured defaultTopic.
@Override
public ListenableFuture<SendResult<K, V>> sendDefault(@Nullable V data) {
return send(this.defaultTopic, data);
}
@Override
public ListenableFuture<SendResult<K, V>> sendDefault(K key, @Nullable V data) {
return send(this.defaultTopic, key, data);
}
@Override
public ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, @Nullable V data) {
return send(this.defaultTopic, partition, key, data);
}
@Override
public ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, Long timestamp, K key, @Nullable V data) {
return send(this.defaultTopic, partition, timestamp, key, data);
}
// The send() variants build a ProducerRecord and delegate to doSend().
@Override
public ListenableFuture<SendResult<K, V>> send(String topic, @Nullable V data) {
ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, data);
return doSend(producerRecord);
}
@Override
public ListenableFuture<SendResult<K, V>> send(String topic, K key, @Nullable V data) {
ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, key, data);
return doSend(producerRecord);
}
@Override
public ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, K key, @Nullable V data) {
ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, partition, key, data);
return doSend(producerRecord);
}
@Override
public ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, Long timestamp, K key,
@Nullable V data) {
ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, partition, timestamp, key, data);
return doSend(producerRecord);
}
@Override
public ListenableFuture<SendResult<K, V>> send(ProducerRecord<K, V> record) {
Assert.notNull(record, "'record' cannot be null");
return doSend(record);
}
@SuppressWarnings("unchecked")
@Override
public ListenableFuture<SendResult<K, V>> send(Message<?> message) {
// Convert the spring-messaging Message; the configured defaultTopic is used
// when the message itself carries no topic header.
ProducerRecord<?, ?> producerRecord = this.messageConverter.fromMessage(message, this.defaultTopic);
if (!producerRecord.headers().iterator().hasNext()) { // possibly no Jackson
// The converter mapped no headers at all (e.g. Jackson absent); preserve at
// least the correlation id so request/reply templates still work.
byte[] correlationId = message.getHeaders().get(KafkaHeaders.CORRELATION_ID, byte[].class);
if (correlationId != null) {
producerRecord.headers().add(KafkaHeaders.CORRELATION_ID, correlationId);
}
}
return doSend((ProducerRecord<K, V>) producerRecord);
}
@Override
public List<PartitionInfo> partitionsFor(String topic) {
Producer<K, V> producer = getTheProducer();
try {
return producer.partitionsFor(topic);
}
finally {
// Close (return to the factory) unless the producer belongs to an active
// transaction, in which case the transaction manages its lifecycle.
closeProducer(producer, inTransaction());
}
}
@Override
public Map<MetricName, ? extends Metric> metrics() {
Producer<K, V> producer = getTheProducer();
try {
return producer.metrics();
}
finally {
closeProducer(producer, inTransaction());
}
}
@Override
public <T> T execute(ProducerCallback<K, V, T> callback) {
Assert.notNull(callback, "'callback' cannot be null");
Producer<K, V> producer = getTheProducer();
try {
return callback.doInKafka(producer);
}
finally {
closeProducer(producer, inTransaction());
}
}
@Override
public <T> T executeInTransaction(OperationsCallback<K, V, T> callback) {
Assert.notNull(callback, "'callback' cannot be null");
Assert.state(this.transactional, "Producer factory does not support transactions");
// A thread-bound producer means we are already inside executeInTransaction().
Producer<K, V> producer = this.producers.get();
Assert.state(producer == null, "Nested calls to 'executeInTransaction' are not allowed");
String transactionIdSuffix;
if (this.producerFactory.isProducerPerConsumerPartition()) {
// Save and clear any consumer-partition suffix so the producer created below
// is a dedicated one, not the consumer-bound producer; restored in finally.
transactionIdSuffix = TransactionSupport.getTransactionIdSuffix();
TransactionSupport.clearTransactionIdSuffix();
}
else {
transactionIdSuffix = null;
}
producer = this.producerFactory.createProducer(this.transactionIdPrefix);
try {
producer.beginTransaction();
}
catch (Exception e) {
// beginTransaction failed: the producer never joined a transaction, so it
// must be closed here before rethrowing.
closeProducer(producer, false);
throw e;
}
// Bind to the thread so sends inside the callback reuse this producer.
this.producers.set(producer);
try {
T result = callback.doInOperations(this);
try {
producer.commitTransaction();
}
catch (Exception e) {
// Wrap so the outer catch does not also call abortTransaction() after a
// failed commit (which would be illegal); unwrapped below.
throw new SkipAbortException(e);
}
return result;
}
catch (SkipAbortException e) { // NOSONAR - exception flow control
throw ((RuntimeException) e.getCause()); // NOSONAR - lost stack trace
}
catch (Exception e) {
producer.abortTransaction();
throw e;
}
finally {
if (transactionIdSuffix != null) {
TransactionSupport.setTransactionIdSuffix(transactionIdSuffix);
}
this.producers.remove();
closeProducer(producer, false);
}
}
/**
* {@inheritDoc}
* <p><b>Note</b> It only makes sense to invoke this method if the
* {@link ProducerFactory} serves up a singleton producer (such as the
* {@link DefaultKafkaProducerFactory}).
*/
@Override
public void flush() {
Producer<K, V> producer = getTheProducer();
try {
producer.flush();
}
finally {
closeProducer(producer, inTransaction());
}
}
// Deprecated: sending offsets with only a group id; prefer the
// ConsumerGroupMetadata variant below.
@Override
@Deprecated
public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets) {
sendOffsetsToTransaction(offsets, KafkaUtils.getConsumerGroupId());
}
@SuppressWarnings("deprecation")
@Override
@Deprecated
public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) {
producerForOffsets().sendOffsetsToTransaction(offsets, consumerGroupId);
}
@Override
public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
ConsumerGroupMetadata groupMetadata) {
// Requires an active transaction; producerForOffsets() asserts that.
producerForOffsets().sendOffsetsToTransaction(offsets, groupMetadata);
}
@Override
@Nullable
public ConsumerRecord<K, V> receive(String topic, int partition, long offset, Duration pollTimeout) {
Properties props = oneOnly();
// Consumer is created per call and closed by try-with-resources.
try (Consumer<K, V> consumer = this.consumerFactory.createConsumer(null, null, null, props)) {
TopicPartition topicPartition = new TopicPartition(topic, partition);
return receiveOne(topicPartition, offset, pollTimeout, consumer);
}
}
@Override
public ConsumerRecords<K, V> receive(Collection<TopicPartitionOffset> requested, Duration pollTimeout) {
Properties props = oneOnly();
Map<TopicPartition, List<ConsumerRecord<K, V>>> records = new LinkedHashMap<>();
try (Consumer<K, V> consumer = this.consumerFactory.createConsumer(null, null, null, props)) {
requested.forEach(tpo -> {
// NOTE(review): receiveOne() may return null (no record at that offset
// within the timeout); the null is still added to the list — confirm
// callers tolerate null elements in the returned ConsumerRecords.
ConsumerRecord<K, V> one = receiveOne(tpo.getTopicPartition(), tpo.getOffset(), pollTimeout, consumer);
records.computeIfAbsent(tpo.getTopicPartition(), tp -> new ArrayList<>()).add(one);
});
return new ConsumerRecords<>(records);
}
}
// Build consumer properties limiting each poll to a single record; also asserts
// that a consumerFactory was configured for the receive operations.
private Properties oneOnly() {
Assert.notNull(this.consumerFactory, "A consumerFactory is required");
Properties props = new Properties();
props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
return props;
}
// Fetch the single record at the given offset, or null if none arrives before
// the poll timeout expires (max.poll.records is 1, so at most one is returned).
@Nullable
private ConsumerRecord<K, V> receiveOne(TopicPartition topicPartition, long offset, Duration pollTimeout,
Consumer<K, V> consumer) {
// Pin the consumer to the requested partition and position it at the offset.
consumer.assign(Collections.singletonList(topicPartition));
consumer.seek(topicPartition, offset);
ConsumerRecords<K, V> polled = consumer.poll(pollTimeout);
return polled.count() == 1 ? polled.iterator().next() : null;
}
// Locate the producer participating in the current transaction: first the one
// bound by executeInTransaction(), else the one registered with Spring's
// transaction synchronization (e.g. started by @Transactional or a container).
private Producer<K, V> producerForOffsets() {
Producer<K, V> producer = this.producers.get();
if (producer == null) {
@SuppressWarnings("unchecked")
KafkaResourceHolder<K, V> resourceHolder = (KafkaResourceHolder<K, V>) TransactionSynchronizationManager
.getResource(this.producerFactory);
Assert.isTrue(resourceHolder != null, "No transaction in process");
producer = resourceHolder.getProducer();
}
return producer;
}
// Close (return to the factory cache) unless the producer is owned by an active
// transaction, whose completion will close it instead.
protected void closeProducer(Producer<K, V> producer, boolean inTx) {
if (!inTx) {
producer.close(this.closeTimeout);
}
}
/**
* Send the producer record.
* @param producerRecord the producer record.
* @return a Future for the {@link org.apache.kafka.clients.producer.RecordMetadata
* RecordMetadata}.
*/
protected ListenableFuture<SendResult<K, V>> doSend(final ProducerRecord<K, V> producerRecord) {
final Producer<K, V> producer = getTheProducer(producerRecord.topic());
this.logger.trace(() -> "Sending: " + KafkaUtils.format(producerRecord));
final SettableListenableFuture<SendResult<K, V>> future = new SettableListenableFuture<>();
// Start a Micrometer timer sample if enabled; completed in buildCallback().
Object sample = null;
if (this.micrometerEnabled && this.micrometerHolder == null) {
// Lazily create the holder on first send; returns null (and disables
// micrometer) if the meter registry is unavailable.
this.micrometerHolder = obtainMicrometerHolder();
}
if (this.micrometerHolder != null) {
sample = this.micrometerHolder.start();
}
Future<RecordMetadata> sendFuture =
producer.send(producerRecord, buildCallback(producerRecord, producer, future, sample));
// May be an immediate failure
if (sendFuture.isDone()) {
try {
// Surface synchronous failures (e.g. serialization errors) to the caller
// immediately instead of only via the returned future.
sendFuture.get();
}
catch (InterruptedException e) {
// Restore the interrupt flag before converting to an unchecked exception.
Thread.currentThread().interrupt();
throw new KafkaException("Interrupted", e);
}
catch (ExecutionException e) {
throw new KafkaException("Send failed", e.getCause()); // NOSONAR, stack trace
}
}
if (this.autoFlush) {
flush();
}
this.logger.trace(() -> "Sent: " + KafkaUtils.format(producerRecord));
return future;
}
// Build the Kafka Callback that completes the ListenableFuture, records the
// Micrometer sample, notifies the producerListener, and releases the producer.
private Callback buildCallback(final ProducerRecord<K, V> producerRecord, final Producer<K, V> producer,
final SettableListenableFuture<SendResult<K, V>> future, @Nullable Object sample) {
return (metadata, exception) -> {
try {
if (exception == null) {
// sample is non-null only when micrometerHolder existed at send time.
if (sample != null) {
this.micrometerHolder.success(sample);
}
future.set(new SendResult<>(producerRecord, metadata));
if (KafkaTemplate.this.producerListener != null) {
KafkaTemplate.this.producerListener.onSuccess(producerRecord, metadata);
}
KafkaTemplate.this.logger.trace(() -> "Sent ok: " + KafkaUtils.format(producerRecord)
+ ", metadata: " + metadata);
}
else {
if (sample != null) {
this.micrometerHolder.failure(sample, exception.getClass().getSimpleName());
}
// Wrap so the failed record is available from the exception.
future.setException(new KafkaProducerException(producerRecord, "Failed to send", exception));
if (KafkaTemplate.this.producerListener != null) {
KafkaTemplate.this.producerListener.onError(producerRecord, metadata, exception);
}
KafkaTemplate.this.logger.debug(exception, () -> "Failed to send: "
+ KafkaUtils.format(producerRecord));
}
}
finally {
// Non-transactional producers are closed (returned to the factory) once
// the broker has acknowledged; transactional ones are managed elsewhere.
if (!KafkaTemplate.this.transactional) {
closeProducer(producer, false);
}
}
};
}
/**
* Return true if the template is currently running in a transaction on the calling
* thread.
* @return true if a transaction is running.
* @since 2.2.1
*/
@Override
public boolean inTransaction() {
// True when: inside executeInTransaction() (thread-bound producer), or a
// producer holder is registered with transaction synchronization, or any
// actual Spring-managed transaction is active on this thread.
return this.transactional && (this.producers.get() != null
|| TransactionSynchronizationManager.getResource(this.producerFactory) != null
|| TransactionSynchronizationManager.isActualTransactionActive());
}
private Producer<K, V> getTheProducer() {
return getTheProducer(null);
}
// Resolve the producer to use for an operation, honoring transactions and the
// allowNonTransactional setting; topic (if supplied) selects the factory.
protected Producer<K, V> getTheProducer(@SuppressWarnings("unused") @Nullable String topic) {
boolean transactionalProducer = this.transactional;
if (transactionalProducer) {
boolean inTransaction = inTransaction();
// Outside a transaction, a transactional template may only proceed when
// allowNonTransactional is enabled.
Assert.state(this.allowNonTransactional || inTransaction,
"No transaction is in process; "
+ "possible solutions: run the template operation within the scope of a "
+ "template.executeInTransaction() operation, start a transaction with @Transactional "
+ "before invoking the template method, "
+ "run in a transaction started by a listener container when consuming a record");
if (!inTransaction) {
// allowNonTransactional: fall through to the non-transactional path.
transactionalProducer = false;
}
}
if (transactionalProducer) {
// Prefer the producer bound by executeInTransaction(); otherwise obtain
// (or create) the one synchronized with the current Spring transaction.
Producer<K, V> producer = this.producers.get();
if (producer != null) {
return producer;
}
KafkaResourceHolder<K, V> holder = ProducerFactoryUtils
.getTransactionalResourceHolder(this.producerFactory, this.transactionIdPrefix, this.closeTimeout);
return holder.getProducer();
}
else if (this.allowNonTransactional) {
return this.producerFactory.createNonTransactionalProducer();
}
else if (topic == null) {
return this.producerFactory.createProducer();
}
else {
// Subclasses may route to a different factory per topic.
return getProducerFactory(topic).createProducer();
}
}
// Create the Micrometer timer holder, or return null (and permanently disable
// micrometer for this template) if no MeterRegistry is available.
@Nullable
private MicrometerHolder obtainMicrometerHolder() {
MicrometerHolder holder = null;
try {
if (KafkaUtils.MICROMETER_PRESENT) {
holder = new MicrometerHolder(this.applicationContext, this.beanName,
"spring.kafka.template", "KafkaTemplate Timer",
this.micrometerTags);
}
}
catch (@SuppressWarnings("unused") IllegalStateException ex) {
// Thrown when no registry bean exists; stop trying on subsequent sends.
this.micrometerEnabled = false;
}
return holder;
}
@Override
public void destroy() {
if (this.micrometerHolder != null) {
this.micrometerHolder.destroy();
}
if (this.customProducerFactory) {
// Only a factory copy created by this template is destroyed here; an
// externally supplied factory is managed by its own lifecycle.
((DefaultKafkaProducerFactory<K, V>) this.producerFactory).destroy();
}
}
// Internal marker used by executeInTransaction() to signal that the commit
// itself failed, so the outer handler must not also call abortTransaction().
@SuppressWarnings("serial")
private static final class SkipAbortException extends RuntimeException {
SkipAbortException(Throwable cause) {
super(cause);
}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/MicrometerConsumerListener.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import io.micrometer.core.instrument.ImmutableTag;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.binder.kafka.KafkaClientMetrics;
/**
* A consumer factory listener that manages {@link KafkaClientMetrics}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.5
*
*/
public class MicrometerConsumerListener<K, V> implements ConsumerFactory.Listener<K, V> {

	private final MeterRegistry meterRegistry;

	/** Extra tags applied to every consumer's metrics. */
	private final List<Tag> tags;

	// Metric bindings keyed by consumer client id; access guarded by the
	// synchronized consumerAdded/consumerRemoved methods.
	private final Map<String, KafkaClientMetrics> metrics = new HashMap<>();

	/**
	 * Construct an instance with the provided registry.
	 * @param meterRegistry the registry.
	 */
	public MicrometerConsumerListener(MeterRegistry meterRegistry) {
		this(meterRegistry, Collections.emptyList());
	}

	/**
	 * Construct an instance with the provided registry and tags.
	 * @param meterRegistry the registry.
	 * @param tags the tags.
	 */
	public MicrometerConsumerListener(MeterRegistry meterRegistry, List<Tag> tags) {
		this.meterRegistry = meterRegistry;
		this.tags = tags;
	}

	@Override
	public synchronized void consumerAdded(String id, Consumer<K, V> consumer) {
		// Bind metrics once per client id; computeIfAbsent avoids the original
		// containsKey + put + get triple lookup.
		this.metrics.computeIfAbsent(id, clientId -> {
			List<Tag> consumerTags = new ArrayList<>(this.tags);
			consumerTags.add(new ImmutableTag("spring.id", clientId));
			KafkaClientMetrics clientMetrics = new KafkaClientMetrics(consumer, consumerTags);
			clientMetrics.bindTo(this.meterRegistry);
			return clientMetrics;
		});
	}

	@Override
	public synchronized void consumerRemoved(String id, Consumer<K, V> consumer) {
		// Closing the binding removes the consumer's meters from the registry.
		KafkaClientMetrics removed = this.metrics.remove(id);
		if (removed != null) {
			removed.close();
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/MicrometerProducerListener.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.producer.Producer;
import io.micrometer.core.instrument.ImmutableTag;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.binder.kafka.KafkaClientMetrics;
/**
* A producer factory listener that manages {@link KafkaClientMetrics}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.5
*
*/
public class MicrometerProducerListener<K, V> implements ProducerFactory.Listener<K, V> {

	private final MeterRegistry meterRegistry;

	/** Extra tags applied to every producer's metrics. */
	private final List<Tag> tags;

	// Metric bindings keyed by producer client id; access guarded by the
	// synchronized producerAdded/producerRemoved methods.
	private final Map<String, KafkaClientMetrics> metrics = new HashMap<>();

	/**
	 * Construct an instance with the provided registry.
	 * @param meterRegistry the registry.
	 */
	public MicrometerProducerListener(MeterRegistry meterRegistry) {
		this(meterRegistry, Collections.emptyList());
	}

	/**
	 * Construct an instance with the provided registry and tags.
	 * @param meterRegistry the registry.
	 * @param tags the tags.
	 */
	public MicrometerProducerListener(MeterRegistry meterRegistry, List<Tag> tags) {
		this.meterRegistry = meterRegistry;
		this.tags = tags;
	}

	@Override
	public synchronized void producerAdded(String id, Producer<K, V> producer) {
		// Bind metrics once per client id; computeIfAbsent avoids the original
		// containsKey + put + get triple lookup.
		this.metrics.computeIfAbsent(id, clientId -> {
			List<Tag> producerTags = new ArrayList<>(this.tags);
			producerTags.add(new ImmutableTag("spring.id", clientId));
			KafkaClientMetrics clientMetrics = new KafkaClientMetrics(producer, producerTags);
			clientMetrics.bindTo(this.meterRegistry);
			return clientMetrics;
		});
	}

	@Override
	public synchronized void producerRemoved(String id, Producer<K, V> producer) {
		// Closing the binding removes the producer's meters from the registry.
		KafkaClientMetrics removed = this.metrics.remove(id);
		if (removed != null) {
			removed.close();
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/ProducerFactory.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.lang.Nullable;
/**
* The strategy to produce a {@link Producer} instance(s).
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Thomas Strauß
*/
public interface ProducerFactory<K, V> {

	/**
	 * The default close timeout duration as 30 seconds.
	 */
	Duration DEFAULT_PHYSICAL_CLOSE_TIMEOUT = Duration.ofSeconds(30);

	/**
	 * Create a producer which will be transactional if the factory is so configured.
	 * @return the producer.
	 * @see #transactionCapable()
	 */
	Producer<K, V> createProducer();

	/**
	 * Create a producer with an overridden transaction id prefix.
	 * <p>The default implementation throws {@link UnsupportedOperationException}.
	 * @param txIdPrefix the transaction id prefix.
	 * @return the producer.
	 * @since 2.3
	 */
	default Producer<K, V> createProducer(@SuppressWarnings("unused") String txIdPrefix) {
		throw new UnsupportedOperationException("This factory does not support this method");
	}

	/**
	 * Create a non-transactional producer.
	 * <p>The default implementation throws {@link UnsupportedOperationException}.
	 * @return the producer.
	 * @since 2.4.3
	 * @see #transactionCapable()
	 */
	default Producer<K, V> createNonTransactionalProducer() {
		throw new UnsupportedOperationException("This factory does not support this method");
	}

	/**
	 * Return true if the factory supports transactions.
	 * <p>{@code false} by default.
	 * @return true if transactional.
	 */
	default boolean transactionCapable() {
		return false;
	}

	/**
	 * Remove the specified producer from the cache and close it.
	 * <p>No-op by default.
	 * @param transactionIdSuffix the producer's transaction id suffix.
	 * @since 1.3.8
	 */
	default void closeProducerFor(String transactionIdSuffix) {
	}

	/**
	 * Return the producerPerConsumerPartition.
	 * <p>{@code false} by default.
	 * @return the producerPerConsumerPartition.
	 * @since 1.3.8
	 */
	default boolean isProducerPerConsumerPartition() {
		return false;
	}

	/**
	 * If the factory implementation uses thread-bound producers, call this method to
	 * close and release this thread's producer.
	 * <p>No-op by default.
	 * @since 2.3
	 */
	default void closeThreadBoundProducer() {
	}

	/**
	 * Reset any state in the factory, if supported.
	 * <p>No-op by default.
	 * @since 2.4
	 */
	default void reset() {
	}

	/**
	 * Return an unmodifiable reference to the configuration map for this factory.
	 * Useful for cloning to make a similar factory.
	 * <p>The default implementation throws {@link UnsupportedOperationException}.
	 * @return the configs.
	 * @since 2.5
	 */
	default Map<String, Object> getConfigurationProperties() {
		throw new UnsupportedOperationException("This implementation doesn't support this method");
	}

	/**
	 * Return a supplier for a value serializer.
	 * Useful for cloning to make a similar factory.
	 * <p>The default supplier yields {@code null}.
	 * @return the supplier.
	 * @since 2.5
	 */
	default Supplier<Serializer<V>> getValueSerializerSupplier() {
		return () -> null;
	}

	/**
	 * Return a supplier for a key serializer.
	 * Useful for cloning to make a similar factory.
	 * <p>The default supplier yields {@code null}.
	 * @return the supplier.
	 * @since 2.5
	 */
	default Supplier<Serializer<K>> getKeySerializerSupplier() {
		return () -> null;
	}

	/**
	 * Return true when there is a producer per thread.
	 * <p>{@code false} by default.
	 * @return the producer per thread.
	 * @since 2.5
	 */
	default boolean isProducerPerThread() {
		return false;
	}

	/**
	 * Return the transaction id prefix.
	 * @return the prefix or null if not configured.
	 * @since 2.5
	 */
	@Nullable
	default String getTransactionIdPrefix() {
		return null;
	}

	/**
	 * Get the physical close timeout.
	 * <p>Defaults to {@link #DEFAULT_PHYSICAL_CLOSE_TIMEOUT}.
	 * @return the timeout.
	 * @since 2.5
	 */
	default Duration getPhysicalCloseTimeout() {
		return DEFAULT_PHYSICAL_CLOSE_TIMEOUT;
	}

	/**
	 * Add a listener.
	 * <p>No-op by default.
	 * @param listener the listener.
	 * @since 2.5.3
	 */
	default void addListener(Listener<K, V> listener) {
	}

	/**
	 * Add a listener at a specific index.
	 * <p>No-op by default.
	 * @param index the index (list position).
	 * @param listener the listener.
	 * @since 2.5.3
	 */
	default void addListener(int index, Listener<K, V> listener) {
	}

	/**
	 * Remove a listener.
	 * <p>The default implementation always returns {@code false}.
	 * @param listener the listener.
	 * @return true if removed.
	 * @since 2.5.3
	 */
	default boolean removeListener(Listener<K, V> listener) {
		return false;
	}

	/**
	 * Get the current list of listeners.
	 * <p>Empty by default.
	 * @return the listeners.
	 * @since 2.5.3
	 */
	default List<Listener<K, V>> getListeners() {
		return Collections.emptyList();
	}

	/**
	 * Add a post processor.
	 * <p>No-op by default.
	 * @param postProcessor the post processor.
	 * @since 2.5.3
	 */
	default void addPostProcessor(ProducerPostProcessor<K, V> postProcessor) {
	}

	/**
	 * Remove a post processor.
	 * <p>The default implementation always returns {@code false}.
	 * @param postProcessor the post processor.
	 * @return true if removed.
	 * @since 2.5.3
	 */
	default boolean removePostProcessor(ProducerPostProcessor<K, V> postProcessor) {
		return false;
	}

	/**
	 * Get the current list of post processors.
	 * <p>Empty by default.
	 * @return the post processors.
	 * @since 2.5.3
	 */
	default List<ProducerPostProcessor<K, V>> getPostProcessors() {
		return Collections.emptyList();
	}

	/**
	 * Update the producer configuration map; useful for situations such as
	 * credential rotation.
	 * <p>No-op by default.
	 * @param updates the configuration properties to update.
	 * @since 2.5.10
	 */
	default void updateConfigs(Map<String, Object> updates) {
	}

	/**
	 * Remove the specified key from the configuration map.
	 * <p>No-op by default.
	 * @param configKey the key to remove.
	 * @since 2.5.10
	 */
	default void removeConfig(String configKey) {
	}

	/**
	 * Return the configured key serializer (if provided as an object instead
	 * of a class name in the properties).
	 * @return the serializer.
	 * @since 2.8
	 */
	@Nullable
	default Serializer<K> getKeySerializer() {
		return null;
	}

	/**
	 * Return the configured value serializer (if provided as an object instead
	 * of a class name in the properties).
	 * @return the serializer.
	 * @since 2.8
	 */
	@Nullable
	default Serializer<V> getValueSerializer() {
		return null;
	}

	/**
	 * Copy the properties of the instance and the given properties to create a new producer factory.
	 * <p>The copy shall prioritize the override properties over the configured values.
	 * It is in the responsibility of the factory implementation to make sure the
	 * configuration of the new factory is identical, complete and correct.</p>
	 * <p>ProducerPostProcessor and Listeners must stay intact.</p>
	 * <p>If the factory does not implement this method, an exception will be thrown.</p>
	 * <p>Note: see
	 * {@link org.springframework.kafka.core.DefaultKafkaProducerFactory#copyWithConfigurationOverride}</p>
	 * @param overrideProperties the properties to be applied to the new factory
	 * @return {@link org.springframework.kafka.core.ProducerFactory} with properties
	 * applied
	 * @since 2.5.17
	 * @see org.springframework.kafka.core.KafkaTemplate#KafkaTemplate(ProducerFactory, java.util.Map)
	 */
	default ProducerFactory<K, V> copyWithConfigurationOverride(Map<String, Object> overrideProperties) {
		throw new UnsupportedOperationException(
				"This factory implementation doesn't support creating reconfigured copies.");
	}

	/**
	 * Called whenever a producer is added or removed.
	 * <p>Both callbacks are no-ops by default so implementations may override
	 * only the events they care about.
	 *
	 * @param <K> the key type.
	 * @param <V> the value type.
	 *
	 * @since 2.5
	 *
	 */
	interface Listener<K, V> {

		/**
		 * A new producer was created.
		 * @param id the producer id (factory bean name and client.id separated by a
		 * period).
		 * @param producer the producer.
		 */
		default void producerAdded(String id, Producer<K, V> producer) {
		}

		/**
		 * An existing producer was removed.
		 * @param id the producer id (factory bean name and client.id separated by a period).
		 * @param producer the producer.
		 */
		default void producerRemoved(String id, Producer<K, V> producer) {
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/ProducerFactoryUtils.java | /*
* Copyright 2002-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.time.Duration;
import org.apache.kafka.clients.producer.Producer;
import org.springframework.lang.Nullable;
import org.springframework.transaction.support.ResourceHolderSynchronization;
import org.springframework.transaction.support.TransactionSynchronization;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import org.springframework.util.Assert;
/**
* Helper class for managing a Spring based Kafka {@link DefaultKafkaProducerFactory}
* in particular for obtaining transactional Kafka resources for a given ProducerFactory.
*
* <p>
* Mainly for internal use within the framework.
*
* @author Gary Russell
*/
public final class ProducerFactoryUtils {

	/**
	 * The default close timeout (5 seconds).
	 */
	public static final Duration DEFAULT_CLOSE_TIMEOUT = Duration.ofSeconds(5);

	// Utility class; not instantiable.
	private ProducerFactoryUtils() {
	}

	/**
	 * Obtain a Producer that is synchronized with the current transaction, if any.
	 * Uses the factory's own transaction id prefix and {@link #DEFAULT_CLOSE_TIMEOUT}.
	 * @param producerFactory the ProducerFactory to obtain a Channel for
	 * @param <K> the key type.
	 * @param <V> the value type.
	 * @return the resource holder.
	 */
	public static <K, V> KafkaResourceHolder<K, V> getTransactionalResourceHolder(
			final ProducerFactory<K, V> producerFactory) {
		return getTransactionalResourceHolder(producerFactory, null, DEFAULT_CLOSE_TIMEOUT);
	}

	/**
	 * Obtain a Producer that is synchronized with the current transaction, if any.
	 * @param producerFactory the ProducerFactory to obtain a Channel for
	 * @param closeTimeout the producer close timeout.
	 * @param <K> the key type.
	 * @param <V> the value type.
	 * @return the resource holder.
	 * @since 2.1.14
	 */
	public static <K, V> KafkaResourceHolder<K, V> getTransactionalResourceHolder(
			final ProducerFactory<K, V> producerFactory, Duration closeTimeout) {
		return getTransactionalResourceHolder(producerFactory, null, closeTimeout);
	}

	/**
	 * Obtain a Producer that is synchronized with the current transaction, if any.
	 * If no holder is already bound to the current transaction, a new producer is
	 * created, a Kafka transaction is begun on it, and the holder is bound.
	 * @param producerFactory the ProducerFactory to obtain a Channel for
	 * @param txIdPrefix the transaction id prefix; if null, the producer factory
	 * prefix is used.
	 * @param closeTimeout the producer close timeout.
	 * @param <K> the key type.
	 * @param <V> the value type.
	 * @return the resource holder.
	 * @since 2.3
	 */
	public static <K, V> KafkaResourceHolder<K, V> getTransactionalResourceHolder(
			final ProducerFactory<K, V> producerFactory, @Nullable String txIdPrefix, Duration closeTimeout) {

		Assert.notNull(producerFactory, "ProducerFactory must not be null");

		// The factory instance itself is the key under which the holder is bound.
		@SuppressWarnings("unchecked")
		KafkaResourceHolder<K, V> resourceHolder = (KafkaResourceHolder<K, V>) TransactionSynchronizationManager
				.getResource(producerFactory);
		if (resourceHolder == null) {
			Producer<K, V> producer = producerFactory.createProducer(txIdPrefix);

			try {
				producer.beginTransaction();
			}
			catch (RuntimeException e) {
				// Don't leak the producer if the transaction could not be started.
				producer.close(closeTimeout);
				throw e;
			}

			resourceHolder = new KafkaResourceHolder<K, V>(producer, closeTimeout);
			bindResourceToTransaction(resourceHolder, producerFactory);
		}
		return resourceHolder;
	}

	/**
	 * Close the producer held by the given resource holder, if any.
	 * @param resourceHolder the holder (may be {@code null}, in which case this is a no-op).
	 * @param <K> the key type.
	 * @param <V> the value type.
	 */
	public static <K, V> void releaseResources(@Nullable KafkaResourceHolder<K, V> resourceHolder) {
		if (resourceHolder != null) {
			resourceHolder.close();
		}
	}

	// Bind the holder to the current transaction keyed by the factory and, when a
	// synchronization is active, register a callback for commit/rollback/cleanup.
	private static <K, V> void bindResourceToTransaction(KafkaResourceHolder<K, V> resourceHolder,
			ProducerFactory<K, V> producerFactory) {
		TransactionSynchronizationManager.bindResource(producerFactory, resourceHolder);
		resourceHolder.setSynchronizedWithTransaction(true);
		if (TransactionSynchronizationManager.isSynchronizationActive()) {
			TransactionSynchronizationManager
					.registerSynchronization(new KafkaResourceSynchronization<K, V>(resourceHolder, producerFactory));
		}
	}

	/**
	 * Callback for resource cleanup at the end of a non-native Kafka transaction (e.g. when participating in a
	 * JtaTransactionManager transaction).
	 * @see org.springframework.transaction.jta.JtaTransactionManager
	 */
	private static final class KafkaResourceSynchronization<K, V> extends
			ResourceHolderSynchronization<KafkaResourceHolder<K, V>, Object> {

		private final KafkaResourceHolder<K, V> resourceHolder;

		KafkaResourceSynchronization(KafkaResourceHolder<K, V> resourceHolder, Object resourceKey) {
			super(resourceHolder, resourceKey);
			this.resourceHolder = resourceHolder;
		}

		@Override
		protected boolean shouldReleaseBeforeCompletion() {
			// Release (close) the producer only after the transaction completes.
			return false;
		}

		@Override
		protected void processResourceAfterCommit(KafkaResourceHolder<K, V> resourceHolder) {
			// Parameter shadows the field; the framework passes the same holder here.
			resourceHolder.commit();
		}

		@Override
		public void afterCompletion(int status) {
			try {
				// Any outcome other than a commit rolls the Kafka transaction back.
				if (status != TransactionSynchronization.STATUS_COMMITTED) {
					this.resourceHolder.rollback();
				}
			}
			finally {
				// Always let the superclass unbind/clean up the resource.
				super.afterCompletion(status);
			}
		}

		@Override
		protected void releaseResource(KafkaResourceHolder<K, V> holder, Object resourceKey) {
			ProducerFactoryUtils.releaseResources(holder);
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/ProducerPostProcessor.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.function.Function;
import org.apache.kafka.clients.producer.Producer;
/**
* Called by producer factories to perform post processing on newly created producers.
*
* @param <K> the key type.
* @param <V> the value type
*
* @author Gary Russell
* @since 2.5.3
*
*/
// @FunctionalInterface is valid here: the single abstract method apply(Producer)
// is inherited from Function. The annotation documents that implementations are
// intended to be supplied as lambdas and lets the compiler enforce it.
@FunctionalInterface
public interface ProducerPostProcessor<K, V> extends Function<Producer<K, V>, Producer<K, V>> {

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/RoutingKafkaTemplate.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.springframework.util.Assert;
/**
* A {@link KafkaTemplate} that routes messages based on the topic name. Does not support
* transactions, {@link #flush()}, {@link #metrics()}, and
* {@link #execute(org.springframework.kafka.core.KafkaOperations.ProducerCallback)},
* only simple send operations.
*
* @author Gary Russell
* @since 2.5
*
*/
public class RoutingKafkaTemplate extends KafkaTemplate<Object, Object> {

	private static final String THIS_METHOD_IS_NOT_SUPPORTED = "This method is not supported";

	// Patterns are tried in iteration order; first match wins.
	private final Map<Pattern, ProducerFactory<Object, Object>> factoryMatchers;

	// Cache of resolved topic -> factory so each topic is matched only once.
	private final ConcurrentMap<String, ProducerFactory<Object, Object>> factoryMap = new ConcurrentHashMap<>();

	/**
	 * Construct an instance with the provided properties. The topic patterns will be
	 * traversed in order so an ordered map, such as {@link LinkedHashMap} should be used
	 * with more specific patterns declared first.
	 * @param factories the factories.
	 * @throws IllegalArgumentException if any factory is transaction capable.
	 */
	public RoutingKafkaTemplate(Map<Pattern, ProducerFactory<Object, Object>> factories) {
		// The superclass requires a factory; supply a stub that is never usable
		// because all producer access goes through getProducerFactory(topic).
		super(new ProducerFactory<Object, Object>() {

			@Override
			public Producer<Object, Object> createProducer() {
				throw new UnsupportedOperationException();
			}

		});
		this.factoryMatchers = new LinkedHashMap<>(factories);
		// Bug fix: the previous check used map(...).findFirst() with no filter, so
		// only the FIRST factory's transactionCapable() was inspected and a
		// transactional factory later in the map slipped through. Reject if ANY
		// factory is transaction capable.
		Assert.isTrue(factories.values().stream().noneMatch(ProducerFactory::transactionCapable),
				"Transactional factories are not supported");
	}

	@Override
	public ProducerFactory<Object, Object> getProducerFactory() {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	/**
	 * Resolve the producer factory for the topic, caching the result.
	 * @param topic the topic.
	 * @return the factory.
	 * @throws IllegalStateException if no pattern matches the topic.
	 */
	@Override
	public ProducerFactory<Object, Object> getProducerFactory(String topic) {
		ProducerFactory<Object, Object> producerFactory = this.factoryMap.computeIfAbsent(topic, key -> {
			for (Entry<Pattern, ProducerFactory<Object, Object>> entry : this.factoryMatchers.entrySet()) {
				if (entry.getKey().matcher(topic).matches()) {
					return entry.getValue();
				}
			}
			return null; // computeIfAbsent does not cache a null mapping
		});
		Assert.state(producerFactory != null, "No producer factory found for topic: " + topic);
		return producerFactory;
	}

	@Override
	public <T> T execute(ProducerCallback<Object, Object, T> callback) {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	@Override
	public <T> T executeInTransaction(OperationsCallback<Object, Object, T> callback) {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	@Override
	@Deprecated
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	@Override
	@Deprecated
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets) {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	@Override
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
			ConsumerGroupMetadata groupMetadata) {

		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	@Override
	public Map<MetricName, ? extends Metric> metrics() {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

	@Override
	public void flush() {
		throw new UnsupportedOperationException(THIS_METHOD_IS_NOT_SUPPORTED);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/package-info.java | /**
* Package for kafka core components
*/
@org.springframework.lang.NonNullApi
package org.springframework.kafka.core;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/reactive/ReactiveKafkaConsumerTemplate.java | /*
* Copyright 2019-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core.reactive;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.util.Assert;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOptions;
import reactor.kafka.receiver.ReceiverRecord;
import reactor.kafka.sender.TransactionManager;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;
/**
* Reactive kafka consumer operations implementation.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Mark Norkin
*
* @since 2.3.0
*/
public class ReactiveKafkaConsumerTemplate<K, V> {

	// Single receiver created from the options; all operations delegate to it.
	private final KafkaReceiver<K, V> kafkaReceiver;

	/**
	 * Construct an instance with the provided receiver options.
	 * @param receiverOptions the options; must not be null.
	 */
	public ReactiveKafkaConsumerTemplate(ReceiverOptions<K, V> receiverOptions) {
		Assert.notNull(receiverOptions, "Receiver options can not be null");
		this.kafkaReceiver = KafkaReceiver.create(receiverOptions);
	}

	/**
	 * Receive records with manual acknowledgment support.
	 * @return the flux of receiver records.
	 */
	public Flux<ReceiverRecord<K, V>> receive() {
		return this.kafkaReceiver.receive();
	}

	/**
	 * Receive records, acknowledging them automatically.
	 * concatMap preserves the per-batch ordering of the underlying flux of fluxes.
	 * @return the flux of consumer records.
	 */
	public Flux<ConsumerRecord<K, V>> receiveAutoAck() {
		return this.kafkaReceiver.receiveAutoAck().concatMap(Function.identity());
	}

	/**
	 * Receive records with at-most-once delivery (offsets committed before dispatch).
	 * @return the flux of consumer records.
	 */
	public Flux<ConsumerRecord<K, V>> receiveAtMostOnce() {
		return this.kafkaReceiver.receiveAtmostOnce();
	}

	/**
	 * Returns a {@link Flux} of consumer record batches that may be used for exactly once
	 * delivery semantics. A new transaction is started for each inner Flux and it is the
	 * responsibility of the consuming application to commit or abort the transaction
	 * using {@link TransactionManager#commit()} or {@link TransactionManager#abort()}
	 * after processing the Flux. The next batch of consumer records will be delivered only
	 * after the previous flux terminates. Offsets of records dispatched on each inner Flux
	 * are committed using the provided <code>transactionManager</code> within the transaction
	 * started for that Flux.
	 * <p> Example usage:
	 * <pre>
	 * {@code
	 * KafkaSender<Integer, Person> sender = sender(senderOptions());
	 * ReceiverOptions<Integer, Person> receiverOptions = receiverOptions(Collections.singleton(sourceTopic));
	 * KafkaReceiver<Integer, Person> receiver = KafkaReceiver.create(receiverOptions);
	 * receiver.receiveExactlyOnce(sender.transactionManager())
	 * 	 .concatMap(f -> sendAndCommit(f))
	 * 	 .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)))
	 * 	 .doOnCancel(() -> close());
	 *
	 * Flux<SenderResult<Integer>> sendAndCommit(Flux<ConsumerRecord<Integer, Person>> flux) {
	 * 	 return sender.send(flux.map(r -> SenderRecord.<Integer, Person, Integer>create(transform(r.value()), r.key())))
	 * 			.concatWith(sender.transactionManager().commit());
	 * }
	 * }
	 * </pre>
	 * @param transactionManager Transaction manager used to begin new transaction for each
	 * inner Flux and commit offsets within that transaction
	 * @return Flux of consumer record batches processed within a transaction
	 */
	public Flux<Flux<ConsumerRecord<K, V>>> receiveExactlyOnce(TransactionManager transactionManager) {
		return this.kafkaReceiver.receiveExactlyOnce(transactionManager);
	}

	/**
	 * Invoke the function on the consumer from the consumer thread.
	 * @param function the function to apply.
	 * @param <T> the result type.
	 * @return a Mono with the function result.
	 */
	public <T> Mono<T> doOnConsumer(Function<Consumer<K, V>, ? extends T> function) {
		return this.kafkaReceiver.doOnConsumer(function);
	}

	/**
	 * Get the currently assigned partitions.
	 * @return the flux of partitions.
	 */
	public Flux<TopicPartition> assignment() {
		Mono<Set<TopicPartition>> partitions = doOnConsumer(Consumer::assignment);
		return partitions.flatMapIterable(Function.identity());
	}

	/**
	 * Get the current topic subscriptions.
	 * @return the flux of topics.
	 */
	public Flux<String> subscription() {
		Mono<Set<String>> subscriptions = doOnConsumer(Consumer::subscription);
		return subscriptions.flatMapIterable(Function.identity());
	}

	/**
	 * Seek to the given offset on a partition.
	 * NOTE(review): the lambda returns null, relying on doOnConsumer to yield an
	 * empty Mono for a null result — confirm against the reactor-kafka contract.
	 * @param partition the partition.
	 * @param offset the offset.
	 * @return an empty Mono that completes when the seek is done.
	 */
	public Mono<Void> seek(TopicPartition partition, long offset) {
		return doOnConsumer(consumer -> {
			consumer.seek(partition, offset);
			return null;
		});
	}

	/**
	 * Seek to the beginning of the given partitions.
	 * @param partitions the partitions.
	 * @return an empty Mono that completes when the seek is done.
	 */
	public Mono<Void> seekToBeginning(TopicPartition... partitions) {
		return doOnConsumer(consumer -> {
			consumer.seekToBeginning(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Seek to the end of the given partitions.
	 * @param partitions the partitions.
	 * @return an empty Mono that completes when the seek is done.
	 */
	public Mono<Void> seekToEnd(TopicPartition... partitions) {
		return doOnConsumer(consumer -> {
			consumer.seekToEnd(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Get the current position for a partition.
	 * @param partition the partition.
	 * @return a Mono with the position.
	 */
	public Mono<Long> position(TopicPartition partition) {
		return doOnConsumer(consumer -> consumer.position(partition));
	}

	/**
	 * Get the committed offsets for the given partitions.
	 * @param partitions the partitions.
	 * @return a Mono with the map of partition to offset/metadata.
	 */
	public Mono<Map<TopicPartition, OffsetAndMetadata>> committed(Set<TopicPartition> partitions) {
		return doOnConsumer(consumer -> consumer.committed(partitions));
	}

	/**
	 * Get the partition info for a topic, from the consumer.
	 * @param topic the topic.
	 * @return the flux of partition info.
	 */
	public Flux<PartitionInfo> partitionsFromConsumerFor(String topic) {
		Mono<List<PartitionInfo>> partitions = doOnConsumer(c -> c.partitionsFor(topic));
		return partitions.flatMapIterable(Function.identity());
	}

	/**
	 * Get the currently paused partitions.
	 * @return the flux of partitions.
	 */
	public Flux<TopicPartition> paused() {
		Mono<Set<TopicPartition>> paused = doOnConsumer(Consumer::paused);
		return paused.flatMapIterable(Function.identity());
	}

	/**
	 * Pause consumption from the given partitions.
	 * @param partitions the partitions.
	 * @return an empty Mono that completes when the pause is done.
	 */
	public Mono<Void> pause(TopicPartition... partitions) {
		return doOnConsumer(c -> {
			c.pause(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Resume consumption from the given partitions.
	 * @param partitions the partitions.
	 * @return an empty Mono that completes when the resume is done.
	 */
	public Mono<Void> resume(TopicPartition... partitions) {
		return doOnConsumer(c -> {
			c.resume(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Get the consumer metrics.
	 * @return the flux of metric name/metric tuples.
	 */
	public Flux<Tuple2<MetricName, ? extends Metric>> metricsFromConsumer() {
		return doOnConsumer(Consumer::metrics)
				.flatMapIterable(Map::entrySet)
				.map(m -> Tuples.of(m.getKey(), m.getValue()));
	}

	/**
	 * List the topics visible to the consumer.
	 * @return the flux of topic/partition-info tuples.
	 */
	public Flux<Tuple2<String, List<PartitionInfo>>> listTopics() {
		return doOnConsumer(Consumer::listTopics)
				.flatMapIterable(Map::entrySet)
				.map(topicAndPartition -> Tuples.of(topicAndPartition.getKey(), topicAndPartition.getValue()));
	}

	/**
	 * Look up the offsets for the given timestamps.
	 * @param timestampsToSearch the map of partition to timestamp.
	 * @return the flux of partition/offset-and-timestamp tuples.
	 */
	public Flux<Tuple2<TopicPartition, OffsetAndTimestamp>> offsetsForTimes(
			Map<TopicPartition, Long> timestampsToSearch) {
		return doOnConsumer(c -> c.offsetsForTimes(timestampsToSearch))
				.flatMapIterable(Map::entrySet)
				.map(partitionAndOffset -> Tuples.of(partitionAndOffset.getKey(), partitionAndOffset.getValue()));
	}

	/**
	 * Get the first offsets for the given partitions.
	 * @param partitions the partitions.
	 * @return the flux of partition/offset tuples.
	 */
	public Flux<Tuple2<TopicPartition, Long>> beginningOffsets(TopicPartition... partitions) {
		return doOnConsumer(c -> c.beginningOffsets(Arrays.asList(partitions)))
				.flatMapIterable(Map::entrySet)
				.map(partitionsOffsets -> Tuples.of(partitionsOffsets.getKey(), partitionsOffsets.getValue()));
	}

	/**
	 * Get the end offsets for the given partitions.
	 * @param partitions the partitions.
	 * @return the flux of partition/offset tuples.
	 */
	public Flux<Tuple2<TopicPartition, Long>> endOffsets(TopicPartition... partitions) {
		return doOnConsumer(c -> c.endOffsets(Arrays.asList(partitions)))
				.flatMapIterable(Map::entrySet)
				.map(partitionsOffsets -> Tuples.of(partitionsOffsets.getKey(), partitionsOffsets.getValue()));
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/reactive/ReactiveKafkaProducerTemplate.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.core.reactive;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.reactivestreams.Publisher;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.messaging.Message;
import org.springframework.util.Assert;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.kafka.sender.KafkaSender;
import reactor.kafka.sender.SenderOptions;
import reactor.kafka.sender.SenderRecord;
import reactor.kafka.sender.SenderResult;
import reactor.kafka.sender.TransactionManager;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;
/**
* Reactive kafka producer operations implementation.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Mark Norkin
*
* @since 2.3.0
*/
public class ReactiveKafkaProducerTemplate<K, V> implements AutoCloseable, DisposableBean {
private final KafkaSender<K, V> sender;
private final RecordMessageConverter messageConverter;
public ReactiveKafkaProducerTemplate(SenderOptions<K, V> senderOptions) {
this(senderOptions, new MessagingMessageConverter());
}
public ReactiveKafkaProducerTemplate(SenderOptions<K, V> senderOptions, RecordMessageConverter messageConverter) {
Assert.notNull(senderOptions, "Sender options can not be null");
Assert.notNull(messageConverter, "Message converter can not be null");
this.sender = KafkaSender.create(senderOptions);
this.messageConverter = messageConverter;
}
public <T> Flux<SenderResult<T>> sendTransactionally(Publisher<? extends SenderRecord<K, V, T>> records) {
Flux<Flux<SenderResult<T>>> sendTransactionally = this.sender.sendTransactionally(Flux.just(records));
return sendTransactionally.flatMap(Function.identity());
}
public <T> Mono<SenderResult<T>> sendTransactionally(SenderRecord<K, V, T> record) {
Flux<SenderResult<T>> sendTransactionally = sendTransactionally(Mono.just(record));
return sendTransactionally.single();
}
public Mono<SenderResult<Void>> send(String topic, V value) {
return send(new ProducerRecord<>(topic, value));
}
public Mono<SenderResult<Void>> send(String topic, K key, V value) {
return send(new ProducerRecord<>(topic, key, value));
}
public Mono<SenderResult<Void>> send(String topic, int partition, K key, V value) {
return send(new ProducerRecord<>(topic, partition, key, value));
}
public Mono<SenderResult<Void>> send(String topic, int partition, long timestamp, K key, V value) {
return send(new ProducerRecord<>(topic, partition, timestamp, key, value));
}
public Mono<SenderResult<Void>> send(String topic, Message<?> message) {
@SuppressWarnings("unchecked")
ProducerRecord<K, V> producerRecord = (ProducerRecord<K, V>) this.messageConverter.fromMessage(message, topic);
if (!producerRecord.headers().iterator().hasNext()) { // possibly no Jackson
byte[] correlationId = message.getHeaders().get(KafkaHeaders.CORRELATION_ID, byte[].class);
if (correlationId != null) {
producerRecord.headers().add(KafkaHeaders.CORRELATION_ID, correlationId);
}
}
return send(producerRecord);
}
public Mono<SenderResult<Void>> send(ProducerRecord<K, V> record) {
return send(SenderRecord.create(record, null));
}
public <T> Mono<SenderResult<T>> send(SenderRecord<K, V, T> record) {
return send(Mono.just(record)).single();
}
public <T> Flux<SenderResult<T>> send(Publisher<? extends SenderRecord<K, V, T>> records) {
return this.sender.send(records);
}
/**
* Flush the producer.
* @return {@link Mono#empty()}.
* @deprecated - flush does not make sense in the context of a reactive flow since,
* the send completion signal is a send result, which implies that a flush is
* redundant. If you use this method with reactor-kafka 1.3 or later, it must be
* scheduled to avoid a deadlock; see
* https://issues.apache.org/jira/browse/KAFKA-10790 (since 2.7).
*/
@Deprecated
public Mono<?> flush() {
return doOnProducer(producer -> {
producer.flush();
return Mono.empty();
});
}
public Flux<PartitionInfo> partitionsFromProducerFor(String topic) {
Mono<List<PartitionInfo>> partitionsInfo = doOnProducer(producer -> producer.partitionsFor(topic));
return partitionsInfo.flatMapIterable(Function.identity());
}
public Flux<Tuple2<MetricName, ? extends Metric>> metricsFromProducer() {
return doOnProducer(Producer::metrics)
.flatMapIterable(Map::entrySet)
.map(m -> Tuples.of(m.getKey(), m.getValue()));
}
/**
 * Invoke an action on the underlying producer, deferring to the sender's
 * scheduling so the producer is accessed on an appropriate thread.
 * @param action the function to apply to the producer.
 * @param <T> the action's result type.
 * @return a {@code Mono} emitting the action's result.
 */
public <T> Mono<T> doOnProducer(Function<Producer<K, V>, ? extends T> action) {
return this.sender.doOnProducer(action);
}
/**
 * Return the sender's {@code TransactionManager} for transactional operations.
 * @return the transaction manager.
 */
public TransactionManager transactionManager() {
return this.sender.transactionManager();
}
@Override
public void destroy() {
// Spring DisposableBean callback - same teardown as close().
doClose();
}
@Override
public void close() {
// AutoCloseable callback - same teardown as destroy().
doClose();
}
// Shared teardown for destroy()/close(): closes the reactive sender and its producer.
private void doClose() {
this.sender.close();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/core/reactive/package-info.java | /**
* Reactive component for consumer and producer.
*/
@org.springframework.lang.NonNullApi
package org.springframework.kafka.core.reactive;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerFailedToStartEvent.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
/**
* An event published when a consumer fails to start.
*
* @author Gary Russell
* @since 2.3
*
*/
public class ConsumerFailedToStartEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/**
	 * Create an event signalling that a consumer did not start.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 */
	public ConsumerFailedToStartEvent(Object source, Object container) {
		super(source, container);
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerFailedToStartEvent [source=")
				.append(getSource())
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerPartitionPausedEvent.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import org.apache.kafka.common.TopicPartition;
/**
* An event published when a consumer partition is paused.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class ConsumerPartitionPausedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/** The single partition that was paused. */
	private final TopicPartition partition;
	/**
	 * Construct an instance with the provided source and partition.
	 * @param source the container instance that generated the event.
	 * @param container the container or the parent container if the container is a child.
	 * @param partition the paused partition.
	 * @since 2.7
	 */
	public ConsumerPartitionPausedEvent(Object source, Object container, TopicPartition partition) {
		super(source, container);
		this.partition = partition;
	}
	/**
	 * Return the paused partition.
	 * @return the partition.
	 */
	public TopicPartition getPartition() {
		return this.partition;
	}
	/**
	 * Return the paused partition.
	 * @return the partition.
	 * @deprecated the plural name is misleading for a single-partition event;
	 * use {@link #getPartition()} instead.
	 */
	@Deprecated
	public TopicPartition getPartitions() {
		return this.partition;
	}
	@Override
	public String toString() {
		// Fixed copy/paste defect: previously reported itself as
		// "ConsumerPausedEvent [partitions=...]".
		return "ConsumerPartitionPausedEvent [partition=" + this.partition + "]";
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerPartitionResumedEvent.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import org.apache.kafka.common.TopicPartition;
/**
* An event published when a consumer partition is resumed.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class ConsumerPartitionResumedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/** The single partition that was resumed. */
	private final TopicPartition partition;
	/**
	 * Create an event for a resumed partition.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 * @param partition the resumed partition.
	 * @since 2.7
	 */
	public ConsumerPartitionResumedEvent(Object source, Object container, TopicPartition partition) {
		super(source, container);
		this.partition = partition;
	}
	/**
	 * Return the resumed partition.
	 * @return the partition.
	 */
	public TopicPartition getPartition() {
		return this.partition;
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerPartitionResumedEvent [partition=")
				.append(this.partition)
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerPausedEvent.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import java.util.Collection;
import org.apache.kafka.common.TopicPartition;
/**
* An event published when a consumer is paused.
*
* @author Gary Russell
* @since 2.1.5
*
*/
public class ConsumerPausedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/** The partitions that were paused. */
	private final Collection<TopicPartition> partitions;
	/**
	 * Create an event for a paused consumer.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 * @param partitions the paused partitions.
	 * @since 2.2.1
	 */
	public ConsumerPausedEvent(Object source, Object container, Collection<TopicPartition> partitions) {
		super(source, container);
		this.partitions = partitions;
	}
	/**
	 * Return the paused partitions.
	 * @return the partitions.
	 */
	public Collection<TopicPartition> getPartitions() {
		return this.partitions;
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerPausedEvent [partitions=")
				.append(this.partitions)
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerResumedEvent.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import java.util.Collection;
import org.apache.kafka.common.TopicPartition;
/**
* An event published when a consumer is resumed.
*
* @author Gary Russell
* @since 2.1.5
*
*/
public class ConsumerResumedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/** The partitions that were resumed. */
	private final Collection<TopicPartition> partitions;
	/**
	 * Create an event for a resumed consumer.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 * @param partitions the resumed partitions.
	 * @since 2.2.1
	 */
	public ConsumerResumedEvent(Object source, Object container, Collection<TopicPartition> partitions) {
		super(source, container);
		this.partitions = partitions;
	}
	/**
	 * Return the resumed partitions.
	 * @return the partitions.
	 */
	public Collection<TopicPartition> getPartitions() {
		return this.partitions;
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerResumedEvent [partitions=")
				.append(this.partitions)
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerStartedEvent.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
/**
* An event published when a consumer has started.
*
* @author Gary Russell
* @since 2.3
*
*/
public class ConsumerStartedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/**
	 * Create an event signalling that a consumer has started.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 */
	public ConsumerStartedEvent(Object source, Object container) {
		super(source, container);
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerStartedEvent [source=")
				.append(getSource())
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerStartingEvent.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
/**
* An event published when a consumer is initializing.
*
* @author Gary Russell
* @since 2.3
*
*/
public class ConsumerStartingEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/**
	 * Create an event signalling that a consumer is initializing.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 */
	public ConsumerStartingEvent(Object source, Object container) {
		super(source, container);
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerStartingEvent [source=")
				.append(getSource())
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerStoppedEvent.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
/**
* An event published when a consumer is stopped. While it is best practice to use
* stateless listeners, you can consume this event to clean up any thread-based resources
* (remove ThreadLocals, destroy thread-scoped beans etc), as long as the context event
* multicaster is not modified to use an async task executor. You can also use this event
* to restart a container that was stopped because a transactional producer was fenced.
*
* @author Gary Russell
* @since 2.2
*
*/
public class ConsumerStoppedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/**
	 * Reasons for stopping a consumer.
	 * @since 2.5.9
	 */
	public enum Reason {
		/**
		 * The consumer was stopped because the container was stopped.
		 */
		NORMAL,
		/**
		 * The transactional producer was fenced and the container
		 * {@code stopContainerWhenFenced} property is true.
		 */
		FENCED,
		/**
		 * An authorization exception occurred.
		 * @since 2.5.10
		 */
		AUTH,
		/**
		 * No offset found for a partition and no reset policy.
		 * @since 2.5.10
		 */
		NO_OFFSET,
		/**
		 * A {@link java.lang.Error} was thrown.
		 */
		ERROR
	}
	/** Why the consumer was stopped. */
	private final Reason reason;
	/**
	 * Create an event signalling that a consumer stopped, and why.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 * @param reason the reason the consumer stopped.
	 * @since 2.5.8
	 */
	public ConsumerStoppedEvent(Object source, Object container, Reason reason) {
		super(source, container);
		this.reason = reason;
	}
	/**
	 * Return the reason why the consumer was stopped.
	 * @return the reason.
	 * @since 2.5.8
	 */
	public Reason getReason() {
		return this.reason;
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerStoppedEvent [source=")
				.append(getSource())
				.append(", reason=")
				.append(this.reason)
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ConsumerStoppingEvent.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import java.util.Collection;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* An event published when a consumer is stopped. While it is best practice to use
* stateless listeners, you can consume this event to clean up any thread-based resources
* (remove ThreadLocals, destroy thread-scoped beans etc), as long as the context event
* multicaster is not modified to use an async task executor.
*
* @author Gary Russell
* @since 2.2
*
*/
public class ConsumerStoppingEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	// transient: the Kafka Consumer is not serializable; lost if the event is serialized.
	private transient Consumer<?, ?> consumer;
	/** The partitions assigned at the time the consumer is stopping. */
	private final Collection<TopicPartition> partitions;
	/**
	 * Create an event signalling that a consumer is stopping.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 * @param consumer the consumer that is stopping.
	 * @param partitions the currently assigned partitions.
	 * @since 2.2.1
	 */
	public ConsumerStoppingEvent(Object source, Object container,
			Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
		super(source, container);
		this.consumer = consumer;
		this.partitions = partitions;
	}
	/**
	 * Return the consumer that is stopping.
	 * @return the consumer.
	 */
	public Consumer<?, ?> getConsumer() {
		return this.consumer;
	}
	/**
	 * Return the partitions assigned when the consumer began stopping.
	 * @return the partitions.
	 */
	public Collection<TopicPartition> getPartitions() {
		return this.partitions;
	}
	@Override
	public String toString() {
		return new StringBuilder("ConsumerStoppingEvent [consumer=")
				.append(this.consumer)
				.append(", partitions=")
				.append(this.partitions)
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ContainerStoppedEvent.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
/**
* An event published when a container is stopped.
*
* @author Gary Russell
* @since 2.2
*
*/
public class ContainerStoppedEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/**
	 * Create an event signalling that a container stopped.
	 * @param source the container instance that generated the event.
	 * @param container the container, or the parent container if the source is a child.
	 * @since 2.2.1
	 */
	public ContainerStoppedEvent(Object source, Object container) {
		super(source, container);
	}
	@Override
	public String toString() {
		return new StringBuilder("ContainerStoppedEvent [source=")
				.append(getSource())
				.append(']')
				.toString();
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/KafkaEvent.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import org.springframework.context.ApplicationEvent;
import org.springframework.util.Assert;
/**
* Base class for events.
*
* @author Gary Russell
*
*/
public abstract class KafkaEvent extends ApplicationEvent {
	private static final long serialVersionUID = 1L;
	/** The container (or parent container) the event is for; see {@link #getContainer(Class)}. */
	private final Object container;
	public KafkaEvent(Object source, Object container) {
		super(source);
		this.container = container;
	}
	/**
	 * Get the container for which the event was published, which will be the parent
	 * container if the source that emitted the event is a child container, or the source
	 * itself otherwise. The type is required here to avoid a dependency tangle between
	 * the event and listener packages.
	 * @param type the container type (e.g. {@code MessageListenerContainer.class}).
	 * @param <T> the type.
	 * @return the container.
	 * @since 2.2.1
	 * @see #getSource(Class)
	 */
	public <T> T getContainer(Class<T> type) {
		Assert.isInstanceOf(type, this.container);
		// Class.cast performs a checked cast; the assert above already guarantees it.
		return type.cast(this.container);
	}
	/**
	 * Get the container (source) that published the event. This is provided as an
	 * alternative to {@link #getSource()} to avoid the need to cast in user code. The
	 * type is required here to avoid a dependency tangle between the event and listener
	 * packages.
	 * @param type the container type (e.g. {@code MessageListenerContainer.class}).
	 * @param <T> the type.
	 * @return the container.
	 * @since 2.2.1
	 * @see #getContainer(Class)
	 * @see #getSource()
	 */
	public <T> T getSource(Class<T> type) {
		Assert.isInstanceOf(type, getSource());
		return type.cast(getSource());
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ListenerContainerIdleEvent.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* An event that is emitted when a container is idle if the container
* is configured to do so.
*
* @author Gary Russell
*
*/
public class ListenerContainerIdleEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/** How long the container had been idle when the event was published, in ms. */
	private final long idleTime;
	/** The listener id, or the container bean name. */
	private final String listenerId;
	/** Snapshot of the assigned partitions at publication time (may be null). */
	private final List<TopicPartition> topicPartitions;
	/** Whether the consumer was paused at publication time. */
	private final boolean paused;
	// transient: the Kafka Consumer is not serializable; lost if the event is serialized.
	private transient Consumer<?, ?> consumer;
	/**
	 * Construct an instance with the provided arguments.
	 * @param source the container instance that generated the event.
	 * @param container the container or the parent container if the container is a child.
	 * @param idleTime the idle time.
	 * @param id the container id.
	 * @param topicPartitions the topics/partitions currently assigned.
	 * @param consumer the consumer.
	 * @param paused true if the consumer is paused.
	 * @since 2.2.1
	 */
	public ListenerContainerIdleEvent(Object source, Object container, long idleTime, String id,
			Collection<TopicPartition> topicPartitions, Consumer<?, ?> consumer, boolean paused) {
		super(source, container);
		this.idleTime = idleTime;
		this.listenerId = id;
		// Defensive copy; null means "assignment unknown".
		this.topicPartitions = topicPartitions == null ? null : new ArrayList<>(topicPartitions);
		this.consumer = consumer;
		this.paused = paused;
	}
	/**
	 * How long the container has been idle.
	 * @return the time in milliseconds.
	 */
	public long getIdleTime() {
		return this.idleTime;
	}
	/**
	 * The TopicPartitions the container is listening to.
	 * @return the TopicPartition list (unmodifiable), or null if unknown.
	 */
	public Collection<TopicPartition> getTopicPartitions() {
		if (this.topicPartitions == null) {
			return null;
		}
		return Collections.unmodifiableList(this.topicPartitions);
	}
	/**
	 * The id of the listener (if {@code @KafkaListener}) or the container bean name.
	 * @return the id.
	 */
	public String getListenerId() {
		return this.listenerId;
	}
	/**
	 * Retrieve the consumer. Only populated if the listener is consumer-aware.
	 * Allows the listener to resume a paused consumer.
	 * @return the consumer.
	 * @since 2.0
	 */
	public Consumer<?, ?> getConsumer() {
		return this.consumer;
	}
	/**
	 * Return true if the consumer was paused at the time the idle event was published.
	 * @return paused.
	 * @since 2.1.5
	 */
	public boolean isPaused() {
		return this.paused;
	}
	@Override
	public String toString() {
		float seconds = (float) this.idleTime / 1000; // NOSONAR magic #
		return "ListenerContainerIdleEvent [idleTime="
				+ seconds + "s, listenerId=" + this.listenerId
				+ ", container=" + getSource()
				+ ", paused=" + this.paused
				+ ", topicPartitions=" + this.topicPartitions + "]";
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ListenerContainerNoLongerIdleEvent.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* An event that is emitted when a container is no longer idle if configured to publish
* idle events.
*
* @author Gary Russell
* @since 2.6.2
*/
public class ListenerContainerNoLongerIdleEvent extends KafkaEvent {
private static final long serialVersionUID = 1L;
private final long idleTime;
private final String listenerId;
private final List<TopicPartition> topicPartitions;
private transient Consumer<?, ?> consumer;
/**
* Construct an instance with the provided arguments.
* @param source the container instance that generated the event.
* @param container the container or the parent container if the container is a child.
* @param idleTime how long the container was idle.
* @param id the container id.
* @param topicPartitions the topics/partitions currently assigned.
* @param consumer the consumer.
*/
public ListenerContainerNoLongerIdleEvent(Object source, Object container, long idleTime, String id,
Collection<TopicPartition> topicPartitions, Consumer<?, ?> consumer) {
super(source, container);
this.idleTime = idleTime;
this.listenerId = id;
this.topicPartitions = topicPartitions == null ? null : new ArrayList<>(topicPartitions);
this.consumer = consumer;
}
/**
* The TopicPartitions the container is listening to.
* @return the TopicPartition list.
*/
public Collection<TopicPartition> getTopicPartitions() {
return this.topicPartitions == null ? null : Collections.unmodifiableList(this.topicPartitions);
}
/**
* How long the container was idle.
* @return the time in milliseconds.
*/
public long getIdleTime() {
return this.idleTime;
}
/**
* The id of the listener (if {@code @KafkaListener}) or the container bean name.
* @return the id.
*/
public String getListenerId() {
return this.listenerId;
}
/**
* Retrieve the consumer. Only populated if the listener is consumer-aware.
* Allows the listener to resume a paused consumer.
* @return the consumer.
*/
public Consumer<?, ?> getConsumer() {
return this.consumer;
}
@Override
public String toString() {
return "ListenerContainerNoLongerIdleEvent [idleTime="
+ ((float) this.idleTime / 1000) + "s, listenerId=" + this.listenerId // NOSONAR magic #
+ ", container=" + getSource()
+ ", topicPartitions=" + this.topicPartitions + "]";
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ListenerContainerPartitionIdleEvent.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* An event that is emitted when a container partition is idle if the container
* is configured to do so.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public class ListenerContainerPartitionIdleEvent extends KafkaEvent {
	private static final long serialVersionUID = 1L;
	/** How long the partition had been idle when the event was published, in ms. */
	private final long idleTime;
	/** The listener id, or the container bean name. */
	private final String listenerId;
	/** The idle topic/partition. */
	private final TopicPartition topicPartition;
	/** Whether the partition was paused at publication time. */
	private final boolean paused;
	// transient: the Kafka Consumer is not serializable; lost if the event is serialized.
	private transient Consumer<?, ?> consumer;
	/**
	 * Construct an instance with the provided arguments.
	 * @param source the container instance that generated the event.
	 * @param container the container or the parent container if the container is a child.
	 * @param idleTime the idle time.
	 * @param id the container id.
	 * @param topicPartition the topic/partition.
	 * @param consumer the consumer.
	 * @param paused true if the consumer partition is paused.
	 * @since 2.7
	 */
	public ListenerContainerPartitionIdleEvent(Object source, Object container,
			long idleTime, String id,
			TopicPartition topicPartition, Consumer<?, ?> consumer, boolean paused) {
		super(source, container);
		this.idleTime = idleTime;
		this.listenerId = id;
		this.topicPartition = topicPartition;
		this.consumer = consumer;
		this.paused = paused;
	}
	/**
	 * How long the partition has been idle.
	 * @return the time in milliseconds.
	 */
	public long getIdleTime() {
		return this.idleTime;
	}
	/**
	 * The idle TopicPartition.
	 * @return the TopicPartition.
	 */
	public TopicPartition getTopicPartition() {
		return this.topicPartition;
	}
	/**
	 * The id of the listener (if {@code @KafkaListener}) or the container bean name.
	 * @return the id.
	 */
	public String getListenerId() {
		return this.listenerId;
	}
	/**
	 * Retrieve the consumer. Only populated if the listener is consumer-aware.
	 * Allows the listener to resume a paused consumer.
	 * @return the consumer.
	 * @since 2.0
	 */
	public Consumer<?, ?> getConsumer() {
		return this.consumer;
	}
	/**
	 * Return true if the consumer was paused at the time the idle event was published.
	 * @return paused.
	 * @since 2.1.5
	 */
	public boolean isPaused() {
		return this.paused;
	}
	@Override
	public String toString() {
		// Fixed copy/paste defect: previously reported itself as
		// "ListenerContainerIdleEvent".
		return "ListenerContainerPartitionIdleEvent [idleTime="
				+ ((float) this.idleTime / 1000) + "s, listenerId=" + this.listenerId // NOSONAR magic #
				+ ", container=" + getSource()
				+ ", paused=" + this.paused
				+ ", topicPartition=" + this.topicPartition + "]";
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/ListenerContainerPartitionNoLongerIdleEvent.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* An event that is emitted when a partition is no longer idle if configured to publish
* idle events.
*
* @author Gary Russell
* @since 2.6.2
*/
public class ListenerContainerPartitionNoLongerIdleEvent extends KafkaEvent {

    private static final long serialVersionUID = 1L;

    private final long idleTime;

    private final String listenerId;

    private final TopicPartition topicPartition;

    // transient: consumers are not serializable and only make sense in-process
    private transient Consumer<?, ?> consumer;

    /**
     * Construct an instance with the provided arguments.
     * @param source the container instance that generated the event.
     * @param container the container or the parent container if the container is a child.
     * @param idleTime how long the partition was idle, in milliseconds.
     * @param id the container id.
     * @param topicPartition the no-longer-idle topic/partition.
     * @param consumer the consumer.
     */
    public ListenerContainerPartitionNoLongerIdleEvent(Object source, Object container, long idleTime, String id,
            TopicPartition topicPartition, Consumer<?, ?> consumer) {

        super(source, container);
        this.idleTime = idleTime;
        this.listenerId = id;
        this.topicPartition = topicPartition;
        this.consumer = consumer;
    }

    /**
     * The no-longer-idle TopicPartition.
     * @return the TopicPartition.
     */
    public TopicPartition getTopicPartition() {
        return this.topicPartition;
    }

    /**
     * How long the partition was idle.
     * @return the time in milliseconds.
     */
    public long getIdleTime() {
        return this.idleTime;
    }

    /**
     * The id of the listener (if {@code @KafkaListener}) or the container bean name.
     * @return the id.
     */
    public String getListenerId() {
        return this.listenerId;
    }

    /**
     * Retrieve the consumer. Only populated if the listener is consumer-aware.
     * Allows the listener to resume a paused consumer.
     * @return the consumer.
     */
    public Consumer<?, ?> getConsumer() {
        return this.consumer;
    }

    @Override
    public String toString() {
        // Fixed: report the actual class name (was "ListenerContainerNoLongerIdleEvent")
        // and correct the "topicPartitios" misspelling.
        return "ListenerContainerPartitionNoLongerIdleEvent [idleTime="
                + ((float) this.idleTime / 1000) + "s, listenerId=" + this.listenerId // NOSONAR magic #
                + ", container=" + getSource()
                + ", topicPartition=" + this.topicPartition + "]";
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/NonResponsiveConsumerEvent.java | /*
* Copyright 2017-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.event;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
/**
* An event that is emitted when a consumer is not responding to the poll; with early
* versions of the kafka-clients, this was a possible indication that the broker is down.
*
* @author Gary Russell
* @since 1.3.1
*
*/
public class NonResponsiveConsumerEvent extends KafkaEvent {

    private static final long serialVersionUID = 1L;

    private final long timeSinceLastPoll;

    private final String listenerId;

    // Defensive snapshot of the partitions supplied at construction time (may be null).
    private final List<TopicPartition> topicPartitions;

    // transient: consumers are not serializable and only make sense in-process
    private transient Consumer<?, ?> consumer;

    /**
     * Construct an instance with the provided properties.
     * @param source the container instance that generated the event.
     * @param container the container or the parent container if the container is a child.
     * @param timeSinceLastPoll the time since the last poll.
     * @param id the container id.
     * @param topicPartitions the topic partitions.
     * @param consumer the consumer.
     * @since 2.2.1
     */
    public NonResponsiveConsumerEvent(Object source, Object container,
            long timeSinceLastPoll, String id,
            Collection<TopicPartition> topicPartitions, Consumer<?, ?> consumer) {

        super(source, container);
        this.timeSinceLastPoll = timeSinceLastPoll;
        this.listenerId = id;
        if (topicPartitions == null) {
            this.topicPartitions = null;
        }
        else {
            this.topicPartitions = new ArrayList<>(topicPartitions);
        }
        this.consumer = consumer;
    }

    /**
     * How long since the last poll.
     * @return the time in milliseconds.
     */
    public long getTimeSinceLastPoll() {
        return this.timeSinceLastPoll;
    }

    /**
     * The TopicPartitions the container is listening to.
     * @return an unmodifiable view of the TopicPartition list (or null).
     */
    public Collection<TopicPartition> getTopicPartitions() {
        if (this.topicPartitions == null) {
            return null;
        }
        return Collections.unmodifiableList(this.topicPartitions);
    }

    /**
     * The id of the listener (if {@code @KafkaListener}) or the container bean name.
     * @return the id.
     */
    public String getListenerId() {
        return this.listenerId;
    }

    /**
     * Retrieve the consumer. Only populated if the listener is consumer-aware.
     * Allows the listener to resume a paused consumer.
     * @return the consumer.
     */
    public Consumer<?, ?> getConsumer() {
        return this.consumer;
    }

    @Override
    public String toString() {
        // Renders exactly the same text as the previous concatenation-based version.
        StringBuilder builder = new StringBuilder("NonResponsiveConsumerEvent [timeSinceLastPoll=");
        builder.append((float) this.timeSinceLastPoll / 1000) // NOSONAR magic #
                .append("s, listenerId=").append(this.listenerId)
                .append(", container=").append(getSource())
                .append(", topicPartitions=").append(this.topicPartitions)
                .append(']');
        return builder.toString();
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/event/package-info.java | /**
* Application Events.
*/
package org.springframework.kafka.event;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/AbstractConsumerSeekAware.java | /*
* Copyright 2019-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.common.TopicPartition;
import org.springframework.lang.Nullable;
/**
* Manages the {@link ConsumerSeekAware.ConsumerSeekCallback} s for the listener. If the
* listener subclasses this class, it can easily seek arbitrary topics/partitions without
* having to keep track of the callbacks itself.
*
* @author Gary Russell
* @since 2.3
*
*/
public abstract class AbstractConsumerSeekAware implements ConsumerSeekAware {

    // Callback registered for the consumer thread currently executing this listener.
    private final ThreadLocal<ConsumerSeekCallback> callbackForThread = new ThreadLocal<>();

    // Partition -> callback, for all currently assigned partitions.
    private final Map<TopicPartition, ConsumerSeekCallback> callbacks = new ConcurrentHashMap<>();

    // Reverse index: callback -> the partitions it is responsible for.
    private final Map<ConsumerSeekCallback, List<TopicPartition>> callbacksToTopic = new ConcurrentHashMap<>();

    @Override
    public void registerSeekCallback(ConsumerSeekCallback callback) {
        this.callbackForThread.set(callback);
    }

    @Override
    public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
        // Use the thread-bound callback (set in registerSeekCallback) so that each
        // partition maps to the callback of the consumer thread that owns it.
        ConsumerSeekCallback threadCallback = this.callbackForThread.get();
        if (threadCallback != null) {
            assignments.keySet().forEach(tp -> {
                this.callbacks.put(tp, threadCallback);
                this.callbacksToTopic.computeIfAbsent(threadCallback, key -> new LinkedList<>()).add(tp);
            });
        }
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Remove revoked partitions from both maps; drop a callback entirely once it
        // has no partitions left.
        partitions.forEach(tp -> {
            ConsumerSeekCallback removed = this.callbacks.remove(tp);
            if (removed != null) {
                List<TopicPartition> topics = this.callbacksToTopic.get(removed);
                if (topics != null) {
                    topics.remove(tp);
                    if (topics.isEmpty()) {
                        this.callbacksToTopic.remove(removed);
                    }
                }
            }
        });
    }

    @Override
    public void unregisterSeekCallback() {
        this.callbackForThread.remove();
    }

    /**
     * Return the callback for the specified topic/partition.
     * @param topicPartition the topic/partition.
     * @return the callback (or null if there is no assignment).
     */
    @Nullable
    protected ConsumerSeekCallback getSeekCallbackFor(TopicPartition topicPartition) {
        return this.callbacks.get(topicPartition);
    }

    /**
     * The map of callbacks for all currently assigned partitions.
     * @return the map (unmodifiable view).
     */
    protected Map<TopicPartition, ConsumerSeekCallback> getSeekCallbacks() {
        return Collections.unmodifiableMap(this.callbacks);
    }

    /**
     * Return the currently registered callbacks and their associated {@link TopicPartition}(s).
     * @return the map of callbacks and partitions (unmodifiable view).
     * @since 2.6
     */
    protected Map<ConsumerSeekCallback, List<TopicPartition>> getCallbacksAndTopics() {
        return Collections.unmodifiableMap(this.callbacksToTopic);
    }

    /**
     * Seek all assigned partitions to the beginning.
     * @since 2.6
     */
    public void seekToBeginning() {
        getCallbacksAndTopics().forEach(ConsumerSeekCallback::seekToBeginning);
    }

    /**
     * Seek all assigned partitions to the end.
     * @since 2.6
     */
    public void seekToEnd() {
        getCallbacksAndTopics().forEach(ConsumerSeekCallback::seekToEnd);
    }

    /**
     * Seek all assigned partitions to the offset represented by the timestamp.
     * @param time the time to seek to.
     * @since 2.6
     */
    public void seekToTimestamp(long time) {
        getCallbacksAndTopics().forEach((cb, topics) -> cb.seekToTimestamp(topics, time));
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/AbstractKafkaBackOffManagerFactory.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.config.KafkaListenerConfigUtils;
import org.springframework.util.Assert;
/**
* Base class for {@link KafkaBackOffManagerFactory} implementations.
*
* @author Tomaz Fernandes
* @since 2.7
* @see KafkaConsumerBackoffManager
*/
public abstract class AbstractKafkaBackOffManagerFactory
        implements KafkaBackOffManagerFactory, ApplicationContextAware {

    private ApplicationContext applicationContext;

    private ListenerContainerRegistry listenerContainerRegistry;

    /**
     * Creates an instance with the provided {@link ListenerContainerRegistry},
     * which will be used to fetch the {@link MessageListenerContainer} to back off.
     * @param listenerContainerRegistry the listenerContainerRegistry to use.
     */
    public AbstractKafkaBackOffManagerFactory(ListenerContainerRegistry listenerContainerRegistry) {
        this.listenerContainerRegistry = listenerContainerRegistry;
    }

    /**
     * Creates an instance that will retrieve the {@link ListenerContainerRegistry} from
     * the {@link ApplicationContext} lazily, when the manager is created.
     */
    public AbstractKafkaBackOffManagerFactory() {
        this.listenerContainerRegistry = null;
    }

    /**
     * Sets the {@link ListenerContainerRegistry}, that will be used to fetch the
     * {@link MessageListenerContainer} to back off.
     *
     * @param listenerContainerRegistry the listenerContainerRegistry to use.
     */
    public void setListenerContainerRegistry(ListenerContainerRegistry listenerContainerRegistry) {
        this.listenerContainerRegistry = listenerContainerRegistry;
    }

    @Override
    public KafkaConsumerBackoffManager create() {
        return doCreateManager(getListenerContainerRegistry());
    }

    /**
     * Subclasses implement this to build the actual manager from the registry.
     * @param registry the registry.
     * @return the manager.
     */
    protected abstract KafkaConsumerBackoffManager doCreateManager(ListenerContainerRegistry registry);

    protected ListenerContainerRegistry getListenerContainerRegistry() {
        // Prefer an explicitly configured registry; otherwise look it up in the context.
        if (this.listenerContainerRegistry != null) {
            return this.listenerContainerRegistry;
        }
        return getListenerContainerFromContext();
    }

    private ListenerContainerRegistry getListenerContainerFromContext() {
        Assert.notNull(this.applicationContext, "ApplicationContext not set.");
        return this.applicationContext.getBean(KafkaListenerConfigUtils.KAFKA_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME,
                ListenerContainerRegistry.class);
    }

    protected <T> T getBean(String beanName, Class<T> beanClass) {
        return this.applicationContext.getBean(beanName, beanClass);
    }

    protected void addApplicationListener(ApplicationListener<?> applicationListener) {
        ((ConfigurableApplicationContext) this.applicationContext).addApplicationListener(applicationListener);
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) {
        this.applicationContext = applicationContext;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/AbstractMessageListenerContainer.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.event.ContainerStoppedEvent;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* The base implementation for the {@link MessageListenerContainer}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Marius Bogoevici
* @author Artem Bilan
* @author Tomaz Fernandes
*/
public abstract class AbstractMessageListenerContainer<K, V>
implements GenericMessageListenerContainer<K, V>, BeanNameAware, ApplicationEventPublisherAware,
ApplicationContextAware {
/**
* The default {@link org.springframework.context.SmartLifecycle} phase for listener
* containers {@value #DEFAULT_PHASE}.
*/
public static final int DEFAULT_PHASE = Integer.MAX_VALUE - 100; // late phase
private static final int DEFAULT_TOPIC_CHECK_TIMEOUT = 30;
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(this.getClass())); // NOSONAR
protected final ConsumerFactory<K, V> consumerFactory; // NOSONAR (final)
private final ContainerProperties containerProperties;
protected final Object lifecycleMonitor = new Object(); // NOSONAR
private final Set<TopicPartition> pauseRequestedPartitions = ConcurrentHashMap.newKeySet();
private String beanName;
private ApplicationEventPublisher applicationEventPublisher;
private GenericErrorHandler<?> errorHandler;
private CommonErrorHandler commonErrorHandler;
private boolean autoStartup = true;
private int phase = DEFAULT_PHASE;
private AfterRollbackProcessor<? super K, ? super V> afterRollbackProcessor =
new DefaultAfterRollbackProcessor<>();
private int topicCheckTimeout = DEFAULT_TOPIC_CHECK_TIMEOUT;
private RecordInterceptor<K, V> recordInterceptor;
private BatchInterceptor<K, V> batchInterceptor;
private boolean interceptBeforeTx = true;
private byte[] listenerInfo;
private ApplicationContext applicationContext;
private volatile boolean running = false;
private volatile boolean paused;
private volatile boolean stoppedNormally = true;
/**
* Construct an instance with the provided factory and properties.
* @param consumerFactory the factory.
* @param containerProperties the properties.
*/
@SuppressWarnings("unchecked")
protected AbstractMessageListenerContainer(ConsumerFactory<? super K, ? super V> consumerFactory,
ContainerProperties containerProperties) {
Assert.notNull(containerProperties, "'containerProperties' cannot be null");
Assert.notNull(consumerFactory, "'consumerFactory' cannot be null");
this.consumerFactory = (ConsumerFactory<K, V>) consumerFactory;
String[] topics = containerProperties.getTopics();
if (topics != null) {
this.containerProperties = new ContainerProperties(topics);
}
else {
Pattern topicPattern = containerProperties.getTopicPattern();
if (topicPattern != null) {
this.containerProperties = new ContainerProperties(topicPattern);
}
else {
TopicPartitionOffset[] topicPartitions = containerProperties.getTopicPartitions();
if (topicPartitions != null) {
this.containerProperties = new ContainerProperties(topicPartitions);
}
else {
throw new IllegalStateException("topics, topicPattern, or topicPartitions must be provided");
}
}
}
BeanUtils.copyProperties(containerProperties, this.containerProperties,
"topics", "topicPartitions", "topicPattern", "ackCount", "ackTime", "subBatchPerPartition");
if (containerProperties.getAckCount() > 0) {
this.containerProperties.setAckCount(containerProperties.getAckCount());
}
if (containerProperties.getAckTime() > 0) {
this.containerProperties.setAckTime(containerProperties.getAckTime());
}
Boolean subBatchPerPartition = containerProperties.getSubBatchPerPartition();
if (subBatchPerPartition != null) {
this.containerProperties.setSubBatchPerPartition(subBatchPerPartition);
}
if (this.containerProperties.getConsumerRebalanceListener() == null) {
this.containerProperties.setConsumerRebalanceListener(createSimpleLoggingConsumerRebalanceListener());
}
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
@Nullable
protected ApplicationContext getApplicationContext() {
return this.applicationContext;
}
@Override
public void setBeanName(String name) {
this.beanName = name;
}
/**
* Return the bean name.
* @return the bean name.
*/
@Nullable
public String getBeanName() {
return this.beanName;
}
@Override
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) {
this.applicationEventPublisher = applicationEventPublisher;
}
/**
* Get the event publisher.
* @return the publisher
*/
@Nullable
public ApplicationEventPublisher getApplicationEventPublisher() {
return this.applicationEventPublisher;
}
/**
* Set the error handler to call when the listener throws an exception.
* @param errorHandler the error handler.
* @since 2.2
* @deprecated in favor of {@link #setCommonErrorHandler(CommonErrorHandler)}
* @see #setCommonErrorHandler(CommonErrorHandler)
*/
@Deprecated
public void setErrorHandler(ErrorHandler errorHandler) {
this.errorHandler = errorHandler;
}
/**
* Set the error handler to call when the listener throws an exception.
* @param errorHandler the error handler.
* @since 2.2
* @deprecated in favor of {@link #setCommonErrorHandler(CommonErrorHandler)}
* @see #setCommonErrorHandler(CommonErrorHandler)
*/
@Deprecated
public void setGenericErrorHandler(@Nullable GenericErrorHandler<?> errorHandler) {
this.errorHandler = errorHandler;
}
/**
* Set the batch error handler to call when the listener throws an exception.
* @param errorHandler the error handler.
* @since 2.2
* @deprecated in favor of {@link #setCommonErrorHandler(CommonErrorHandler)}
* @see #setCommonErrorHandler(CommonErrorHandler)
*/
@Deprecated
public void setBatchErrorHandler(BatchErrorHandler errorHandler) {
this.errorHandler = errorHandler;
}
/**
* Get the configured error handler.
* @return the error handler.
* @since 2.2
* @deprecated in favor of {@link #getCommonErrorHandler()}
* @see #getCommonErrorHandler()
*/
@Deprecated
@Nullable
public GenericErrorHandler<?> getGenericErrorHandler() {
return this.errorHandler;
}
/**
* Get the {@link CommonErrorHandler}.
* @return the handler.
* @since 2.8
*/
@Nullable
public CommonErrorHandler getCommonErrorHandler() {
return this.commonErrorHandler;
}
/**
* Set the {@link CommonErrorHandler} which can handle errors for both record
* and batch listeners. Replaces the use of {@link GenericErrorHandler}s.
* @param commonErrorHandler the handler.
* @since 2.8
*/
public void setCommonErrorHandler(@Nullable CommonErrorHandler commonErrorHandler) {
this.commonErrorHandler = commonErrorHandler;
}
protected boolean isStoppedNormally() {
return this.stoppedNormally;
}
protected void setStoppedNormally(boolean stoppedNormally) {
this.stoppedNormally = stoppedNormally;
}
@Override
public boolean isAutoStartup() {
return this.autoStartup;
}
@Override
public void setAutoStartup(boolean autoStartup) {
this.autoStartup = autoStartup;
}
protected void setRunning(boolean running) {
this.running = running;
}
@Override
public boolean isRunning() {
return this.running;
}
protected boolean isPaused() {
return this.paused;
}
@Override
public boolean isPartitionPauseRequested(TopicPartition topicPartition) {
return this.pauseRequestedPartitions.contains(topicPartition);
}
@Override
public void pausePartition(TopicPartition topicPartition) {
this.pauseRequestedPartitions.add(topicPartition);
}
@Override
public void resumePartition(TopicPartition topicPartition) {
this.pauseRequestedPartitions.remove(topicPartition);
}
@Override
public boolean isPauseRequested() {
return this.paused;
}
public void setPhase(int phase) {
this.phase = phase;
}
@Override
public int getPhase() {
return this.phase;
}
/**
* Return the currently configured {@link AfterRollbackProcessor}.
* @return the after rollback processor.
* @since 2.2.14
*/
public AfterRollbackProcessor<? super K, ? super V> getAfterRollbackProcessor() {
return this.afterRollbackProcessor;
}
/**
* Set a processor to perform seeks on unprocessed records after a rollback.
* Default will seek to current position all topics/partitions, including the failed
* record.
* @param afterRollbackProcessor the processor.
* @since 1.3.5
*/
public void setAfterRollbackProcessor(AfterRollbackProcessor<? super K, ? super V> afterRollbackProcessor) {
Assert.notNull(afterRollbackProcessor, "'afterRollbackProcessor' cannot be null");
this.afterRollbackProcessor = afterRollbackProcessor;
}
@Override
public ContainerProperties getContainerProperties() {
return this.containerProperties;
}
@Override
@Nullable
public String getGroupId() {
return this.containerProperties.getGroupId() == null
? (String) this.consumerFactory.getConfigurationProperties().get(ConsumerConfig.GROUP_ID_CONFIG)
: this.containerProperties.getGroupId();
}
@Override
@Nullable
public String getListenerId() {
return this.beanName; // the container factory sets the bean name to the id attribute
}
/**
* Get arbitrary static information that will be added to the
* {@link KafkaHeaders#LISTENER_INFO} header of all records.
* @return the info.
* @since 2.8.4
*/
@Nullable
public byte[] getListenerInfo() {
return this.listenerInfo != null ? Arrays.copyOf(this.listenerInfo, this.listenerInfo.length) : null;
}
/**
* Set arbitrary information that will be added to the
* {@link KafkaHeaders#LISTENER_INFO} header of all records.
* @param listenerInfo the info.
* @since 2.8.4
*/
public void setListenerInfo(@Nullable byte[] listenerInfo) {
this.listenerInfo = listenerInfo != null ? Arrays.copyOf(listenerInfo, listenerInfo.length) : null;
}
/**
* How long to wait for {@link AdminClient#describeTopics(Collection)} result
* futures to complete.
* @param topicCheckTimeout the timeout in seconds; default 30.
* @since 2.3
*/
public void setTopicCheckTimeout(int topicCheckTimeout) {
this.topicCheckTimeout = topicCheckTimeout;
}
protected RecordInterceptor<K, V> getRecordInterceptor() {
return this.recordInterceptor;
}
/**
* Set an interceptor to be called before calling the record listener.
* Does not apply to batch listeners.
* @param recordInterceptor the interceptor.
* @since 2.2.7
* @see #setInterceptBeforeTx(boolean)
*/
public void setRecordInterceptor(RecordInterceptor<K, V> recordInterceptor) {
this.recordInterceptor = recordInterceptor;
}
protected BatchInterceptor<K, V> getBatchInterceptor() {
return this.batchInterceptor;
}
/**
* Set an interceptor to be called before calling the record listener.
* @param batchInterceptor the interceptor.
* @since 2.6.6
* @see #setInterceptBeforeTx(boolean)
*/
public void setBatchInterceptor(BatchInterceptor<K, V> batchInterceptor) {
this.batchInterceptor = batchInterceptor;
}
protected boolean isInterceptBeforeTx() {
return this.interceptBeforeTx;
}
/**
* When false, invoke the interceptor after the transaction starts.
* @param interceptBeforeTx false to intercept within the transaction.
* Default true since 2.8.
* @since 2.3.4
* @see #setRecordInterceptor(RecordInterceptor)
* @see #setBatchInterceptor(BatchInterceptor)
*/
public void setInterceptBeforeTx(boolean interceptBeforeTx) {
this.interceptBeforeTx = interceptBeforeTx;
}
@Override
public void setupMessageListener(Object messageListener) {
this.containerProperties.setMessageListener(messageListener);
}
@Override
public final void start() {
checkGroupId();
synchronized (this.lifecycleMonitor) {
if (!isRunning()) {
Assert.state(this.containerProperties.getMessageListener() instanceof GenericMessageListener,
() -> "A " + GenericMessageListener.class.getName() + " implementation must be provided");
doStart();
}
}
}
protected void checkTopics() {
if (this.containerProperties.isMissingTopicsFatal() && this.containerProperties.getTopicPattern() == null) {
Map<String, Object> configs = this.consumerFactory.getConfigurationProperties()
.entrySet()
.stream()
.filter(entry -> AdminClientConfig.configNames().contains(entry.getKey()))
.collect(Collectors.toMap(Entry::getKey, Entry::getValue));
List<String> missing = null;
try (AdminClient client = AdminClient.create(configs)) { // NOSONAR - false positive null check
if (client != null) {
String[] topics = this.containerProperties.getTopics();
if (topics == null) {
topics = Arrays.stream(this.containerProperties.getTopicPartitions())
.map(TopicPartitionOffset::getTopic)
.toArray(String[]::new);
}
DescribeTopicsResult result = client.describeTopics(Arrays.asList(topics));
missing = result.values()
.entrySet()
.stream()
.filter(entry -> {
try {
entry.getValue().get(this.topicCheckTimeout, TimeUnit.SECONDS);
return false;
}
catch (@SuppressWarnings("unused") Exception e) {
return true;
}
})
.map(Entry::getKey)
.collect(Collectors.toList());
}
}
catch (Exception e) {
this.logger.error(e, "Failed to check topic existence");
}
if (missing != null && missing.size() > 0) {
throw new IllegalStateException(
"Topic(s) " + missing.toString()
+ " is/are not present and missingTopicsFatal is true");
}
}
}
public void checkGroupId() {
if (this.containerProperties.getTopicPartitions() == null) {
boolean hasGroupIdConsumerConfig = true; // assume true for non-standard containers
if (this.consumerFactory != null) { // we always have one for standard containers
Object groupIdConfig = this.consumerFactory.getConfigurationProperties()
.get(ConsumerConfig.GROUP_ID_CONFIG);
hasGroupIdConsumerConfig =
groupIdConfig instanceof String && StringUtils.hasText((String) groupIdConfig);
}
Assert.state(hasGroupIdConsumerConfig || StringUtils.hasText(this.containerProperties.getGroupId()),
"No group.id found in consumer config, container properties, or @KafkaListener annotation; "
+ "a group.id is required when group management is used.");
}
}
protected abstract void doStart();
@Override
public final void stop() {
stop(true);
}
/**
* Stop the container.
* @param wait wait for the listener to terminate.
* @since 2.3.8
*/
public final void stop(boolean wait) {
synchronized (this.lifecycleMonitor) {
if (isRunning()) {
if (wait) {
final CountDownLatch latch = new CountDownLatch(1);
doStop(latch::countDown);
try {
latch.await(this.containerProperties.getShutdownTimeout(), TimeUnit.MILLISECONDS); // NOSONAR
publishContainerStoppedEvent();
}
catch (@SuppressWarnings("unused") InterruptedException e) {
Thread.currentThread().interrupt();
}
}
else {
doStop(() -> {
publishContainerStoppedEvent();
});
}
}
}
}
@Override
public void pause() {
this.paused = true;
}
@Override
public void resume() {
this.paused = false;
}
@Override
public void stop(Runnable callback) {
synchronized (this.lifecycleMonitor) {
if (isRunning()) {
doStop(callback);
}
else {
callback.run();
}
}
}
@Override
public void stopAbnormally(Runnable callback) {
doStop(callback, false);
publishContainerStoppedEvent();
}
protected void doStop(Runnable callback) {
doStop(callback, true);
publishContainerStoppedEvent();
}
/**
* Stop the container normally or abnormally.
* @param callback the callback.
* @param normal true for an expected stop.
* @since 2.8
*/
protected abstract void doStop(Runnable callback, boolean normal);
/**
* Return default implementation of {@link ConsumerRebalanceListener} instance.
* @return the {@link ConsumerRebalanceListener} currently assigned to this container.
*/
protected final ConsumerRebalanceListener createSimpleLoggingConsumerRebalanceListener() {
return new ConsumerRebalanceListener() { // NOSONAR - anonymous inner class length
@Override
public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
AbstractMessageListenerContainer.this.logger.info(() ->
getGroupId() + ": partitions revoked: " + partitions);
}
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
AbstractMessageListenerContainer.this.logger.info(() ->
getGroupId() + ": partitions assigned: " + partitions);
}
@Override
public void onPartitionsLost(Collection<TopicPartition> partitions) {
AbstractMessageListenerContainer.this.logger.info(() ->
getGroupId() + ": partitions lost: " + partitions);
}
};
}
/**
 * Publish a {@link ContainerStoppedEvent} for this container (the event's
 * {@code container} property is the parent container when one exists).
 * Silently does nothing when no {@link ApplicationEventPublisher} is configured.
 */
protected void publishContainerStoppedEvent() {
	ApplicationEventPublisher publisher = getApplicationEventPublisher();
	if (publisher == null) {
		return;
	}
	publisher.publishEvent(new ContainerStoppedEvent(this, parentOrThis()));
}
/**
 * Return this or a parent container if this has a parent.
 * This base implementation always returns {@code this}; a nested subclass is expected
 * to override it to return the parent. The result is used as the {@code container}
 * property of published {@link ContainerStoppedEvent}s.
 * @return the parent or this.
 * @since 2.2.1
 */
protected AbstractMessageListenerContainer<?, ?> parentOrThis() {
return this;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/AcknowledgingConsumerAwareMessageListener.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
/**
 * Listener for handling incoming Kafka messages, propagating an acknowledgment handle that recipients
 * can invoke when the message has been processed. Access to the {@link Consumer} is provided.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 *
 * @since 2.0
 */
@FunctionalInterface
public interface AcknowledgingConsumerAwareMessageListener<K, V> extends MessageListener<K, V> {
/**
 * Invoked with data from kafka. Containers should never call this since they
 * will detect that we are a consumer aware acknowledging listener.
 * @param data the data to be processed.
 * @throws UnsupportedOperationException always - containers must call the
 * three-argument {@code onMessage} instead.
 */
@Override
default void onMessage(ConsumerRecord<K, V> data) {
throw new UnsupportedOperationException("Container should never call this");
}
/**
 * Invoked with data from kafka, together with the acknowledgment and the consumer.
 * @param data the data to be processed.
 * @param acknowledgment the acknowledgment (null if the container is not configured
 * for manual acks).
 * @param consumer the consumer.
 */
@Override
void onMessage(ConsumerRecord<K, V> data, @Nullable Acknowledgment acknowledgment, Consumer<?, ?> consumer);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/AcknowledgingMessageListener.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
/**
 * Listener for handling incoming Kafka messages, propagating an acknowledgment handle that recipients
 * can invoke when the message has been processed.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 */
@FunctionalInterface
public interface AcknowledgingMessageListener<K, V> extends MessageListener<K, V> {
/**
 * Invoked with data from kafka. Containers should never call this since they
 * will detect that we are an acknowledging listener.
 * @param data the data to be processed.
 * @throws UnsupportedOperationException always - containers must call the
 * two-argument {@code onMessage} instead.
 */
@Override
default void onMessage(ConsumerRecord<K, V> data) {
throw new UnsupportedOperationException("Container should never call this");
}
/**
 * Invoked with data from kafka.
 * @param data the data to be processed.
 * @param acknowledgment the acknowledgment (null if the container is not configured
 * for manual acks).
 */
@Override
void onMessage(ConsumerRecord<K, V> data, @Nullable Acknowledgment acknowledgment);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/AfterRollbackProcessor.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.ContainerProperties.EOSMode;
/**
 * Invoked by a listener container with remaining, unprocessed, records
 * (including the failed record). Implementations should seek the desired
 * topics/partitions so that records will be re-fetched on the next
 * poll. When used with a batch listener, the entire batch of records is
 * provided.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 *
 * @since 1.3.5
 *
 */
@FunctionalInterface
public interface AfterRollbackProcessor<K, V> {
/**
 * Process the remaining records. Recoverable will be true if the container is
 * processing individual records; this allows the processor to recover (skip) the
 * failed record rather than re-seeking it. This is not possible with a batch listener
 * since only the listener itself knows which record in the batch keeps failing.
 * IMPORTANT: If invoked in a transaction when the listener was invoked with a single
 * record, the transaction id will be based on the container group.id and the
 * topic/partition of the failed record, to avoid issues with zombie fencing. So,
 * generally, only its offset should be sent to the transaction. For other behavior
 * the process method should manage its own transaction.
 * @param records the records.
 * @param consumer the consumer.
 * @param container the container.
 * @param exception the exception
 * @param recoverable the recoverable.
 * @param eosMode the {@link EOSMode}.
 * @since 2.6.6
 * @see #isProcessInTransaction()
 */
void process(List<ConsumerRecord<K, V>> records, Consumer<K, V> consumer,
MessageListenerContainer container, Exception exception, boolean recoverable, EOSMode eosMode);
/**
 * Optional method to clear thread state; will be called just before a consumer
 * thread terminates. The default implementation is a no-op.
 * @since 2.2
 */
default void clearThreadState() {
}
/**
 * Return true to invoke
 * {@link #process(List, Consumer, MessageListenerContainer, Exception, boolean, ContainerProperties.EOSMode)}
 * in a new transaction. Because the container cannot infer the desired behavior, the
 * processor is responsible for sending the offset to the transaction if it decides to
 * skip the failing record.
 * @return true to run in a transaction; default false.
 * @since 2.2.5
 * @see #process(List, Consumer, MessageListenerContainer, Exception, boolean,
 * ContainerProperties.EOSMode)
 */
default boolean isProcessInTransaction() {
return false;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchAcknowledgingConsumerAwareMessageListener.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
/**
 * Listener for handling a batch of incoming Kafka messages, propagating an acknowledgment
 * handle that recipients can invoke when the message has been processed. The list is
 * created from the consumer records object returned by a poll. Access to the
 * {@link Consumer} is provided.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 *
 * @since 2.0
 */
@FunctionalInterface
public interface BatchAcknowledgingConsumerAwareMessageListener<K, V> extends BatchMessageListener<K, V> {
/**
 * Invoked with data from kafka. Containers should never call this since they
 * will detect that we are a consumer aware acknowledging listener.
 * @param data the data to be processed.
 * @throws UnsupportedOperationException always - containers must call the
 * three-argument {@code onMessage} instead.
 */
@Override
default void onMessage(List<ConsumerRecord<K, V>> data) {
throw new UnsupportedOperationException("Container should never call this");
}
/**
 * Invoked with the batch of data from kafka, the acknowledgment and the consumer.
 * @param data the data to be processed.
 * @param acknowledgment the acknowledgment (null if the container is not configured
 * for manual acks).
 * @param consumer the consumer.
 */
@Override
void onMessage(List<ConsumerRecord<K, V>> data, @Nullable Acknowledgment acknowledgment, Consumer<?, ?> consumer);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchAcknowledgingMessageListener.java | /*
* Copyright 2015-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.Acknowledgment;
/**
 * Listener for handling a batch of incoming Kafka messages, propagating an acknowledgment
 * handle that recipients can invoke when the message has been processed. The list is
 * created from the consumer records object returned by a poll.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 *
 * @since 1.1
 */
@FunctionalInterface
public interface BatchAcknowledgingMessageListener<K, V> extends BatchMessageListener<K, V> {
/**
 * Invoked with data from kafka. Containers should never call this since they
 * will detect that we are an acknowledging listener.
 * @param data the data to be processed.
 * @throws UnsupportedOperationException always - containers must call the
 * two-argument {@code onMessage} instead.
 */
@Override
default void onMessage(List<ConsumerRecord<K, V>> data) {
throw new UnsupportedOperationException("Container should never call this");
}
/**
 * Invoked with the batch of data from kafka.
 * @param data the data to be processed.
 * @param acknowledgment the acknowledgment. NOTE(review): unlike the consumer-aware
 * sibling interfaces this parameter is not marked {@code @Nullable} - presumably this
 * listener is only used with manual acks; confirm before relying on non-null.
 */
@Override
void onMessage(List<ConsumerRecord<K, V>> data, Acknowledgment acknowledgment);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchConsumerAwareMessageListener.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * Listener for handling a batch of incoming Kafka messages; the list
 * is created from the consumer records object returned by a poll.
 * Access to the {@link Consumer} is provided.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 *
 * @since 2.0
 */
@FunctionalInterface
public interface BatchConsumerAwareMessageListener<K, V> extends BatchMessageListener<K, V> {
/**
 * Invoked with data from kafka. Containers should never call this since they
 * will detect that we are a consumer aware listener.
 * @param data the data to be processed.
 * @throws UnsupportedOperationException always - containers must call the
 * two-argument {@code onMessage} instead.
 */
@Override
default void onMessage(List<ConsumerRecord<K, V>> data) {
throw new UnsupportedOperationException("Container should never call this");
}
/**
 * Invoked with the batch of data from kafka and the consumer.
 * @param data the data to be processed.
 * @param consumer the consumer.
 */
@Override
void onMessage(List<ConsumerRecord<K, V>> data, Consumer<?, ?> consumer);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchErrorHandler.java | /*
* Copyright 2015-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
/**
 * Handles errors thrown during the execution of a {@link BatchMessageListener}.
 * The listener should communicate which position(s) in the list failed in the
 * exception.
 *
 * @author Gary Russell
 *
 * @since 1.1
 */
public interface BatchErrorHandler extends GenericErrorHandler<ConsumerRecords<?, ?>> {
/**
 * Handle the exception. This default implementation ignores the consumer and
 * container and simply delegates to the two-argument {@code handle} inherited
 * from {@link GenericErrorHandler}.
 * @param thrownException the exception.
 * @param data the consumer records (may be null).
 * @param consumer the consumer.
 * @param container the container.
 */
default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
MessageListenerContainer container) {
handle(thrownException, data);
}
/**
 * Handle the exception. This default implementation ignores the consumer, container
 * and the {@code invokeListener} callback and delegates to the two-argument
 * {@code handle}; implementations that wish to retry must override it and use
 * {@code invokeListener} to re-invoke the listener.
 * @param thrownException the exception.
 * @param data the consumer records (may be null).
 * @param consumer the consumer.
 * @param container the container.
 * @param invokeListener a callback to re-invoke the listener.
 * @since 2.3.7
 */
default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data,
Consumer<?, ?> consumer, MessageListenerContainer container, Runnable invokeListener) {
handle(thrownException, data);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchInterceptor.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
/**
 * An interceptor for batches of records. Extends {@link ThreadStateProcessor} so
 * implementations can also clear per-thread state when a consumer thread exits.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 * @since 2.7
 *
 */
@FunctionalInterface
public interface BatchInterceptor<K, V> extends ThreadStateProcessor {
/**
 * Perform some action on the records or return a different one. If null is returned
 * the records will be skipped. Invoked before the listener.
 * @param records the records.
 * @param consumer the consumer.
 * @return the records or null.
 */
@Nullable
ConsumerRecords<K, V> intercept(ConsumerRecords<K, V> records, Consumer<K, V> consumer);
/**
 * Called after the listener exits normally. The default implementation is a no-op.
 * @param records the records.
 * @param consumer the consumer.
 */
default void success(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
}
/**
 * Called after the listener throws an exception. The default implementation is a no-op.
 * @param records the records.
 * @param exception the exception.
 * @param consumer the consumer.
 */
default void failure(ConsumerRecords<K, V> records, Exception exception, Consumer<K, V> consumer) {
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchListenerFailedException.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
/**
 * An exception thrown by user code to inform the framework which record in a batch has
 * failed. The failed record is identified either by the record itself or by its index
 * within the batch - exactly one of the two is populated.
 *
 * @author Gary Russell
 * @since 2.5
 *
 */
public class BatchListenerFailedException extends KafkaException {

	private static final long serialVersionUID = 1L;

	// Null when the index-based constructors were used.
	private final ConsumerRecord<?, ?> record;

	// -1 when the record-based constructors were used.
	private final int index;

	/**
	 * Construct an instance with the provided properties.
	 * @param message the message.
	 * @param index the index in the batch of the failed record.
	 */
	public BatchListenerFailedException(String message, int index) {
		this(message, null, index);
	}

	/**
	 * Construct an instance with the provided properties.
	 * @param message the message.
	 * @param cause the cause.
	 * @param index the index in the batch of the failed record.
	 */
	public BatchListenerFailedException(String message, @Nullable Throwable cause, int index) {
		super(message, cause);
		this.index = index;
		this.record = null;
	}

	/**
	 * Construct an instance with the provided properties.
	 * @param message the message.
	 * @param record the failed record.
	 */
	public BatchListenerFailedException(String message, ConsumerRecord<?, ?> record) {
		this(message, null, record);
	}

	/**
	 * Construct an instance with the provided properties.
	 * @param message the message.
	 * @param cause the cause.
	 * @param record the failed record.
	 */
	public BatchListenerFailedException(String message, @Nullable Throwable cause, ConsumerRecord<?, ?> record) {
		super(message, cause);
		this.record = record;
		this.index = -1;
	}

	/**
	 * Return the failed record.
	 * @return the record, or null if the exception was constructed with an index.
	 */
	@Nullable
	public ConsumerRecord<?, ?> getRecord() {
		return this.record;
	}

	/**
	 * Return the index in the batch of the failed record.
	 * @return the index, or -1 if the exception was constructed with a record.
	 */
	public int getIndex() {
		return this.index;
	}

	@Override
	public String getMessage() {
		// Append the failure location (topic-partition@offset, or @-index). A leading
		// space separates it from the base message; previously the record form ran
		// directly into the message text while the index form already had one.
		return super.getMessage() + (this.record != null
				? (" " + this.record.topic() + "-" + this.record.partition() + "@" + this.record.offset())
				: (" @-" + this.index));
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchLoggingErrorHandler.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
/**
 * Simple handler that logs each record in the failed batch (or "null" if the poll
 * result was null) at ERROR level, together with the thrown exception.
 *
 * @author Gary Russell
 * @since 1.1
 * @deprecated - use the {@link CommonLoggingErrorHandler} instead.
 */
@Deprecated
public class BatchLoggingErrorHandler implements BatchErrorHandler {
private static final LogAccessor LOGGER =
new LogAccessor(LogFactory.getLog(BatchLoggingErrorHandler.class));
@Override
public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data) {
StringBuilder message = new StringBuilder("Error while processing:\n");
if (data == null) {
// Trailing space is deliberate: the substring below strips the last char,
// which for the record case is the final '\n'.
message.append("null ");
}
else {
for (ConsumerRecord<?, ?> record : data) {
message.append(ListenerUtils.recordToString(record)).append('\n');
}
}
// Lazy supplier: the (possibly large) message string is only built when the
// ERROR level is enabled; substring drops the trailing '\n' / space added above.
LOGGER.error(thrownException, () -> message.substring(0, message.length() - 1));
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/BatchMessageListener.java | /*
* Copyright 2015-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.Nullable;
/**
 * Listener for handling a batch of incoming Kafka messages; the list
 * is created from the consumer records object returned by a poll.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 *
 * @since 1.1
 */
@FunctionalInterface
public interface BatchMessageListener<K, V> extends GenericMessageListener<List<ConsumerRecord<K, V>>> {
/**
 * Listener receives the original {@link ConsumerRecords} object instead of a
 * list of {@link ConsumerRecord}. The default implementation throws
 * {@link UnsupportedOperationException}; only override it together with
 * {@link #wantsPollResult()} returning true.
 * @param records the records.
 * @param acknowledgment the acknowledgment (null if not manual acks)
 * @param consumer the consumer.
 * @since 2.2
 */
default void onMessage(ConsumerRecords<K, V> records, @Nullable Acknowledgment acknowledgment,
Consumer<K, V> consumer) {
throw new UnsupportedOperationException("This batch listener doesn't support ConsumerRecords");
}
/**
 * Return true if this listener wishes to receive the original {@link ConsumerRecords}
 * object instead of a list of {@link ConsumerRecord}; the container is expected to
 * call the {@code ConsumerRecords} variant in that case (container behavior - not
 * visible from this interface).
 * @return true for consumer records.
 * @since 2.2
 */
default boolean wantsPollResult() {
return false;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CommonContainerStoppingErrorHandler.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import java.util.concurrent.Executor;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.kafka.KafkaException;
import org.springframework.util.Assert;
/**
 * A {@link CommonErrorHandler} that stops the container when an error occurs. Replaces
 * the legacy {@link ContainerStoppingErrorHandler} and
 * {@link ContainerStoppingBatchErrorHandler}.
 *
 * @author Gary Russell
 * @since 2.8
 *
 */
public class CommonContainerStoppingErrorHandler extends KafkaExceptionLogLevelAware implements CommonErrorHandler {
// Runs the stop asynchronously; stopping from the consumer thread itself would
// deadlock on the shutdown latch (stop() waits for the listener thread to exit).
private final Executor executor;
private boolean stopContainerAbnormally = true;
/**
 * Construct an instance with a default {@link SimpleAsyncTaskExecutor}.
 */
public CommonContainerStoppingErrorHandler() {
this(new SimpleAsyncTaskExecutor("containerStop-"));
}
/**
 * Construct an instance with the provided {@link Executor}.
 * @param executor the executor.
 */
public CommonContainerStoppingErrorHandler(Executor executor) {
Assert.notNull(executor, "'executor' cannot be null");
this.executor = executor;
}
/**
 * Set to false to stop the container normally. By default, the container is stopped
 * abnormally, so that {@code container.isInExpectedState()} returns false. If you
 * want the container to remain "healthy" when using this error handler, set the
 * property to false.
 * @param stopContainerAbnormally false for normal stop.
 * @since 2.8
 */
public void setStopContainerAbnormally(boolean stopContainerAbnormally) {
this.stopContainerAbnormally = stopContainerAbnormally;
}
@Override
public boolean remainingRecords() {
return true;
}
@Override
public void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
MessageListenerContainer container, boolean batchListener) {
stopContainer(container, thrownException);
}
@Override
public void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
MessageListenerContainer container) {
stopContainer(container, thrownException);
}
@Override
public void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
MessageListenerContainer container, Runnable invokeListener) {
stopContainer(container, thrownException);
}
// Schedules the stop on the executor, waits (up to 10s, abandoning the wait early if
// the container reports stopped) and then rethrows so the caller unwinds; the sleep
// gives the async stop time to take effect before this thread continues.
private void stopContainer(MessageListenerContainer container, Exception thrownException) {
this.executor.execute(() -> {
if (this.stopContainerAbnormally) {
container.stopAbnormally(() -> {
});
}
else {
container.stop(() -> {
});
}
});
// isRunning is false before the container.stop() waits for listener thread
try {
ListenerUtils.stoppableSleep(container, 10_000); // NOSONAR
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
throw new KafkaException("Stopped container", getLogLevel(), thrownException);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CommonDelegatingErrorHandler.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
 * An error handler that delegates to different error handlers, depending on the exception
 * type. The delegates must have compatible properties ({@link #isAckAfterHandle()} etc.).
 * {@link #deliveryAttemptHeader()} is not supported - always returns false.
 *
 * @author Gary Russell
 * @since 2.8
 *
 */
public class CommonDelegatingErrorHandler implements CommonErrorHandler {
private final CommonErrorHandler defaultErrorHandler;
private final Map<Class<? extends Throwable>, CommonErrorHandler> delegates = new LinkedHashMap<>();
/**
 * Construct an instance with a default error handler that will be invoked if the
 * exception has no matches. Per-exception-type delegates are added afterwards via
 * {@code setErrorHandlers(Map)} or {@code addDelegate(Class, CommonErrorHandler)}.
 * @param defaultErrorHandler the default error handler.
 */
public CommonDelegatingErrorHandler(CommonErrorHandler defaultErrorHandler) {
Assert.notNull(defaultErrorHandler, "'defaultErrorHandler' cannot be null");
this.defaultErrorHandler = defaultErrorHandler;
}
/**
 * Set the delegate error handlers; a {@link LinkedHashMap} argument is recommended so
 * that the delegates are searched in a known order. Replaces any previously configured
 * delegates and validates that every delegate agrees with the default handler on
 * {@code remainingRecords()} and {@code isAckAfterHandle()}.
 * @param delegates the delegates.
 */
public void setErrorHandlers(Map<Class<? extends Throwable>, CommonErrorHandler> delegates) {
this.delegates.clear();
this.delegates.putAll(delegates);
checkDelegates();
}
/**
 * Delegates to the default handler; all delegates are validated to return the same
 * value, so the default is representative.
 */
@Override
public boolean remainingRecords() {
return this.defaultErrorHandler.remainingRecords();
}
/**
 * Clear thread state on the default handler and on every delegate.
 */
@Override
public void clearThreadState() {
	this.defaultErrorHandler.clearThreadState();
	// Method reference instead of the redundant lambda form.
	this.delegates.values().forEach(CommonErrorHandler::clearThreadState);
}
/**
 * Delegates to the default handler; delegates are validated to agree at
 * configuration time.
 */
@Override
public boolean isAckAfterHandle() {
return this.defaultErrorHandler.isAckAfterHandle();
}
/**
 * Set the ack-after-handle flag on the default handler only. NOTE(review): the
 * delegates are not updated, so calling this after configuration can make them
 * inconsistent with the default (they were only validated to match at add time) -
 * confirm this is intended.
 */
@Override
public void setAckAfterHandle(boolean ack) {
this.defaultErrorHandler.setAckAfterHandle(ack);
}
/**
 * Add a delegate to the end of the current collection; re-validates that all
 * delegates remain compatible with the default handler.
 * @param throwable the throwable for this handler.
 * @param handler the handler.
 */
public void addDelegate(Class<? extends Throwable> throwable, CommonErrorHandler handler) {
this.delegates.put(throwable, handler);
checkDelegates();
}
private void checkDelegates() {
boolean remainingRecords = this.defaultErrorHandler.remainingRecords();
boolean ackAfterHandle = this.defaultErrorHandler.isAckAfterHandle();
this.delegates.values().forEach(handler -> {
Assert.isTrue(remainingRecords == handler.remainingRecords(),
"All delegates must return the same value when calling 'remainingRecords()'");
Assert.isTrue(ackAfterHandle == handler.isAckAfterHandle(),
"All delegates must return the same value when calling 'isAckAfterHandle()'");
});
}
@Override
public void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records,
Consumer<?, ?> consumer, MessageListenerContainer container) {
CommonErrorHandler handler = findDelegate(thrownException);
if (handler != null) {
handler.handleRemaining(thrownException, records, consumer, container);
}
else {
this.defaultErrorHandler.handleRemaining(thrownException, records, consumer, container);
}
}
@Override
public void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
MessageListenerContainer container, Runnable invokeListener) {
CommonErrorHandler handler = findDelegate(thrownException);
if (handler != null) {
handler.handleBatch(thrownException, data, consumer, container, invokeListener);
}
else {
this.defaultErrorHandler.handleBatch(thrownException, data, consumer, container, invokeListener);
}
}
@Override
public void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
MessageListenerContainer container, boolean batchListener) {
CommonErrorHandler handler = findDelegate(thrownException);
if (handler != null) {
handler.handleOtherException(thrownException, consumer, container, batchListener);
}
else {
this.defaultErrorHandler.handleOtherException(thrownException, consumer, container, batchListener);
}
}
@Nullable
private CommonErrorHandler findDelegate(Throwable thrownException) {
Throwable cause = thrownException;
if (cause instanceof ListenerExecutionFailedException) {
cause = thrownException.getCause();
}
if (cause != null) {
Class<? extends Throwable> causeClass = cause.getClass();
for (Entry<Class<? extends Throwable>, CommonErrorHandler> entry : this.delegates.entrySet()) {
if (entry.getKey().isAssignableFrom(causeClass)) {
return entry.getValue();
}
}
}
return null;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CommonErrorHandler.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.support.TopicPartitionOffset;
/**
* Replacement for {@link ErrorHandler} and {@link BatchErrorHandler} and their
* sub-interfaces.
*
* @author Gary Russell
* @since 2.8
*
*/
public interface CommonErrorHandler extends DeliveryAttemptAware {

	/**
	 * Return false if this error handler should only receive the current failed record;
	 * remaining records will be passed to the listener after the error handler returns.
	 * When true, all remaining records including the failed record are passed
	 * to the error handler. This default implementation returns false.
	 * @return false to receive only the failed record.
	 * @see #handleRecord(Exception, ConsumerRecord, Consumer, MessageListenerContainer)
	 * @see #handleRemaining(Exception, List, Consumer, MessageListenerContainer)
	 */
	default boolean remainingRecords() {
		return false;
	}

	/**
	 * Return true if this error handler supports delivery attempts headers.
	 * This default implementation returns false.
	 * @return true if capable.
	 */
	default boolean deliveryAttemptHeader() {
		return false;
	}

	/**
	 * Called when an exception is thrown with no records available, e.g. if the consumer
	 * poll throws an exception. This default implementation only logs the exception;
	 * concrete handlers are expected to override it.
	 * @param thrownException the exception.
	 * @param consumer the consumer.
	 * @param container the container.
	 * @param batchListener true if the listener is a batch listener.
	 */
	default void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
			MessageListenerContainer container, boolean batchListener) {

		LogFactory.getLog(getClass()).error("'handleOtherException' is not implemented by this handler",
				thrownException);
	}

	/**
	 * Handle the exception for a record listener when {@link #remainingRecords()} returns
	 * false. Use this to handle just the single failed record; remaining records from the
	 * poll will be sent to the listener. This default implementation only logs the
	 * exception; handlers returning false from {@link #remainingRecords()} must override it.
	 * @param thrownException the exception.
	 * @param record the record.
	 * @param consumer the consumer.
	 * @param container the container.
	 * @see #remainingRecords()
	 */
	default void handleRecord(Exception thrownException, ConsumerRecord<?, ?> record, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		LogFactory.getLog(getClass()).error("'handleRecord' is not implemented by this handler", thrownException);
	}

	/**
	 * Handle the exception for a record listener when {@link #remainingRecords()} returns
	 * true. The failed record and all the remaining records from the poll are passed in.
	 * Usually used when the error handler performs seeks so that the remaining records
	 * will be redelivered on the next poll. This default implementation only logs the
	 * exception; handlers returning true from {@link #remainingRecords()} must override it.
	 * @param thrownException the exception.
	 * @param records the remaining records including the one that failed.
	 * @param consumer the consumer.
	 * @param container the container.
	 * @see #remainingRecords()
	 */
	default void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		LogFactory.getLog(getClass()).error("'handleRemaining' is not implemented by this handler", thrownException);
	}

	/**
	 * Handle the exception for a batch listener. The complete {@link ConsumerRecords}
	 * from the poll is supplied. The error handler needs to perform seeks if you wish to
	 * reprocess the records in the batch. This default implementation only logs the
	 * exception; handlers used with batch listeners must override it.
	 * @param thrownException the exception.
	 * @param data the consumer records.
	 * @param consumer the consumer.
	 * @param container the container.
	 * @param invokeListener a callback to re-invoke the listener.
	 */
	default void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data,
			Consumer<?, ?> consumer, MessageListenerContainer container, Runnable invokeListener) {

		LogFactory.getLog(getClass()).error("'handleBatch' is not implemented by this handler", thrownException);
	}

	/**
	 * Default implementation returns 0 (delivery attempts are not tracked by this
	 * handler; see {@link #deliveryAttemptHeader()}).
	 * @param topicPartitionOffset the topic/partition/offset.
	 * @return 0, always, for this default implementation.
	 */
	@Override
	default int deliveryAttempt(TopicPartitionOffset topicPartitionOffset) {
		return 0;
	}

	/**
	 * Optional method to clear thread state; will be called just before a consumer
	 * thread terminates. This default implementation does nothing.
	 */
	default void clearThreadState() {
	}

	/**
	 * Return true if the offset should be committed for a handled error (no exception
	 * thrown). This default implementation returns true.
	 * @return true to commit.
	 */
	default boolean isAckAfterHandle() {
		return true;
	}

	/**
	 * Set to false to prevent the container from committing the offset of a recovered
	 * record (when the error handler does not itself throw an exception). This default
	 * implementation throws {@link UnsupportedOperationException}.
	 * @param ack false to not commit.
	 */
	default void setAckAfterHandle(boolean ack) {
		throw new UnsupportedOperationException("This error handler does not support setting this property");
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CommonLoggingErrorHandler.java | /*
* Copyright 2021-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.core.log.LogAccessor;
/**
* The {@link CommonErrorHandler} implementation for logging exceptions.
*
* @author Gary Russell
* @since 2.8
*
*/
public class CommonLoggingErrorHandler implements CommonErrorHandler {
private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(CommonLoggingErrorHandler.class));
private boolean ackAfterHandle = true;
@Override
public boolean isAckAfterHandle() {
return this.ackAfterHandle;
}
@Override
public void setAckAfterHandle(boolean ackAfterHandle) {
this.ackAfterHandle = ackAfterHandle;
}
@SuppressWarnings("deprecation")
@Override
public void handleRecord(Exception thrownException, ConsumerRecord<?, ?> record, Consumer<?, ?> consumer,
MessageListenerContainer container) {
LOGGER.error(thrownException, () -> "Error occured while processing: " + ListenerUtils.recordToString(record));
}
@SuppressWarnings("deprecation")
@Override
public void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
MessageListenerContainer container, Runnable invokeListener) {
StringBuilder message = new StringBuilder("Error occurred while processing:\n");
for (ConsumerRecord<?, ?> record : data) {
message.append(ListenerUtils.recordToString(record)).append('\n');
}
LOGGER.error(thrownException, () -> message.substring(0, message.length() - 1));
}
@Override
public void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
MessageListenerContainer container, boolean batchListener) {
LOGGER.error(thrownException, () -> "Error occurred while not processing records");
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CommonMixedErrorHandler.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.util.Assert;
/**
* A {@link CommonErrorHandler} that delegates to different {@link CommonErrorHandler}s
* for record and batch listeners.
*
* @author Gary Russell
* @since 2.8
*
*/
public class CommonMixedErrorHandler implements CommonErrorHandler {

	private final CommonErrorHandler recordErrorHandler;

	private final CommonErrorHandler batchErrorHandler;

	/**
	 * Construct an instance with the provided delegate {@link CommonErrorHandler}s.
	 * @param recordErrorHandler the error handler for record listeners.
	 * @param batchErrorHandler the error handler for batch listeners.
	 */
	public CommonMixedErrorHandler(CommonErrorHandler recordErrorHandler, CommonErrorHandler batchErrorHandler) {
		Assert.notNull(recordErrorHandler, "'recordErrorHandler' cannot be null");
		// Fix: previously validated recordErrorHandler twice, so a null
		// batchErrorHandler slipped through and failed later with an NPE.
		Assert.notNull(batchErrorHandler, "'batchErrorHandler' cannot be null");
		this.recordErrorHandler = recordErrorHandler;
		this.batchErrorHandler = batchErrorHandler;
	}

	// Record-level properties are delegated to the record handler.
	@Override
	public boolean remainingRecords() {
		return this.recordErrorHandler.remainingRecords();
	}

	@Override
	public boolean deliveryAttemptHeader() {
		return this.recordErrorHandler.deliveryAttemptHeader();
	}

	@Override
	public void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
			MessageListenerContainer container, boolean batchListener) {

		// Route to the delegate matching the listener type.
		if (batchListener) {
			this.batchErrorHandler.handleOtherException(thrownException, consumer, container, batchListener);
		}
		else {
			this.recordErrorHandler.handleOtherException(thrownException, consumer, container, batchListener);
		}
	}

	@Override
	public void handleRecord(Exception thrownException, ConsumerRecord<?, ?> record, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		this.recordErrorHandler.handleRecord(thrownException, record, consumer, container);
	}

	@Override
	public void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		this.recordErrorHandler.handleRemaining(thrownException, records, consumer, container);
	}

	@Override
	public void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container, Runnable invokeListener) {

		this.batchErrorHandler.handleBatch(thrownException, data, consumer, container, invokeListener);
	}

	@Override
	public int deliveryAttempt(TopicPartitionOffset topicPartitionOffset) {
		return this.recordErrorHandler.deliveryAttempt(topicPartitionOffset);
	}

	// Lifecycle/commit operations are applied to both delegates.
	@Override
	public void clearThreadState() {
		this.batchErrorHandler.clearThreadState();
		this.recordErrorHandler.clearThreadState();
	}

	@Override
	public boolean isAckAfterHandle() {
		return this.recordErrorHandler.isAckAfterHandle();
	}

	@Override
	public void setAckAfterHandle(boolean ack) {
		this.batchErrorHandler.setAckAfterHandle(ack);
		this.recordErrorHandler.setAckAfterHandle(ack);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CompositeBatchInterceptor.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.util.Assert;
/**
* A {@link BatchInterceptor} that delegates to one or more {@link BatchInterceptor}s in
* order.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @since 2.7
*
*/
public class CompositeBatchInterceptor<K, V> implements BatchInterceptor<K, V> {

	private final Collection<BatchInterceptor<K, V>> delegates = new ArrayList<>();

	/**
	 * Construct an instance with the provided delegates.
	 * @param delegates the delegates.
	 */
	@SafeVarargs
	@SuppressWarnings("varargs")
	public CompositeBatchInterceptor(BatchInterceptor<K, V>... delegates) {
		Assert.notNull(delegates, "'delegates' cannot be null");
		Assert.noNullElements(delegates, "'delegates' cannot have null entries");
		this.delegates.addAll(Arrays.asList(delegates));
	}

	@Override
	public ConsumerRecords<K, V> intercept(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
		// Thread the batch through each delegate in turn; a null result stops the chain.
		ConsumerRecords<K, V> current = records;
		for (BatchInterceptor<K, V> delegate : this.delegates) {
			current = delegate.intercept(current, consumer);
			if (current == null) {
				return null;
			}
		}
		return current;
	}

	@Override
	public void success(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
		for (BatchInterceptor<K, V> delegate : this.delegates) {
			delegate.success(records, consumer);
		}
	}

	@Override
	public void failure(ConsumerRecords<K, V> records, Exception exception, Consumer<K, V> consumer) {
		for (BatchInterceptor<K, V> delegate : this.delegates) {
			delegate.failure(records, exception, consumer);
		}
	}

	@Override
	public void setupThreadState(Consumer<?, ?> consumer) {
		for (BatchInterceptor<K, V> delegate : this.delegates) {
			delegate.setupThreadState(consumer);
		}
	}

	@Override
	public void clearThreadState(Consumer<?, ?> consumer) {
		for (BatchInterceptor<K, V> delegate : this.delegates) {
			delegate.clearThreadState(consumer);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/CompositeRecordInterceptor.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* A {@link RecordInterceptor} that delegates to one or more {@link RecordInterceptor}s in
* order.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Artem Bilan
* @author Gary Russell
* @since 2.3
*
*/
public class CompositeRecordInterceptor<K, V> implements ConsumerAwareRecordInterceptor<K, V> {

	private final Collection<RecordInterceptor<K, V>> delegates = new ArrayList<>();

	/**
	 * Construct an instance with the provided delegates.
	 * @param delegates the delegates.
	 */
	@SafeVarargs
	@SuppressWarnings("varargs")
	public CompositeRecordInterceptor(RecordInterceptor<K, V>... delegates) {
		Assert.notNull(delegates, "'delegates' cannot be null");
		Assert.noNullElements(delegates, "'delegates' cannot have null entries");
		this.delegates.addAll(Arrays.asList(delegates));
	}

	@Override
	@Nullable
	public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
		// Pass the record through each delegate in turn; a null result stops the chain.
		ConsumerRecord<K, V> current = record;
		for (RecordInterceptor<K, V> delegate : this.delegates) {
			current = delegate.intercept(current, consumer);
			if (current == null) {
				return null;
			}
		}
		return current;
	}

	@Override
	public void success(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
		for (RecordInterceptor<K, V> delegate : this.delegates) {
			delegate.success(record, consumer);
		}
	}

	@Override
	public void failure(ConsumerRecord<K, V> record, Exception exception, Consumer<K, V> consumer) {
		for (RecordInterceptor<K, V> delegate : this.delegates) {
			delegate.failure(record, exception, consumer);
		}
	}

	@Override
	public void setupThreadState(Consumer<?, ?> consumer) {
		for (RecordInterceptor<K, V> delegate : this.delegates) {
			delegate.setupThreadState(consumer);
		}
	}

	@Override
	public void clearThreadState(Consumer<?, ?> consumer) {
		for (RecordInterceptor<K, V> delegate : this.delegates) {
			delegate.clearThreadState(consumer);
		}
	}

	@Override
	public void afterRecord(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
		for (RecordInterceptor<K, V> delegate : this.delegates) {
			delegate.afterRecord(record, consumer);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConcurrentMessageListenerContainer.java | /*
* Copyright 2015-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.core.task.AsyncListenableTaskExecutor;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* Creates 1 or more {@link KafkaMessageListenerContainer}s based on
* {@link #setConcurrency(int) concurrency}. If the
* {@link ContainerProperties} is configured with {@link org.apache.kafka.common.TopicPartition}s,
* the {@link org.apache.kafka.common.TopicPartition}s are distributed evenly across the
* instances.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Marius Bogoevici
* @author Gary Russell
* @author Murali Reddy
* @author Jerome Mirc
* @author Artem Bilan
* @author Vladimir Tsanev
* @author Tomaz Fernandes
*/
public class ConcurrentMessageListenerContainer<K, V> extends AbstractMessageListenerContainer<K, V> {
private final List<KafkaMessageListenerContainer<K, V>> containers = new ArrayList<>();
private final List<AsyncListenableTaskExecutor> executors = new ArrayList<>();
private int concurrency = 1;
private boolean alwaysClientIdSuffix = true;
/**
* Construct an instance with the supplied configuration properties.
* The topic partitions are distributed evenly across the delegate
* {@link KafkaMessageListenerContainer}s.
* @param consumerFactory the consumer factory.
* @param containerProperties the container properties.
*/
public ConcurrentMessageListenerContainer(ConsumerFactory<? super K, ? super V> consumerFactory,
ContainerProperties containerProperties) {
super(consumerFactory, containerProperties);
Assert.notNull(consumerFactory, "A ConsumerFactory must be provided");
}
public int getConcurrency() {
return this.concurrency;
}
/**
* The maximum number of concurrent {@link KafkaMessageListenerContainer}s running.
* Messages from within the same partition will be processed sequentially.
* @param concurrency the concurrency.
*/
public void setConcurrency(int concurrency) {
Assert.isTrue(concurrency > 0, "concurrency must be greater than 0");
this.concurrency = concurrency;
}
/**
* Set to false to suppress adding a suffix to the child container's client.id when
* the concurrency is only 1.
* @param alwaysClientIdSuffix false to suppress, true (default) to include.
* @since 2.2.14
*/
public void setAlwaysClientIdSuffix(boolean alwaysClientIdSuffix) {
this.alwaysClientIdSuffix = alwaysClientIdSuffix;
}
/**
* Return the list of {@link KafkaMessageListenerContainer}s created by
* this container.
* @return the list of {@link KafkaMessageListenerContainer}s created by
* this container.
*/
public List<KafkaMessageListenerContainer<K, V>> getContainers() {
synchronized (this.lifecycleMonitor) {
return Collections.unmodifiableList(new ArrayList<>(this.containers));
}
}
@Override
public Collection<TopicPartition> getAssignedPartitions() {
synchronized (this.lifecycleMonitor) {
return this.containers.stream()
.map(KafkaMessageListenerContainer::getAssignedPartitions)
.filter(Objects::nonNull)
.flatMap(Collection::stream)
.collect(Collectors.toList());
}
}
@Override
public Map<String, Collection<TopicPartition>> getAssignmentsByClientId() {
synchronized (this.lifecycleMonitor) {
Map<String, Collection<TopicPartition>> assignments = new HashMap<>();
this.containers.forEach(container -> {
Map<String, Collection<TopicPartition>> byClientId = container.getAssignmentsByClientId();
if (byClientId != null) {
assignments.putAll(byClientId);
}
});
return assignments;
}
}
@Override
public boolean isContainerPaused() {
synchronized (this.lifecycleMonitor) {
boolean paused = isPaused();
if (paused) {
for (AbstractMessageListenerContainer<K, V> container : this.containers) {
if (!container.isContainerPaused()) {
return false;
}
}
}
return paused;
}
}
@Override
public boolean isChildRunning() {
if (!isRunning()) {
return false;
}
for (MessageListenerContainer container : this.containers) {
if (container.isRunning()) {
return true;
}
}
return false;
}
@Override
public Map<String, Map<MetricName, ? extends Metric>> metrics() {
synchronized (this.lifecycleMonitor) {
Map<String, Map<MetricName, ? extends Metric>> metrics = new HashMap<>();
for (KafkaMessageListenerContainer<K, V> container : this.containers) {
metrics.putAll(container.metrics());
}
return Collections.unmodifiableMap(metrics);
}
}
/*
* Under lifecycle lock.
*/
@Override
protected void doStart() {
if (!isRunning()) {
checkTopics();
ContainerProperties containerProperties = getContainerProperties();
TopicPartitionOffset[] topicPartitions = containerProperties.getTopicPartitions();
if (topicPartitions != null && this.concurrency > topicPartitions.length) {
this.logger.warn(() -> "When specific partitions are provided, the concurrency must be less than or "
+ "equal to the number of partitions; reduced from " + this.concurrency + " to "
+ topicPartitions.length);
this.concurrency = topicPartitions.length;
}
setRunning(true);
for (int i = 0; i < this.concurrency; i++) {
KafkaMessageListenerContainer<K, V> container =
constructContainer(containerProperties, topicPartitions, i);
configureChildContainer(i, container);
if (isPaused()) {
container.pause();
}
container.start();
this.containers.add(container);
}
}
}
@SuppressWarnings("deprecation")
private void configureChildContainer(int index, KafkaMessageListenerContainer<K, V> container) {
String beanName = getBeanName();
beanName = (beanName == null ? "consumer" : beanName) + "-" + index;
container.setBeanName(beanName);
ApplicationContext applicationContext = getApplicationContext();
if (applicationContext != null) {
container.setApplicationContext(applicationContext);
}
ApplicationEventPublisher publisher = getApplicationEventPublisher();
if (publisher != null) {
container.setApplicationEventPublisher(publisher);
}
container.setClientIdSuffix(this.concurrency > 1 || this.alwaysClientIdSuffix ? "-" + index : "");
container.setGenericErrorHandler(getGenericErrorHandler());
container.setCommonErrorHandler(getCommonErrorHandler());
container.setAfterRollbackProcessor(getAfterRollbackProcessor());
container.setRecordInterceptor(getRecordInterceptor());
container.setBatchInterceptor(getBatchInterceptor());
container.setInterceptBeforeTx(isInterceptBeforeTx());
container.setListenerInfo(getListenerInfo());
container.setEmergencyStop(() -> {
stopAbnormally(() -> {
});
});
AsyncListenableTaskExecutor exec = container.getContainerProperties().getConsumerTaskExecutor();
if (exec == null) {
if ((this.executors.size() > index)) {
exec = this.executors.get(index);
}
else {
exec = new SimpleAsyncTaskExecutor(beanName + "-C-");
this.executors.add(exec);
}
container.getContainerProperties().setConsumerTaskExecutor(exec);
}
}
private KafkaMessageListenerContainer<K, V> constructContainer(ContainerProperties containerProperties,
@Nullable TopicPartitionOffset[] topicPartitions, int i) {
KafkaMessageListenerContainer<K, V> container;
if (topicPartitions == null) {
container = new KafkaMessageListenerContainer<>(this, this.consumerFactory, containerProperties); // NOSONAR
}
else {
container = new KafkaMessageListenerContainer<>(this, this.consumerFactory, // NOSONAR
containerProperties, partitionSubset(containerProperties, i));
}
return container;
}
private TopicPartitionOffset[] partitionSubset(ContainerProperties containerProperties, int index) {
TopicPartitionOffset[] topicPartitions = containerProperties.getTopicPartitions();
if (this.concurrency == 1) {
return topicPartitions; // NOSONAR
}
else {
int numPartitions = topicPartitions.length; // NOSONAR
if (numPartitions == this.concurrency) {
return new TopicPartitionOffset[] { topicPartitions[index] };
}
else {
int perContainer = numPartitions / this.concurrency;
TopicPartitionOffset[] subset;
if (index == this.concurrency - 1) {
subset = Arrays.copyOfRange(topicPartitions, index * perContainer, topicPartitions.length);
}
else {
subset = Arrays.copyOfRange(topicPartitions, index * perContainer, (index + 1) * perContainer);
}
return subset;
}
}
}
/*
* Under lifecycle lock.
*/
@Override
protected void doStop(final Runnable callback, boolean normal) {
final AtomicInteger count = new AtomicInteger();
if (isRunning()) {
boolean childRunning = isChildRunning();
setRunning(false);
if (!childRunning) {
callback.run();
}
for (KafkaMessageListenerContainer<K, V> container : this.containers) {
if (container.isRunning()) {
count.incrementAndGet();
}
}
for (KafkaMessageListenerContainer<K, V> container : this.containers) {
if (container.isRunning()) {
if (normal) {
container.stop(() -> {
if (count.decrementAndGet() <= 0) {
callback.run();
}
});
}
else {
container.stopAbnormally(() -> {
if (count.decrementAndGet() <= 0) {
callback.run();
}
});
}
}
}
this.containers.clear();
setStoppedNormally(normal);
}
}
@Override
public void pause() {
synchronized (this.lifecycleMonitor) {
super.pause();
this.containers.forEach(AbstractMessageListenerContainer::pause);
}
}
@Override
public void resume() {
synchronized (this.lifecycleMonitor) {
super.resume();
this.containers.forEach(AbstractMessageListenerContainer::resume);
}
}
@Override
public void pausePartition(TopicPartition topicPartition) {
synchronized (this.lifecycleMonitor) {
super.pausePartition(topicPartition);
this.containers
.stream()
.filter(container -> containsPartition(topicPartition, container))
.forEach(container -> container.pausePartition(topicPartition));
}
}
@Override
public void resumePartition(TopicPartition topicPartition) {
synchronized (this.lifecycleMonitor) {
super.resumePartition(topicPartition);
this.containers
.stream()
.filter(container -> containsPartition(topicPartition, container))
.forEach(container -> container.resumePartition(topicPartition));
}
}
@Override
public boolean isPartitionPaused(TopicPartition topicPartition) {
return this
.containers
.stream()
.anyMatch(container -> container.isPartitionPaused(topicPartition));
}
@Override
public boolean isInExpectedState() {
return (isRunning() || isStoppedNormally()) && this.containers
.stream()
.map(container -> container.isInExpectedState())
.allMatch(bool -> Boolean.TRUE.equals(bool));
}
// Whether the given child container is currently assigned the partition.
private boolean containsPartition(TopicPartition topicPartition, KafkaMessageListenerContainer<K, V> container) {
	Collection<TopicPartition> assigned = container.getAssignedPartitions();
	if (assigned == null) {
		return false;
	}
	return assigned.contains(topicPartition);
}
@Override
public String toString() {
	StringBuilder buf = new StringBuilder("ConcurrentMessageListenerContainer [concurrency=");
	buf.append(this.concurrency)
			.append(", beanName=")
			.append(this.getBeanName())
			.append(", running=")
			.append(this.isRunning())
			.append("]");
	return buf.toString();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConditionalDelegatingBatchErrorHandler.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
 * An error handler that delegates to different error handlers, depending on the exception
 * type. The delegate map is consulted in insertion order; the first entry whose key is a
 * supertype of the failure's cause handles the error, otherwise the default handler does.
 *
 * @author Gary Russell
 * @since 2.7.4
 * @deprecated in favor of {@link CommonDelegatingErrorHandler}.
 */
@Deprecated
public class ConditionalDelegatingBatchErrorHandler implements ListenerInvokingBatchErrorHandler {

	private final ContainerAwareBatchErrorHandler defaultErrorHandler;

	private final Map<Class<? extends Throwable>, ContainerAwareBatchErrorHandler> delegates = new LinkedHashMap<>();

	/**
	 * Construct an instance with a default error handler that will be invoked if the
	 * exception has no matches.
	 * @param defaultErrorHandler the default error handler.
	 */
	public ConditionalDelegatingBatchErrorHandler(ContainerAwareBatchErrorHandler defaultErrorHandler) {
		Assert.notNull(defaultErrorHandler, "'defaultErrorHandler' cannot be null");
		this.defaultErrorHandler = defaultErrorHandler;
	}

	/**
	 * Set the delegate error handlers; a {@link LinkedHashMap} argument is recommended so
	 * that the delegates are searched in a known order.
	 * @param delegates the delegates.
	 */
	public void setErrorHandlers(Map<Class<? extends Throwable>, ContainerAwareBatchErrorHandler> delegates) {
		this.delegates.clear();
		delegates.forEach(this.delegates::put);
	}

	/**
	 * Add a delegate to the end of the current collection.
	 * @param throwable the throwable for this handler.
	 * @param handler the handler.
	 */
	public void addDelegate(Class<? extends Throwable> throwable, ContainerAwareBatchErrorHandler handler) {
		this.delegates.put(throwable, handler);
	}

	@Override
	public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> records, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		// Defensive: containers invoke the listener-aware variant below.
		doHandle(thrownException, records, consumer, container, null);
	}

	@Override
	public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> records, Consumer<?, ?> consumer,
			MessageListenerContainer container, Runnable invokeListener) {

		doHandle(thrownException, records, consumer, container, invokeListener);
	}

	/**
	 * Route the failure to the first matching delegate, or the default handler.
	 */
	protected void doHandle(Exception thrownException, @Nullable ConsumerRecords<?, ?> records, Consumer<?, ?> consumer,
			MessageListenerContainer container, @Nullable Runnable invokeListener) {

		// Unwrap the listener adapter exception to classify by the real cause.
		Throwable cause = thrownException instanceof ListenerExecutionFailedException
				? thrownException.getCause()
				: thrownException;
		if (cause != null) {
			for (Entry<Class<? extends Throwable>, ContainerAwareBatchErrorHandler> candidate
					: this.delegates.entrySet()) {

				if (candidate.getKey().isAssignableFrom(cause.getClass())) {
					candidate.getValue().handle(thrownException, records, consumer, container, invokeListener);
					return;
				}
			}
		}
		this.defaultErrorHandler.handle(thrownException, records, consumer, container, invokeListener);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConditionalDelegatingErrorHandler.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
 * An error handler that delegates to different error handlers, depending on the exception
 * type. Delegates are searched in insertion order; the first whose mapped type is a
 * supertype of the failure's cause wins, otherwise the default handler is used.
 *
 * @author Gary Russell
 * @since 2.7.4
 * @deprecated in favor of {@link CommonDelegatingErrorHandler}.
 */
@Deprecated
public class ConditionalDelegatingErrorHandler implements ContainerAwareErrorHandler {

	// Fallback used when no delegate's exception type matches.
	private final ContainerAwareErrorHandler defaultErrorHandler;

	// LinkedHashMap preserves insertion order so matching is deterministic.
	private final Map<Class<? extends Throwable>, ContainerAwareErrorHandler> delegates = new LinkedHashMap<>();

	/**
	 * Construct an instance with a default error handler that will be invoked if the
	 * exception has no matches.
	 * @param defaultErrorHandler the default error handler.
	 */
	public ConditionalDelegatingErrorHandler(ContainerAwareErrorHandler defaultErrorHandler) {
		Assert.notNull(defaultErrorHandler, "'defaultErrorHandler' cannot be null");
		this.defaultErrorHandler = defaultErrorHandler;
	}

	/**
	 * Set the delegate error handlers; a {@link LinkedHashMap} argument is recommended so
	 * that the delegates are searched in a known order.
	 * @param delegates the delegates.
	 */
	public void setErrorHandlers(Map<Class<? extends Throwable>, ContainerAwareErrorHandler> delegates) {
		this.delegates.clear();
		this.delegates.putAll(delegates);
	}

	/**
	 * Add a delegate to the end of the current collection.
	 * @param throwable the throwable for this handler.
	 * @param handler the handler.
	 */
	public void addDelegate(Class<? extends Throwable> throwable, ContainerAwareErrorHandler handler) {
		this.delegates.put(throwable, handler);
	}

	@Override
	public void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		// Unwrap the listener adapter exception so matching is done on the real cause.
		Throwable cause = thrownException;
		if (cause instanceof ListenerExecutionFailedException) {
			cause = thrownException.getCause();
		}
		if (cause != null) {
			Class<? extends Throwable> causeClass = cause.getClass();
			for (Entry<Class<? extends Throwable>, ContainerAwareErrorHandler> entry : this.delegates.entrySet()) {
				// isAssignableFrom: a delegate keyed on a supertype also matches subtypes.
				if (entry.getKey().isAssignableFrom(causeClass)) {
					entry.getValue().handle(thrownException, records, consumer, container);
					return;
				}
			}
		}
		// No delegate matched (or the cause was null): fall back to the default.
		this.defaultErrorHandler.handle(thrownException, records, consumer, container);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareBatchErrorHandler.java | /*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
/**
 * A batch error handler that has access to the consumer, for example to adjust
 * offsets after an error.
 *
 * @author Gary Russell
 * @since 2.0
 *
 */
@FunctionalInterface
public interface ConsumerAwareBatchErrorHandler extends BatchErrorHandler {

	/**
	 * Not called by containers; they detect this is a consumer-aware handler
	 * and invoke the consumer variant instead.
	 */
	@Override
	default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Handle the exception with access to the consumer.
	 * @param thrownException the exception.
	 * @param data the records from the failed poll; may be null.
	 * @param consumer the consumer.
	 */
	@Override
	void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer);

	/**
	 * Default container-aware variant; delegates to the consumer variant,
	 * dropping the container reference.
	 */
	@Override
	default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		handle(thrownException, data, consumer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareErrorHandler.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
 * A record error handler that has access to the consumer, for example to adjust
 * offsets after an error.
 *
 * @author Gary Russell
 * @since 2.0
 *
 */
@FunctionalInterface
public interface ConsumerAwareErrorHandler extends ErrorHandler {

	/**
	 * Not called by containers; they detect this is a consumer-aware handler
	 * and invoke the consumer variant instead.
	 */
	@Override
	default void handle(Exception thrownException, @Nullable ConsumerRecord<?, ?> data) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Handle the exception with access to the consumer.
	 * @param thrownException the exception.
	 * @param data the failed record; may be null.
	 * @param consumer the consumer.
	 */
	@Override
	void handle(Exception thrownException, @Nullable ConsumerRecord<?, ?> data, Consumer<?, ?> consumer);

	/**
	 * Default list variant; delegates to the single-record method.
	 * NOTE(review): the remaining records are passed as null here because the
	 * single-record contract cannot represent a list — the record data is lost
	 * to the delegate; confirm callers expect this.
	 */
	@Override
	default void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> data, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		handle(thrownException, null, consumer);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareListenerErrorHandler.java | /*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.messaging.Message;
/**
 * An error handler that has access to the consumer. IMPORTANT: do not perform seek
 * operations on the consumer, the container won't be aware. Use a container-level error
 * handler such as the {@link SeekToCurrentErrorHandler} for such situations.
 *
 * @author Gary Russell
 * @since 2.0
 *
 */
@FunctionalInterface
public interface ConsumerAwareListenerErrorHandler extends KafkaListenerErrorHandler {

	/**
	 * Not called by containers; they detect this is a consumer-aware handler
	 * and invoke the consumer variant instead.
	 */
	@Override
	default Object handleError(Message<?> message, ListenerExecutionFailedException exception) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Handle the error with access to the consumer.
	 * @param message the spring-messaging message that failed.
	 * @param exception the exception thrown by the listener.
	 * @param consumer the consumer (do not seek on it - see class Javadoc).
	 * @return the result to return to the caller, if any.
	 */
	@Override
	Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareMessageListener.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * Listener for handling individual incoming Kafka messages, with access to the consumer.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 *
 * @since 2.0
 */
@FunctionalInterface
public interface ConsumerAwareMessageListener<K, V> extends MessageListener<K, V> {

	/**
	 * Invoked with data from kafka. Containers should never call this since they
	 * will detect we are a consumer aware listener and call the two-argument
	 * variant instead.
	 * @param data the data to be processed.
	 */
	@Override
	default void onMessage(ConsumerRecord<K, V> data) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Invoked with data from kafka together with the consumer.
	 * @param data the data to be processed.
	 * @param consumer the consumer.
	 */
	@Override
	void onMessage(ConsumerRecord<K, V> data, Consumer<?, ?> consumer);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareRebalanceListener.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
import org.springframework.core.log.LogAccessor;
/**
 * A rebalance listener that provides access to the consumer object. Starting with version
 * 2.1.5, as a convenience, default no-op implementations are provided for all methods,
 * allowing the user to implement just those (s)he is interested in.
 *
 * @author Gary Russell
 * @since 2.0
 *
 */
public interface ConsumerAwareRebalanceListener extends ConsumerRebalanceListener {

	/**
	 * {@link LogAccessor} for use in default methods.
	 */
	LOGGER = new LogAccessor(LogFactory.getLog(ConsumerAwareRebalanceListener.class));

	/**
	 * The same as {@link #onPartitionsRevoked(Collection)} with the additional consumer
	 * parameter. It is invoked by the container before any pending offsets are committed.
	 * @param consumer the consumer.
	 * @param partitions the partitions.
	 */
	default void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
		try {
			// Delegate to the consumer-less callback; user exceptions must not
			// break the container's rebalance processing.
			onPartitionsRevoked(partitions);
		}
		catch (Exception e) { // NOSONAR
			LOGGER.debug(e, "User method threw exception");
		}
	}

	/**
	 * The same as {@link #onPartitionsRevoked(Collection)} with the additional consumer
	 * parameter. It is invoked by the container after any pending offsets are committed.
	 * @param consumer the consumer.
	 * @param partitions the partitions.
	 */
	default void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
	}

	/**
	 * The same as {@link #onPartitionsLost(Collection)} with an additional consumer parameter.
	 * @param consumer the consumer.
	 * @param partitions the partitions.
	 * @since 2.4
	 */
	default void onPartitionsLost(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
		try {
			onPartitionsLost(partitions);
		}
		catch (Exception e) { // NOSONAR
			LOGGER.debug(e, "User method threw exception");
		}
	}

	/**
	 * The same as {@link #onPartitionsAssigned(Collection)} with the additional consumer
	 * parameter.
	 * @param consumer the consumer.
	 * @param partitions the partitions.
	 */
	default void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
		try {
			onPartitionsAssigned(partitions);
		}
		catch (Exception e) { // NOSONAR
			LOGGER.debug(e, "User method threw exception");
		}
	}

	/**
	 * No-op default so implementors need only override what they use.
	 */
	@Override
	default void onPartitionsRevoked(Collection<TopicPartition> partitions) {
	}

	/**
	 * No-op default so implementors need only override what they use.
	 */
	@Override
	default void onPartitionsAssigned(Collection<TopicPartition> partitions) {
	}

	/**
	 * No-op default so implementors need only override what they use.
	 */
	@Override
	default void onPartitionsLost(Collection<TopicPartition> partitions) {
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareRecordInterceptor.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
 * A {@link RecordInterceptor} that has access to the {@link Consumer}.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 * @since 2.7
 *
 */
@FunctionalInterface
public interface ConsumerAwareRecordInterceptor<K, V> extends RecordInterceptor<K, V> {

	/**
	 * Not called by containers; they detect this is a consumer-aware interceptor
	 * and invoke the consumer variant instead.
	 */
	@SuppressWarnings("deprecation")
	@Override
	@Nullable
	default ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record) {
		throw new UnsupportedOperationException("Container should never call this");
	}

	/**
	 * Intercept the record, with access to the consumer.
	 * @param record the record.
	 * @param consumer the consumer.
	 * @return the record (possibly replaced), or null to skip the listener call.
	 */
	@Override
	@Nullable
	ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record, Consumer<K, V> consumer);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerAwareRecordRecoverer.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
 * A {@link ConsumerRecordRecoverer} that supports getting a reference to the
 * {@link Consumer}.
 *
 * @author Gary Russell
 * @since 2.7
 *
 */
@FunctionalInterface
public interface ConsumerAwareRecordRecoverer extends ConsumerRecordRecoverer {

	/**
	 * Delegates to the consumer-aware variant with a null consumer, so plain
	 * {@link ConsumerRecordRecoverer} callers still work.
	 */
	@Override
	default void accept(ConsumerRecord<?, ?> record, Exception exception) {
		accept(record, null, exception);
	}

	/**
	 * Recover the record.
	 * @param record the record.
	 * @param consumer the consumer.
	 * @param exception the exception.
	 * @since 2.7
	 */
	void accept(ConsumerRecord<?, ?> record, @Nullable Consumer<?, ?> consumer, Exception exception);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerProperties.java | /*
* Copyright 2019-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.LogIfLevelEnabled;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* Common consumer properties.
*
* @author Gary Russell
* @since 2.3
*
*/
public class ConsumerProperties {
/**
* The default {@link #setPollTimeout(long) pollTimeout} (ms).
*/
public static final long DEFAULT_POLL_TIMEOUT = 5_000L;
private static final int DEFAULT_COMMIT_RETRIES = 3;
/**
* Topic names.
*/
private final String[] topics;
/**
* Topic pattern.
*/
private final Pattern topicPattern;
/**
* Topics/partitions/initial offsets.
*/
private final TopicPartitionOffset[] topicPartitions;
/**
* The max time to block in the consumer waiting for records.
*/
private long pollTimeout = DEFAULT_POLL_TIMEOUT;
/**
* Override the group id.
*/
private String groupId;
/**
* Override the client id.
*/
private String clientId = "";
/**
* A user defined {@link ConsumerRebalanceListener} implementation.
*/
private ConsumerRebalanceListener consumerRebalanceListener;
private Duration syncCommitTimeout;
/**
* The commit callback; by default a simple logging callback is used to log
* success at DEBUG level and failures at ERROR level.
*/
private OffsetCommitCallback commitCallback;
/**
* Whether or not to call consumer.commitSync() or commitAsync() when the
* container is responsible for commits. Default true.
*/
private boolean syncCommits = true;
private LogIfLevelEnabled.Level commitLogLevel = LogIfLevelEnabled.Level.DEBUG;
private boolean onlyLogRecordMetadata = true;
private Properties kafkaConsumerProperties = new Properties();
private Duration authExceptionRetryInterval;
private int commitRetries = DEFAULT_COMMIT_RETRIES;
private boolean fixTxOffsets;
private boolean checkDeserExWhenKeyNull;
private boolean checkDeserExWhenValueNull;
/**
 * Create properties for a container that will subscribe to the specified topics.
 * The array is cloned so later changes by the caller are not reflected here.
 * @param topics the topics; must not be empty.
 */
public ConsumerProperties(String... topics) {
	Assert.notEmpty(topics, "An array of topics must be provided");
	this.topics = topics.clone();
	this.topicPattern = null;
	this.topicPartitions = null;
}
/**
 * Create properties for a container that will subscribe to topics matching the
 * specified pattern. The framework will create a container that subscribes to all
 * topics matching the specified pattern to get dynamically assigned partitions. The
 * pattern matching will be performed periodically against topics existing at the time
 * of check.
 * @param topicPattern the pattern.
 * @see org.apache.kafka.clients.CommonClientConfigs#METADATA_MAX_AGE_CONFIG
 */
public ConsumerProperties(Pattern topicPattern) {
	this.topics = null;
	this.topicPattern = topicPattern;
	this.topicPartitions = null;
}
/**
 * Create properties for a container that will assign itself the provided topic
 * partitions. The array is copied so later changes by the caller are not reflected.
 * @param topicPartitions the topic partitions; must not be empty.
 */
public ConsumerProperties(TopicPartitionOffset... topicPartitions) {
	this.topics = null;
	this.topicPattern = null;
	Assert.notEmpty(topicPartitions, "An array of topicPartitions must be provided");
	this.topicPartitions = Arrays.copyOf(topicPartitions, topicPartitions.length);
}
/**
 * Return the configured topics.
 * @return a defensive copy of the topics, or null if a pattern or explicit
 * partitions were configured instead.
 */
@Nullable
public String[] getTopics() {
	return this.topics != null
			? Arrays.copyOf(this.topics, this.topics.length)
			: null;
}

/**
 * Return the configured topic pattern.
 * @return the topic pattern, or null if topics or explicit partitions were
 * configured instead.
 */
@Nullable
public Pattern getTopicPattern() {
	return this.topicPattern;
}

/**
 * Return the configured {@link TopicPartitionOffset}s.
 * @return a defensive copy of the topics/partitions, or null if topics or a
 * pattern were configured instead.
 * @since 2.5
 */
@Nullable
public TopicPartitionOffset[] getTopicPartitions() {
	return this.topicPartitions != null
			? Arrays.copyOf(this.topicPartitions, this.topicPartitions.length)
			: null;
}
/**
 * Set the max time to block in the consumer waiting for records.
 * @param pollTimeout the timeout in ms; default {@value #DEFAULT_POLL_TIMEOUT}.
 */
public void setPollTimeout(long pollTimeout) {
	this.pollTimeout = pollTimeout;
}

/**
 * Return the max time to block in the consumer waiting for records.
 * @return the poll timeout in ms.
 * @see #setPollTimeout(long)
 */
public long getPollTimeout() {
	return this.pollTimeout;
}

/**
 * Set the group id for this container. Overrides any {@code group.id} property
 * provided by the consumer factory configuration.
 * @param groupId the group id.
 */
public void setGroupId(String groupId) {
	this.groupId = groupId;
}

/**
 * Return the container's group id.
 * @return the group id, or null if not overridden here.
 */
@Nullable
public String getGroupId() {
	return this.groupId;
}

/**
 * Return the client id.
 * @return the client id; an empty string by default.
 * @see #setClientId(String)
 */
public String getClientId() {
	return this.clientId;
}

/**
 * Set the client id; overrides the consumer factory client.id property.
 * When used in a concurrent container, will be suffixed with '-n' to
 * provide a unique value for each consumer.
 * @param clientId the client id.
 */
public void setClientId(String clientId) {
	this.clientId = clientId;
}
/**
 * Set the user defined {@link ConsumerRebalanceListener} implementation.
 * @param consumerRebalanceListener the {@link ConsumerRebalanceListener} instance
 */
public void setConsumerRebalanceListener(ConsumerRebalanceListener consumerRebalanceListener) {
	this.consumerRebalanceListener = consumerRebalanceListener;
}

/**
 * Return the rebalance listener.
 * @return the listener, or null if none was set.
 */
@Nullable
public ConsumerRebalanceListener getConsumerRebalanceListener() {
	return this.consumerRebalanceListener;
}

/**
 * Set the timeout for commitSync operations (if {@link #isSyncCommits()}. Overrides
 * the default api timeout property.
 * @param syncCommitTimeout the timeout.
 * @see #setSyncCommits(boolean)
 */
public void setSyncCommitTimeout(@Nullable Duration syncCommitTimeout) {
	this.syncCommitTimeout = syncCommitTimeout;
}

/**
 * Return the sync commit timeout.
 * @return the timeout, or null to use the consumer's default api timeout.
 */
@Nullable
public Duration getSyncCommitTimeout() {
	return this.syncCommitTimeout;
}

/**
 * Set the commit callback; by default a simple logging callback is used to log
 * success at DEBUG level and failures at ERROR level.
 * Used when {@link #setSyncCommits(boolean) syncCommits} is false.
 * @param commitCallback the callback.
 * @see #setSyncCommits(boolean)
 */
public void setCommitCallback(OffsetCommitCallback commitCallback) {
	this.commitCallback = commitCallback;
}

/**
 * Return the commit callback.
 * @return the callback, or null if none was set.
 */
@Nullable
public OffsetCommitCallback getCommitCallback() {
	return this.commitCallback;
}
/**
 * Set whether or not to call consumer.commitSync() or commitAsync() when the
 * container is responsible for commits. Default true.
 * @param syncCommits true to use commitSync().
 * @see #setSyncCommitTimeout(Duration)
 * @see #setCommitCallback(OffsetCommitCallback)
 * @see #setCommitLogLevel(org.springframework.kafka.support.LogIfLevelEnabled.Level)
 * @see #setCommitRetries(int)
 */
public void setSyncCommits(boolean syncCommits) {
	this.syncCommits = syncCommits;
}

/**
 * Return whether synchronous commits are used.
 * @return true for commitSync(); false for commitAsync().
 * @see #setSyncCommits(boolean)
 */
public boolean isSyncCommits() {
	return this.syncCommits;
}

/**
 * The level at which to log offset commits.
 * @return the level.
 */
public LogIfLevelEnabled.Level getCommitLogLevel() {
	return this.commitLogLevel;
}

/**
 * Set the level at which to log offset commits.
 * Default: DEBUG.
 * @param commitLogLevel the level.
 */
public void setCommitLogLevel(LogIfLevelEnabled.Level commitLogLevel) {
	Assert.notNull(commitLogLevel, "'commitLogLevel' cannot be null");
	this.commitLogLevel = commitLogLevel;
}
/**
 * Get the consumer properties that will be merged with the consumer properties
 * provided by the consumer factory; properties here will supersede any with the same
 * name(s) in the consumer factory. You can add non-String-valued properties, but the
 * property name (hashtable key) must be String; all others will be ignored.
 * {@code group.id} and {@code client.id} are ignored.
 * Note: the live instance is returned, not a copy.
 * @return the properties.
 * @see org.apache.kafka.clients.consumer.ConsumerConfig
 * @see #setGroupId(String)
 * @see #setClientId(String)
 */
public Properties getKafkaConsumerProperties() {
	return this.kafkaConsumerProperties;
}

/**
 * Set the consumer properties that will be merged with the consumer properties
 * provided by the consumer factory; properties here will supersede any with the same
 * name(s) in the consumer factory.
 * {@code group.id} and {@code client.id} are ignored.
 * Property keys must be {@link String}s.
 * @param kafkaConsumerProperties the properties.
 * @see org.apache.kafka.clients.consumer.ConsumerConfig
 * @see #setGroupId(String)
 * @see #setClientId(String)
 */
public void setKafkaConsumerProperties(Properties kafkaConsumerProperties) {
	Assert.notNull(kafkaConsumerProperties, "'kafkaConsumerProperties' cannot be null");
	this.kafkaConsumerProperties = kafkaConsumerProperties;
}
	/**
	 * Get the authentication/authorization retry interval.
	 * @return the interval.
	 * @deprecated in favor of {@link #getAuthExceptionRetryInterval()}.
	 */
	@Deprecated
	@Nullable
	public Duration getAuthorizationExceptionRetryInterval() {
		return this.authExceptionRetryInterval;
	}
	/**
	 * Set the interval between retries after an
	 * {@link org.apache.kafka.common.errors.AuthenticationException} or
	 * {@code org.apache.kafka.common.errors.AuthorizationException} is thrown by
	 * {@code KafkaConsumer}. By default the field is null and retries are disabled. In
	 * such a case the container will be stopped.
	 *
	 * The interval must be less than {@code max.poll.interval.ms} consumer property.
	 *
	 * @param authorizationExceptionRetryInterval the duration between retries
	 * @since 2.3.5
	 * @deprecated in favor of {@link #setAuthExceptionRetryInterval(Duration)}.
	 */
	@Deprecated
	public void setAuthorizationExceptionRetryInterval(Duration authorizationExceptionRetryInterval) {
		this.authExceptionRetryInterval = authorizationExceptionRetryInterval;
	}
	/**
	 * Get the authentication/authorization retry interval.
	 * @return the interval.
	 */
	@Nullable
	public Duration getAuthExceptionRetryInterval() {
		return this.authExceptionRetryInterval;
	}
	/**
	 * Set the interval between retries after an
	 * {@link org.apache.kafka.common.errors.AuthenticationException} or
	 * {@code org.apache.kafka.common.errors.AuthorizationException} is thrown by
	 * {@code KafkaConsumer}. By default the field is null and retries are disabled. In
	 * such a case the container will be stopped.
	 *
	 * The interval must be less than {@code max.poll.interval.ms} consumer property.
	 *
	 * @param authExceptionRetryInterval the duration between retries
	 * @since 2.8
	 */
	public void setAuthExceptionRetryInterval(Duration authExceptionRetryInterval) {
		this.authExceptionRetryInterval = authExceptionRetryInterval;
	}
/**
* The number of retries allowed when a
* {@link org.apache.kafka.clients.consumer.RetriableCommitFailedException} is thrown
* by the consumer when using {@link #setSyncCommits(boolean)} set to true.
* @return the number of retries.
* @since 2.3.9
* @see #setSyncCommits(boolean)
*/
public int getCommitRetries() {
return this.commitRetries;
}
/**
* Set number of retries allowed when a
* {@link org.apache.kafka.clients.consumer.RetriableCommitFailedException} is thrown
* by the consumer when using {@link #setSyncCommits(boolean)} set to true. Default 3
* (4 attempts total).
* @param commitRetries the commitRetries.
* @since 2.3.9
* @see #setSyncCommits(boolean)
*/
public void setCommitRetries(int commitRetries) {
this.commitRetries = commitRetries;
}
	/**
	 * Whether to log only the record metadata ({@code topic-partition@offset}) in
	 * log messages instead of {@code record.toString()}.
	 * @return true to log only the metadata.
	 * @deprecated in favor of
	 * {@link KafkaUtils#setConsumerRecordFormatter(java.util.function.Function)}.
	 */
	@Deprecated
	public boolean isOnlyLogRecordMetadata() {
		return this.onlyLogRecordMetadata;
	}
	/**
	 * Set to false to log {@code record.toString()} in log messages instead of
	 * {@code topic-partition@offset}.
	 * @param onlyLogRecordMetadata false to log the entire record.
	 * @since 2.2.14
	 * @deprecated in favor of
	 * {@link KafkaUtils#setConsumerRecordFormatter(java.util.function.Function)}.
	 */
	@Deprecated
	public void setOnlyLogRecordMetadata(boolean onlyLogRecordMetadata) {
		this.onlyLogRecordMetadata = onlyLogRecordMetadata;
	}
/**
* Whether or not to correct terminal transactional offsets.
* @return true to fix.
* @since 2.5.6
* @see #setFixTxOffsets(boolean)
*/
public boolean isFixTxOffsets() {
return this.fixTxOffsets;
}
/**
* When consuming records produced by a transactional producer, and the consumer is
* positioned at the end of a partition, the lag can incorrectly be reported as
* greater than zero, due to the pseudo record used to indicate transaction
* commit/rollback and, possibly, the presence of rolled-back records. This does not
* functionally affect the consumer but some users have expressed concern that the
* "lag" is non-zero. Set this to true and the container will correct such
* mis-reported offsets. The check is performed before the next poll to avoid adding
* significant complexity to the commit processing. IMPORTANT: At the time of writing,
* the lag will only be corrected if the consumer is configured with
* {@code isolation.level=read_committed} and {@code max.poll.records} is greater than
* 1. See https://issues.apache.org/jira/browse/KAFKA-10683 for more information.
* @param fixTxOffsets true to correct the offset(s).
* @since 2.5.6
*/
public void setFixTxOffsets(boolean fixTxOffsets) {
this.fixTxOffsets = fixTxOffsets;
}
/**
* Always check for a deserialization exception header with a null key.
* @return true to check.
* @since 2.8.1
*/
public boolean isCheckDeserExWhenKeyNull() {
return this.checkDeserExWhenKeyNull;
}
/**
* Set to true to always check for
* {@link org.springframework.kafka.support.serializer.DeserializationException}
* header when a null key is received. Useful when the consumer code cannot determine
* that an
* {@link org.springframework.kafka.support.serializer.ErrorHandlingDeserializer} has
* been configured, such as when using a delegating deserializer.
* @param checkDeserExWhenKeyNull true to always check.
* @since 2.8.1
*/
public void setCheckDeserExWhenKeyNull(boolean checkDeserExWhenKeyNull) {
this.checkDeserExWhenKeyNull = checkDeserExWhenKeyNull;
}
/**
* Always check for a deserialization exception header with a null value.
* @return true to check.
* @since 2.8.1
*/
public boolean isCheckDeserExWhenValueNull() {
return this.checkDeserExWhenValueNull;
}
/**
* Set to true to always check for
* {@link org.springframework.kafka.support.serializer.DeserializationException}
* header when a null value is received. Useful when the consumer code cannot
* determine that an
* {@link org.springframework.kafka.support.serializer.ErrorHandlingDeserializer} has
* been configured, such as when using a delegating deserializer.
* @param checkDeserExWhenValueNull true to always check.
* @since 2.8.1
*/
public void setCheckDeserExWhenValueNull(boolean checkDeserExWhenValueNull) {
this.checkDeserExWhenValueNull = checkDeserExWhenValueNull;
}
@Override
public String toString() {
return "ConsumerProperties ["
+ renderProperties()
+ "]";
}
protected final String renderProperties() {
return renderTopics()
+ "\n pollTimeout=" + this.pollTimeout
+ (this.groupId != null ? "\n groupId=" + this.groupId : "")
+ (StringUtils.hasText(this.clientId) ? "\n clientId=" + this.clientId : "")
+ (this.consumerRebalanceListener != null
? "\n consumerRebalanceListener=" + this.consumerRebalanceListener
: "")
+ (this.commitCallback != null ? "\n commitCallback=" + this.commitCallback : "")
+ "\n syncCommits=" + this.syncCommits
+ (this.syncCommitTimeout != null ? "\n syncCommitTimeout=" + this.syncCommitTimeout : "")
+ (this.kafkaConsumerProperties.size() > 0 ? "\n properties=" + this.kafkaConsumerProperties : "")
+ "\n authExceptionRetryInterval=" + this.authExceptionRetryInterval
+ "\n commitRetries=" + this.commitRetries
+ "\n fixTxOffsets" + this.fixTxOffsets;
}
private String renderTopics() {
return (this.topics != null ? "\n topics=" + Arrays.toString(this.topics) : "")
+ (this.topicPattern != null ? "\n topicPattern=" + this.topicPattern : "")
+ (this.topicPartitions != null
? "\n topicPartitions=" + Arrays.toString(this.topicPartitions)
: "");
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerRecordRecoverer.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * A functional extension of {@link BiConsumer} used to recover a consumer record that
 * failed processing, given the record and the exception that caused the failure.
 *
 * @author Gary Russell
 * @since 2.3
 *
 */
@FunctionalInterface
public interface ConsumerRecordRecoverer extends BiConsumer<ConsumerRecord<?, ?>, Exception> {
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ConsumerSeekAware.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
/**
 * Listeners that implement this interface are provided with a
 * {@link ConsumerSeekCallback} which can be used to perform a
 * seek operation.
 *
 * @author Gary Russell
 * @since 1.1
 *
 */
public interface ConsumerSeekAware {
	/**
	 * Register the callback to use when seeking at some arbitrary time. When used with a
	 * {@code ConcurrentMessageListenerContainer}, or with the same listener instance in
	 * multiple containers, listeners should store the callback in a {@code ThreadLocal}
	 * because this method is called once per consumer thread.
	 * @param callback the callback.
	 */
	default void registerSeekCallback(ConsumerSeekCallback callback) {
		// do nothing
	}
	/**
	 * When using group management, called when partition assignments change.
	 * @param assignments the new assignments and their current offsets.
	 * @param callback the callback to perform an initial seek after assignment.
	 */
	default void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		// do nothing
	}
	/**
	 * When using group management, called when partition assignments are revoked.
	 * Listeners should discard any callback saved from
	 * {@link #registerSeekCallback(ConsumerSeekCallback)} on this thread.
	 * @param partitions the partitions that have been revoked.
	 * @since 2.3
	 */
	default void onPartitionsRevoked(Collection<TopicPartition> partitions) {
		// do nothing
	}
	/**
	 * If the container is configured to emit idle container events, this method is called
	 * when the container idle event is emitted - allowing a seek operation.
	 * @param assignments the new assignments and their current offsets.
	 * @param callback the callback to perform a seek.
	 */
	default void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
		// do nothing
	}
	/**
	 * Called when the listener consumer terminates, allowing implementations to clean up
	 * state, such as thread locals.
	 * @since 2.4
	 */
	default void unregisterSeekCallback() {
		// do nothing
	}
	/**
	 * A callback that a listener can invoke to seek to a specific offset.
	 */
	interface ConsumerSeekCallback {
		/**
		 * Perform a seek operation. When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere,
		 * queue the seek operation to the consumer. The queued seek will occur after any
		 * pending offset commits. The consumer must be currently assigned the specified
		 * partition.
		 * @param topic the topic.
		 * @param partition the partition.
		 * @param offset the offset (absolute).
		 */
		void seek(String topic, int partition, long offset);
		/**
		 * Perform a seek to beginning operation. When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere, queue
		 * the seek operation to the consumer. The queued seek will occur after
		 * any pending offset commits. The consumer must be currently assigned the
		 * specified partition.
		 * @param topic the topic.
		 * @param partition the partition.
		 */
		void seekToBeginning(String topic, int partition);
		/**
		 * Perform a seek to beginning operation. When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere,
		 * queue the seek operation to the consumer for each
		 * {@link TopicPartition}. The seek will occur after any pending offset commits.
		 * The consumer must be currently assigned the specified partition(s).
		 * @param partitions the {@link TopicPartition}s.
		 * @since 2.3.4
		 */
		default void seekToBeginning(Collection<TopicPartition> partitions) {
			throw new UnsupportedOperationException();
		}
		/**
		 * Perform a seek to end operation. When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere, queue
		 * the seek operation to the consumer. The queued seek will occur after any
		 * pending offset commits. The consumer must be currently assigned the specified
		 * partition.
		 * @param topic the topic.
		 * @param partition the partition.
		 */
		void seekToEnd(String topic, int partition);
		/**
		 * Perform a seek to end operation. When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere, queue
		 * the seek operation to the consumer for each {@link TopicPartition}. The queued
		 * seek(s) will occur after any pending offset commits. The consumer must be
		 * currently assigned the specified partition(s).
		 * @param partitions the {@link TopicPartition}s.
		 * @since 2.3.4
		 */
		default void seekToEnd(Collection<TopicPartition> partitions) {
			throw new UnsupportedOperationException();
		}
		/**
		 * Perform a seek relative to the start, end, or current position. When called
		 * from {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)}
		 * or from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere, queue
		 * the seek operation. The queued seek will occur after any pending offset
		 * commits. The consumer must be currently assigned the specified partition.
		 * @param topic the topic.
		 * @param partition the partition.
		 * @param offset the offset; positive values are relative to the start, negative
		 * values are relative to the end, unless toCurrent is true.
		 * @param toCurrent true for the offset to be relative to the current position
		 * rather than the beginning or end.
		 * @since 2.3
		 */
		void seekRelative(String topic, int partition, long offset, boolean toCurrent);
		/**
		 * Perform a seek to the first offset greater than or equal to the time stamp.
		 * When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere, queue
		 * the seek operation. The queued seek will occur after any pending offset
		 * commits. The consumer must be currently assigned the specified partition. Use
		 * {@link #seekToTimestamp(Collection, long)} when seeking multiple partitions
		 * because the offset lookup is blocking.
		 * @param topic the topic.
		 * @param partition the partition.
		 * @param timestamp the time stamp.
		 * @since 2.3
		 * @see #seekToTimestamp(Collection, long)
		 */
		void seekToTimestamp(String topic, int partition, long timestamp);
		/**
		 * Perform a seek to the first offset greater than or equal to the time stamp.
		 * When called from
		 * {@link ConsumerSeekAware#onPartitionsAssigned(Map, ConsumerSeekCallback)} or
		 * from {@link ConsumerSeekAware#onIdleContainer(Map, ConsumerSeekCallback)}
		 * perform the seek immediately on the consumer. When called from elsewhere, queue
		 * the seek operation. The queued seek will occur after any pending offset
		 * commits. The consumer must be currently assigned the specified partition.
		 * @param topicPartitions the topic/partitions.
		 * @param timestamp the time stamp.
		 * @since 2.3
		 */
		void seekToTimestamp(Collection<TopicPartition> topicPartitions, long timestamp);
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerAwareBatchErrorHandler.java | /*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.lang.Nullable;
/**
 * An error handler that has access to the batch of records from the last poll of the
 * consumer, and the container.
 *
 * @author Gary Russell
 * @since 2.1
 *
 */
@FunctionalInterface
public interface ContainerAwareBatchErrorHandler extends ConsumerAwareBatchErrorHandler {
	@Override
	default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer) {
		// the container always invokes one of the container-aware overloads below
		throw new UnsupportedOperationException("Container should never call this");
	}
	@Override
	void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container);
	/**
	 * Handle the exception.
	 * @param thrownException the exception.
	 * @param data the consumer records.
	 * @param consumer the consumer.
	 * @param container the container.
	 * @param invokeListener a callback to re-invoke the listener.
	 * @since 2.3.7
	 */
	@Override
	@SuppressWarnings("unused")
	default void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data,
			Consumer<?, ?> consumer, MessageListenerContainer container, @Nullable Runnable invokeListener) {
		handle(thrownException, data, consumer, container);
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerAwareErrorHandler.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.lang.Nullable;
/**
 * An error handler that is given the remaining unprocessed records from the last poll
 * (including the failed record), the consumer, and the container.
 * Records passed to the handler are not passed to the listener
 * (unless re-fetched because the handler performed seeks).
 *
 * @author Gary Russell
 * @since 2.1
 *
 */
@FunctionalInterface
public interface ContainerAwareErrorHandler extends RemainingRecordsErrorHandler {
	@Override
	default void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records,
			Consumer<?, ?> consumer) {
		// the container always invokes the container-aware overload below
		throw new UnsupportedOperationException("Container should never call this");
	}
	/**
	 * Handle the exception.
	 * @param thrownException the exception.
	 * @param records the remaining records, including the failed one.
	 * @param consumer the consumer.
	 * @param container the container.
	 */
	@Override
	void handle(Exception thrownException, @Nullable List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
			MessageListenerContainer container);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerGroup.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.Lifecycle;
import org.springframework.core.log.LogAccessor;
import org.springframework.util.Assert;
/**
 * A named collection of listener containers that are started and stopped together.
 *
 * @author Gary Russell
 * @since 2.7.3
 *
 */
public class ContainerGroup implements Lifecycle {
	private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(ContainerGroup.class));
	private final String name;
	// LinkedHashSet: de-duplicates while preserving registration order
	private final Collection<MessageListenerContainer> containers = new LinkedHashSet<>();
	private boolean running;
	/**
	 * Construct an instance with the provided name.
	 * @param name the group name.
	 */
	public ContainerGroup(String name) {
		this.name = name;
	}
	/**
	 * Construct an instance with the provided name and containers.
	 * @param name the group name.
	 * @param containers the containers.
	 */
	public ContainerGroup(String name, List<MessageListenerContainer> containers) {
		this.name = name;
		this.containers.addAll(containers);
	}
	/**
	 * Construct an instance with the provided name and containers.
	 * @param name the group name.
	 * @param containers the containers.
	 */
	public ContainerGroup(String name, MessageListenerContainer...containers) {
		this.name = name;
		for (MessageListenerContainer toAdd : containers) {
			this.containers.add(toAdd);
		}
	}
	/**
	 * Return the group name.
	 * @return the name.
	 */
	public String getName() {
		return this.name;
	}
	/**
	 * Return the listener ids of the containers in this group.
	 * @return the listener ids.
	 */
	public Collection<String> getListenerIds() {
		return this.containers.stream()
				.map(MessageListenerContainer::getListenerId)
				.map(listenerId -> {
					Assert.state(listenerId != null, "Containers must have listener ids to be used here");
					return listenerId;
				})
				.collect(Collectors.toList());
	}
	/**
	 * Return true if the provided container is in this group.
	 * @param container the container.
	 * @return true if it is in this group.
	 */
	public boolean contains(MessageListenerContainer container) {
		return this.containers.contains(container);
	}
	/**
	 * Return true if all containers in this group are stopped.
	 * @return true if all are stopped.
	 */
	public boolean allStopped() {
		// noneMatch(running) == allMatch(!running); true for an empty group
		return this.containers.stream()
				.noneMatch(MessageListenerContainer::isRunning);
	}
	/**
	 * Add one or more containers to the group.
	 * @param theContainers the container(s).
	 */
	public void addContainers(MessageListenerContainer... theContainers) {
		for (MessageListenerContainer toAdd : theContainers) {
			this.containers.add(toAdd);
		}
	}
	/**
	 * Remove a container from the group.
	 * @param container the container.
	 * @return true if the container was removed.
	 */
	public boolean removeContainer(MessageListenerContainer container) {
		return this.containers.remove(container);
	}
	@Override
	public synchronized void start() {
		if (!this.running) {
			for (MessageListenerContainer container : this.containers) {
				LOGGER.debug(() -> "Starting: " + container);
				container.start();
			}
			this.running = true;
		}
	}
	@Override
	public synchronized void stop() {
		if (this.running) {
			this.containers.forEach(MessageListenerContainer::stop);
			this.running = false;
		}
	}
	@Override
	public synchronized boolean isRunning() {
		return this.running;
	}
	@Override
	public String toString() {
		return "ContainerGroup [name=" + this.name + ", containers=" + this.containers + "]";
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerGroupSequencer.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashSet;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.SmartLifecycle;
import org.springframework.core.log.LogAccessor;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.core.task.TaskExecutor;
import org.springframework.kafka.event.ListenerContainerIdleEvent;
/**
 * Sequence the starting of container groups when all containers in the previous group are
 * idle.
 *
 * @author Gary Russell
 * @since 2.7.3
 *
 */
public class ContainerGroupSequencer implements ApplicationContextAware,
		ApplicationListener<ListenerContainerIdleEvent>, SmartLifecycle {
	private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(ContainerGroupSequencer.class));
	private final ListenerContainerRegistry registry;
	private final long defaultIdleEventInterval;
	private final Collection<String> groupNames = new LinkedHashSet<>();
	private final Collection<ContainerGroup> groups = new LinkedHashSet<>();
	// container stop() calls are dispatched off the event/callback thread
	private final TaskExecutor executor = new SimpleAsyncTaskExecutor("container-group-sequencer-");
	private ApplicationContext applicationContext;
	private boolean stopLastGroupWhenIdle;
	// iterates over 'groups'; advanced each time the current group becomes fully idle
	private Iterator<ContainerGroup> iterator;
	private ContainerGroup currentGroup;
	private boolean autoStartup = true;
	private int phase = AbstractMessageListenerContainer.DEFAULT_PHASE;
	private boolean running;
	/**
	 * Set containers in each group to not auto start. Start the containers in the first
	 * group. Start containers in group[n] when all containers in group[n-1] are idle;
	 * stop the containers in group[n-1].
	 * @param registry the registry.
	 * @param defaultIdleEventInterval the idle event interval if not already set.
	 * @param containerGroups The list of container groups, in order.
	 */
	public ContainerGroupSequencer(ListenerContainerRegistry registry, long defaultIdleEventInterval,
			String... containerGroups) {
		this.registry = registry;
		this.defaultIdleEventInterval = defaultIdleEventInterval;
		for (String groupName : containerGroups) {
			this.groupNames.add(groupName);
		}
	}
	@Override
	public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
		this.applicationContext = applicationContext;
	}
	/**
	 * Set to true to stop the containers in the final group when they go idle. By
	 * default, the containers in the final group remain running.
	 * @param stopLastGroupWhenIdle true to stop containers in the final group.
	 */
	public synchronized void setStopLastGroupWhenIdle(boolean stopLastGroupWhenIdle) {
		this.stopLastGroupWhenIdle = stopLastGroupWhenIdle;
	}
	@Override
	public boolean isAutoStartup() {
		return this.autoStartup;
	}
	/**
	 * Set to false to not automatically start.
	 * @param autoStartup false to not start;
	 * @since 2.7.6
	 */
	public void setAutoStartup(boolean autoStartup) {
		this.autoStartup = autoStartup;
	}
	@Override
	public int getPhase() {
		return this.phase;
	}
	/**
	 * Set the {@link SmartLifecycle#getPhase()}.
	 * @param phase the phase.
	 * @since 2.7.6
	 */
	public void setPhase(int phase) {
		this.phase = phase;
	}
	@Override
	public synchronized void onApplicationEvent(ListenerContainerIdleEvent event) {
		LOGGER.debug(() -> event.toString());
		MessageListenerContainer parent = event.getContainer(MessageListenerContainer.class);
		MessageListenerContainer container = (MessageListenerContainer) event.getSource();
		boolean inCurrentGroup = this.currentGroup != null && this.currentGroup.contains(parent);
		// only react while running, and only when there is a next group to start
		// (or we have been told to stop the last group when it goes idle)
		if (this.running && inCurrentGroup && (this.iterator.hasNext() || this.stopLastGroupWhenIdle)) {
			this.executor.execute(() -> {
				LOGGER.debug(() -> "Stopping: " + container);
				container.stop(() -> {
					synchronized (this) {
						// stop the parent only after its last child container stops
						if (!parent.isChildRunning()) {
							this.executor.execute(() -> {
								stopParentAndCheckGroup(parent);
							});
						}
					}
				});
			});
		}
	}
	// Stop the parent container; when the whole current group is stopped, advance to
	// and start the next group (or clear the current group if this was the last one).
	private synchronized void stopParentAndCheckGroup(MessageListenerContainer parent) {
		if (parent.isRunning()) {
			LOGGER.debug(() -> "Stopping: " + parent);
			parent.stop(() -> {
				if (this.currentGroup != null) {
					LOGGER.debug(() -> "Checking group: " + this.currentGroup.toString());
					if (this.currentGroup.allStopped()) {
						if (this.iterator.hasNext()) {
							this.currentGroup = this.iterator.next();
							LOGGER.debug(() -> "Starting next group: " + this.currentGroup);
							this.currentGroup.start();
						}
						else {
							this.currentGroup = null;
						}
					}
				}
			});
		}
	}
	@Override
	public synchronized void start() {
		if (this.currentGroup != null) {
			LOGGER.debug(() -> "Starting first group: " + this.currentGroup);
			this.currentGroup.start();
		}
		this.running = true;
	}
	/**
	 * Initialize the sequencer: resolve the {@link ContainerGroup} beans (named
	 * {@code <groupName>.group}), make the first group current, apply the default idle
	 * event interval where none is set, and disable auto startup on every container so
	 * that groups are only started by this sequencer.
	 */
	public void initialize() {
		this.groups.clear();
		for (String group : this.groupNames) {
			this.groups.add(this.applicationContext.getBean(group + ".group", ContainerGroup.class));
		}
		if (this.groups.size() > 0) {
			this.iterator = this.groups.iterator();
			this.currentGroup = this.iterator.next();
			this.groups.forEach(grp -> {
				Collection<String> ids = grp.getListenerIds();
				ids.stream().forEach(id -> {
					// NOTE(review): assumes every listener id resolves to a registered
					// container; getListenerContainer() may return null otherwise.
					MessageListenerContainer container = this.registry.getListenerContainer(id);
					if (container.getContainerProperties().getIdleEventInterval() == null) {
						container.getContainerProperties().setIdleEventInterval(this.defaultIdleEventInterval);
					}
					// Bug fix: this must apply to every container, not only those that
					// received the default idle interval; otherwise containers with a
					// pre-configured idle interval would still auto start, defeating
					// the sequencing contract documented on the constructor.
					container.setAutoStartup(false);
				});
			});
		}
		LOGGER.debug(() -> "Found: " + this.groups);
	}
	@Override
	public synchronized void stop() {
		this.running = false;
		if (this.currentGroup != null) {
			this.currentGroup.stop();
			this.currentGroup = null;
		}
	}
	@Override
	public synchronized boolean isRunning() {
		return this.running;
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerProperties.java | /*
* Copyright 2016-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.aopalliance.aop.Advice;
import org.springframework.aop.framework.Advised;
import org.springframework.aop.framework.ProxyFactory;
import org.springframework.aop.support.AopUtils;
import org.springframework.core.task.AsyncListenableTaskExecutor;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
/**
* Contains runtime properties for a listener container.
*
* @author Gary Russell
* @author Artem Bilan
* @author Artem Yakshin
* @author Johnny Lim
* @author Lukasz Kaminski
* @author Kyuhyeok Park
*/
public class ContainerProperties extends ConsumerProperties {

	/**
	 * The offset commit behavior enumeration.
	 */
	public enum AckMode {

		/**
		 * Commit the offset after each record is processed by the listener.
		 */
		RECORD,

		/**
		 * Commit the offsets of all records returned by the previous poll after they all
		 * have been processed by the listener.
		 */
		BATCH,

		/**
		 * Commit pending offsets after
		 * {@link ContainerProperties#setAckTime(long) ackTime} has elapsed.
		 */
		TIME,

		/**
		 * Commit pending offsets after
		 * {@link ContainerProperties#setAckCount(int) ackCount} has been
		 * exceeded.
		 */
		COUNT,

		/**
		 * Commit pending offsets after
		 * {@link ContainerProperties#setAckCount(int) ackCount} has been
		 * exceeded or after {@link ContainerProperties#setAckTime(long)
		 * ackTime} has elapsed.
		 */
		COUNT_TIME,

		/**
		 * Listener is responsible for acking - use a
		 * {@link org.springframework.kafka.listener.AcknowledgingMessageListener}; acks
		 * will be queued and offsets will be committed when all the records returned by
		 * the previous poll have been processed by the listener.
		 */
		MANUAL,

		/**
		 * Listener is responsible for acking - use a
		 * {@link org.springframework.kafka.listener.AcknowledgingMessageListener}; the
		 * commit will be performed immediately if the {@code Acknowledgment} is
		 * acknowledged on the calling consumer thread; otherwise, the acks will be queued
		 * and offsets will be committed when all the records returned by the previous
		 * poll have been processed by the listener; results will be indeterminate if you
		 * sometimes acknowledge on the calling thread and sometimes not.
		 */
		MANUAL_IMMEDIATE,

	}

	/**
	 * Offset commit behavior during assignment.
	 * @since 2.3.6
	 */
	public enum AssignmentCommitOption {

		/**
		 * Always commit the current offset during partition assignment.
		 */
		ALWAYS,

		/**
		 * Never commit the current offset during partition assignment.
		 */
		NEVER,

		/**
		 * Commit the current offset during partition assignment when auto.offset.reset is
		 * 'latest'; transactional if so configured.
		 */
		LATEST_ONLY,

		/**
		 * Commit the current offset during partition assignment when auto.offset.reset is
		 * 'latest'; use consumer commit even when transactions are being used.
		 */
		LATEST_ONLY_NO_TX

	}

	/**
	 * Mode for exactly once semantics.
	 *
	 * @since 2.5
	 */
	public enum EOSMode {

		/**
		 * 'transactional.id' fencing (0.11 - 2.4 brokers).
		 * @deprecated 3.0 and later will require 2.5+ brokers
		 */
		@Deprecated
		V1,

		/**
		 * fetch-offset-request fencing (2.5+ brokers).
		 */
		V2,

		/**
		 * 'transactional.id' fencing (0.11 - 2.4 brokers).
		 * @deprecated in favor of {@link #V1}.
		 */
		@Deprecated
		ALPHA(V1),

		/**
		 * fetch-offset-request fencing (2.5+ brokers).
		 * @deprecated in favor of {@link #V2}.
		 */
		@Deprecated
		BETA(V2);

		// The canonical (non-alias) mode; for V1/V2 this is the constant itself.
		private final EOSMode mode;

		EOSMode() {
			this.mode = this;
		}

		/**
		 * Create an alias.
		 * @param v12 the mode for which this is an alias.
		 */
		EOSMode(EOSMode v12) {
			this.mode = v12;
		}

		/**
		 * Return the mode or the aliased mode.
		 * @return the mode.
		 * @deprecated aliases will be removed in 3.0
		 */
		@Deprecated
		public EOSMode getMode() {
			return this.mode;
		}

	}

	/**
	 * The default {@link #setShutdownTimeout(long) shutDownTimeout} (ms).
	 */
	public static final long DEFAULT_SHUTDOWN_TIMEOUT = 10_000L;

	/**
	 * The default {@link #setMonitorInterval(int) monitorInterval} (s).
	 */
	public static final int DEFAULT_MONITOR_INTERVAL = 30;

	/**
	 * The default {@link #setNoPollThreshold(float) noPollThreshold}.
	 */
	public static final float DEFAULT_NO_POLL_THRESHOLD = 3f;

	private static final Duration DEFAULT_CONSUMER_START_TIMEOUT = Duration.ofSeconds(30);

	private static final int DEFAULT_ACK_TIME = 5000;

	private static final double DEFAULT_IDLE_BEFORE_DATA_MULTIPLIER = 5.0;

	// Extra tags added to the Micrometer listener timers.
	private final Map<String, String> micrometerTags = new HashMap<>();

	// Advices applied (via proxy) around the message listener.
	private final List<Advice> adviceChain = new ArrayList<>();

	/**
	 * The ack mode to use when auto ack (in the configuration properties) is false;
	 * see {@link #setAckMode(AckMode)} for the semantics of each mode.
	 */
	private AckMode ackMode = AckMode.BATCH;

	/**
	 * The number of outstanding record count after which offsets should be
	 * committed when {@link AckMode#COUNT} or {@link AckMode#COUNT_TIME} is being
	 * used.
	 */
	private int ackCount = 1;

	/**
	 * The time (ms) after which outstanding offsets should be committed when
	 * {@link AckMode#TIME} or {@link AckMode#COUNT_TIME} is being used. Should be
	 * larger than the {@link #setPollTimeout(long) pollTimeout}.
	 */
	private long ackTime = DEFAULT_ACK_TIME;

	/**
	 * The message listener; must be a {@link org.springframework.kafka.listener.MessageListener}
	 * or {@link org.springframework.kafka.listener.AcknowledgingMessageListener}.
	 */
	private Object messageListener;

	/**
	 * The executor for threads that poll the consumer.
	 */
	private AsyncListenableTaskExecutor consumerTaskExecutor;

	/**
	 * The timeout for shutting down the container. This is the maximum amount of
	 * time that the invocation to {@code #stop(Runnable)} will block for, before
	 * returning.
	 */
	private long shutdownTimeout = DEFAULT_SHUTDOWN_TIMEOUT;

	private Long idleEventInterval;

	private Long idlePartitionEventInterval;

	private double idleBeforeDataMultiplier = DEFAULT_IDLE_BEFORE_DATA_MULTIPLIER;

	private PlatformTransactionManager transactionManager;

	private int monitorInterval = DEFAULT_MONITOR_INTERVAL;

	private TaskScheduler scheduler;

	private float noPollThreshold = DEFAULT_NO_POLL_THRESHOLD;

	private boolean logContainerConfig;

	private boolean missingTopicsFatal = false;

	private long idleBetweenPolls;

	private boolean micrometerEnabled = true;

	private Duration consumerStartTimeout = DEFAULT_CONSUMER_START_TIMEOUT;

	private Boolean subBatchPerPartition;

	private AssignmentCommitOption assignmentCommitOption = AssignmentCommitOption.LATEST_ONLY_NO_TX;

	private boolean deliveryAttemptHeader;

	private EOSMode eosMode = EOSMode.V2;

	private TransactionDefinition transactionDefinition;

	private boolean stopContainerWhenFenced;

	private boolean stopImmediate;

	private boolean asyncAcks;

	/**
	 * Create properties for a container that will subscribe to the specified topics.
	 * @param topics the topics.
	 */
	public ContainerProperties(String... topics) {
		super(topics);
	}

	/**
	 * Create properties for a container that will subscribe to topics matching the
	 * specified pattern. The framework will create a container that subscribes to all
	 * topics matching the specified pattern to get dynamically assigned partitions. The
	 * pattern matching will be performed periodically against topics existing at the time
	 * of check.
	 * @param topicPattern the pattern.
	 * @see org.apache.kafka.clients.CommonClientConfigs#METADATA_MAX_AGE_CONFIG
	 */
	public ContainerProperties(Pattern topicPattern) {
		super(topicPattern);
	}

	/**
	 * Create properties for a container that will assign itself the provided topic
	 * partitions.
	 * @param topicPartitions the topic partitions.
	 */
	public ContainerProperties(TopicPartitionOffset... topicPartitions) {
		super(topicPartitions);
	}

	/**
	 * Set the message listener; must be a {@link org.springframework.kafka.listener.MessageListener}
	 * or {@link org.springframework.kafka.listener.AcknowledgingMessageListener}.
	 * @param messageListener the listener.
	 */
	public void setMessageListener(Object messageListener) {
		this.messageListener = messageListener;
		adviseListenerIfNeeded();
	}

	/**
	 * Set the ack mode to use when auto ack (in the configuration properties) is false.
	 * <ul>
	 * <li>RECORD: Commit the offset after each record has been processed by the
	 * listener.</li>
	 * <li>BATCH: Commit the offsets for each batch of records received from the consumer
	 * when they all have been processed by the listener</li>
	 * <li>TIME: Commit pending offsets after {@link #setAckTime(long) ackTime} number of
	 * milliseconds; (should be greater than
	 * {@code #setPollTimeout(long) pollTimeout}.</li>
	 * <li>COUNT: Commit pending offsets after at least {@link #setAckCount(int) ackCount}
	 * number of records have been processed</li>
	 * <li>COUNT_TIME: Commit pending offsets after {@link #setAckTime(long) ackTime}
	 * number of milliseconds or at least {@link #setAckCount(int) ackCount} number of
	 * records have been processed</li>
	 * <li>MANUAL: Listener is responsible for acking - use a
	 * {@link org.springframework.kafka.listener.AcknowledgingMessageListener}. Acks will
	 * be queued and offsets will be committed when all the records returned by the
	 * previous poll have been processed by the listener.</li>
	 * <li>MANUAL_IMMEDIATE: Listener is responsible for acking - use a
	 * {@link org.springframework.kafka.listener.AcknowledgingMessageListener}. The commit
	 * will be performed immediately if the {@code Acknowledgment} is acknowledged on the
	 * calling consumer thread. Otherwise, the acks will be queued and offsets will be
	 * committed when all the records returned by the previous poll have been processed by
	 * the listener. Results will be indeterminate if you sometimes acknowledge on the
	 * calling thread and sometimes not.</li>
	 * </ul>
	 * @param ackMode the {@link AckMode}; default BATCH.
	 * @see #setTransactionManager(PlatformTransactionManager)
	 */
	public void setAckMode(AckMode ackMode) {
		Assert.notNull(ackMode, "'ackMode' cannot be null");
		this.ackMode = ackMode;
	}

	/**
	 * Set the number of outstanding record count after which offsets should be
	 * committed when {@link AckMode#COUNT} or {@link AckMode#COUNT_TIME} is being used.
	 * @param count the count
	 */
	public void setAckCount(int count) {
		Assert.state(count > 0, "'ackCount' must be > 0");
		this.ackCount = count;
	}

	/**
	 * Set the time (ms) after which outstanding offsets should be committed when
	 * {@link AckMode#TIME} or {@link AckMode#COUNT_TIME} is being used. Should be
	 * larger than the {@link #setPollTimeout(long) pollTimeout}.
	 * @param ackTime the time
	 */
	public void setAckTime(long ackTime) {
		Assert.state(ackTime > 0, "'ackTime' must be > 0");
		this.ackTime = ackTime;
	}

	/**
	 * Set the executor for threads that poll the consumer.
	 * @param consumerTaskExecutor the executor
	 */
	public void setConsumerTaskExecutor(@Nullable AsyncListenableTaskExecutor consumerTaskExecutor) {
		this.consumerTaskExecutor = consumerTaskExecutor;
	}

	/**
	 * Set the timeout for shutting down the container. This is the maximum amount of
	 * time that the invocation to {@code #stop(Runnable)} will block for, before
	 * returning; default {@value #DEFAULT_SHUTDOWN_TIMEOUT}.
	 * @param shutdownTimeout the shutdown timeout.
	 */
	public void setShutdownTimeout(long shutdownTimeout) {
		this.shutdownTimeout = shutdownTimeout;
	}

	/**
	 * Set the timeout for commitSync operations (if {@link #isSyncCommits()}. Overrides
	 * the default api timeout property. In order of precedence:
	 * <ul>
	 * <li>this property</li>
	 * <li>{@code ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG} in
	 * {@link #setKafkaConsumerProperties(java.util.Properties)}</li>
	 * <li>{@code ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG} in the consumer factory
	 * properties</li>
	 * <li>60 seconds</li>
	 * </ul>
	 * @param syncCommitTimeout the timeout.
	 * @see #setSyncCommits(boolean)
	 */
	@Override
	public void setSyncCommitTimeout(@Nullable Duration syncCommitTimeout) { // NOSONAR - not useless; enhanced javadoc
		super.setSyncCommitTimeout(syncCommitTimeout);
	}

	/**
	 * Set the idle event interval; when set, an event is emitted if a poll returns
	 * no records and this interval has elapsed since a record was returned.
	 * @param idleEventInterval the interval.
	 * @see #setIdleBeforeDataMultiplier(double)
	 */
	public void setIdleEventInterval(@Nullable Long idleEventInterval) {
		this.idleEventInterval = idleEventInterval;
	}

	/**
	 * Multiply the {@link #setIdleEventInterval(Long)} by this value until at least
	 * one record is received. Default 5.0.
	 * @param idleBeforeDataMultiplier the multiplier to apply to the idle event
	 * interval before any data has been received.
	 * @since 2.8
	 * @see #setIdleEventInterval(Long)
	 */
	public void setIdleBeforeDataMultiplier(double idleBeforeDataMultiplier) {
		this.idleBeforeDataMultiplier = idleBeforeDataMultiplier;
	}

	/**
	 * Set the idle partition event interval; when set, an event is emitted if a poll returns
	 * no records for a partition and this interval has elapsed since a record was returned.
	 * @param idlePartitionEventInterval the interval.
	 */
	public void setIdlePartitionEventInterval(@Nullable Long idlePartitionEventInterval) {
		this.idlePartitionEventInterval = idlePartitionEventInterval;
	}

	public AckMode getAckMode() {
		return this.ackMode;
	}

	public int getAckCount() {
		return this.ackCount;
	}

	public long getAckTime() {
		return this.ackTime;
	}

	public Object getMessageListener() {
		return this.messageListener;
	}

	/**
	 * Return the consumer task executor.
	 * @return the executor.
	 */
	@Nullable
	public AsyncListenableTaskExecutor getConsumerTaskExecutor() {
		return this.consumerTaskExecutor;
	}

	public long getShutdownTimeout() {
		return this.shutdownTimeout;
	}

	/**
	 * Return the idle event interval.
	 * @return the interval.
	 */
	@Nullable
	public Long getIdleEventInterval() {
		return this.idleEventInterval;
	}

	/**
	 * Multiply the {@link #setIdleEventInterval(Long)} by this value until at least
	 * one record is received. Default 5.0.
	 * @return the idleBeforeDataMultiplier.
	 * @since 2.8
	 * @see #getIdleEventInterval()
	 */
	public double getIdleBeforeDataMultiplier() {
		return this.idleBeforeDataMultiplier;
	}

	/**
	 * Return the idle partition event interval.
	 * @return the interval.
	 */
	@Nullable
	public Long getIdlePartitionEventInterval() {
		return this.idlePartitionEventInterval;
	}

	@Nullable
	public PlatformTransactionManager getTransactionManager() {
		return this.transactionManager;
	}

	/**
	 * Set the transaction manager to start a transaction; offsets are committed with
	 * semantics equivalent to {@link AckMode#RECORD} and {@link AckMode#BATCH} depending
	 * on the listener type (record or batch).
	 * @param transactionManager the transaction manager.
	 * @since 1.3
	 * @see #setAckMode(AckMode)
	 */
	public void setTransactionManager(@Nullable PlatformTransactionManager transactionManager) {
		this.transactionManager = transactionManager;
	}

	public int getMonitorInterval() {
		return this.monitorInterval;
	}

	/**
	 * The interval between checks for a non-responsive consumer in
	 * seconds; default {@value #DEFAULT_MONITOR_INTERVAL}.
	 * @param monitorInterval the interval.
	 * @since 1.3.1
	 */
	public void setMonitorInterval(int monitorInterval) {
		this.monitorInterval = monitorInterval;
	}

	/**
	 * Return the task scheduler, if present.
	 * @return the scheduler.
	 */
	@Nullable
	public TaskScheduler getScheduler() {
		return this.scheduler;
	}

	/**
	 * A scheduler used with the monitor interval.
	 * @param scheduler the scheduler.
	 * @since 1.3.1
	 * @see #setMonitorInterval(int)
	 */
	public void setScheduler(@Nullable TaskScheduler scheduler) {
		this.scheduler = scheduler;
	}

	public float getNoPollThreshold() {
		return this.noPollThreshold;
	}

	/**
	 * If the time since the last poll / {@link #getPollTimeout() poll timeout}
	 * exceeds this value, a NonResponsiveConsumerEvent is published.
	 * This value should be more than 1.0 to avoid a race condition that can cause
	 * spurious events to be published.
	 * Default {@value #DEFAULT_NO_POLL_THRESHOLD}.
	 * @param noPollThreshold the threshold
	 * @since 1.3.1
	 */
	public void setNoPollThreshold(float noPollThreshold) {
		this.noPollThreshold = noPollThreshold;
	}

	/**
	 * Log the container configuration if true (INFO).
	 * @return true to log.
	 * @since 2.1.1
	 */
	public boolean isLogContainerConfig() {
		return this.logContainerConfig;
	}

	/**
	 * Set to true to instruct each container to log this configuration.
	 * @param logContainerConfig true to log.
	 * @since 2.1.1
	 */
	public void setLogContainerConfig(boolean logContainerConfig) {
		this.logContainerConfig = logContainerConfig;
	}

	/**
	 * If true, the container won't start if any of the configured topics are not present
	 * on the broker. Does not apply when topic patterns are configured. Default false.
	 * @return the missingTopicsFatal.
	 * @since 2.2
	 */
	public boolean isMissingTopicsFatal() {
		return this.missingTopicsFatal;
	}

	/**
	 * Set to true to prevent the container from starting if any of the configured topics
	 * are not present on the broker. Does not apply when topic patterns are configured.
	 * Default false.
	 * @param missingTopicsFatal the missingTopicsFatal.
	 * @since 2.2
	 */
	public void setMissingTopicsFatal(boolean missingTopicsFatal) {
		this.missingTopicsFatal = missingTopicsFatal;
	}

	/**
	 * The sleep interval in milliseconds used in the main loop between
	 * {@link org.apache.kafka.clients.consumer.Consumer#poll(Duration)} calls.
	 * Defaults to {@code 0} - no idling.
	 * @param idleBetweenPolls the interval to sleep between polling cycles.
	 * @since 2.3
	 */
	public void setIdleBetweenPolls(long idleBetweenPolls) {
		this.idleBetweenPolls = idleBetweenPolls;
	}

	public long getIdleBetweenPolls() {
		return this.idleBetweenPolls;
	}

	public boolean isMicrometerEnabled() {
		return this.micrometerEnabled;
	}

	/**
	 * Set to false to disable the Micrometer listener timers. Default true.
	 * @param micrometerEnabled false to disable.
	 * @since 2.3
	 */
	public void setMicrometerEnabled(boolean micrometerEnabled) {
		this.micrometerEnabled = micrometerEnabled;
	}

	/**
	 * Set additional tags for the Micrometer listener timers.
	 * @param tags the tags.
	 * @since 2.3
	 */
	public void setMicrometerTags(Map<String, String> tags) {
		if (tags != null) {
			this.micrometerTags.putAll(tags);
		}
	}

	public Map<String, String> getMicrometerTags() {
		return Collections.unmodifiableMap(this.micrometerTags);
	}

	public Duration getConsumerStartTimeout() {
		return this.consumerStartTimeout;
	}

	/**
	 * Get the consumer start timeout.
	 * @return the timeout.
	 * @deprecated in favor of {@link #getConsumerStartTimeout()} (typo in method name).
	 */
	@Deprecated
	public Duration getConsumerStartTimout() {
		return this.consumerStartTimeout;
	}

	/**
	 * Set the timeout to wait for a consumer thread to start before logging
	 * an error. Default 30 seconds.
	 * @param consumerStartTimeout the consumer start timeout.
	 */
	public void setConsumerStartTimeout(Duration consumerStartTimeout) {
		// Message previously said 'consumerStartTimout' (typo).
		Assert.notNull(consumerStartTimeout, "'consumerStartTimeout' cannot be null");
		this.consumerStartTimeout = consumerStartTimeout;
	}

	/**
	 * Set the timeout to wait for a consumer thread to start before logging an error.
	 * @param consumerStartTimeout the consumer start timeout.
	 * @deprecated in favor of {@link #setConsumerStartTimeout(Duration)} (typo in method name).
	 */
	@Deprecated
	public void setConsumerStartTimout(Duration consumerStartTimeout) {
		setConsumerStartTimeout(consumerStartTimeout);
	}

	/**
	 * Return whether to split batches by partition.
	 * @return subBatchPerPartition.
	 * @since 2.3.2
	 */
	public boolean isSubBatchPerPartition() {
		return Boolean.TRUE.equals(this.subBatchPerPartition);
	}

	/**
	 * Return whether to split batches by partition; null if not set.
	 * @return subBatchPerPartition.
	 * @since 2.5
	 */
	@Nullable
	public Boolean getSubBatchPerPartition() {
		return this.subBatchPerPartition;
	}

	/**
	 * When using a batch message listener whether to dispatch records by partition (with
	 * a transaction for each sub batch if transactions are in use) or the complete batch
	 * received by the {@code poll()}. Useful when using transactions to enable zombie
	 * fencing, by using a {@code transactional.id} that is unique for each
	 * group/topic/partition. Defaults to true when using transactions with
	 * {@link #setEosMode(EOSMode) EOSMode.ALPHA} and false when not using transactions or
	 * with {@link #setEosMode(EOSMode) EOSMode.BETA}.
	 * @param subBatchPerPartition true for a separate transaction for each partition.
	 * @since 2.3.2
	 */
	public void setSubBatchPerPartition(@Nullable Boolean subBatchPerPartition) {
		this.subBatchPerPartition = subBatchPerPartition;
	}

	public AssignmentCommitOption getAssignmentCommitOption() {
		return this.assignmentCommitOption;
	}

	/**
	 * Set the assignment commit option. Default
	 * {@link AssignmentCommitOption#LATEST_ONLY_NO_TX}.
	 * @param assignmentCommitOption the option.
	 * @since 2.3.6
	 */
	public void setAssignmentCommitOption(AssignmentCommitOption assignmentCommitOption) {
		Assert.notNull(assignmentCommitOption, "'assignmentCommitOption' cannot be null");
		this.assignmentCommitOption = assignmentCommitOption;
	}

	public boolean isDeliveryAttemptHeader() {
		return this.deliveryAttemptHeader;
	}

	/**
	 * Set to true to populate the
	 * {@link org.springframework.kafka.support.KafkaHeaders#DELIVERY_ATTEMPT} header when
	 * the error handler or after rollback processor implements
	 * {@code DeliveryAttemptAware}. There is a small overhead so this is false by
	 * default.
	 * @param deliveryAttemptHeader true to populate
	 * @since 2.5
	 */
	public void setDeliveryAttemptHeader(boolean deliveryAttemptHeader) {
		this.deliveryAttemptHeader = deliveryAttemptHeader;
	}

	/**
	 * Get the exactly once semantics mode.
	 * @return the mode.
	 * @since 2.5
	 * @see #setEosMode(EOSMode)
	 */
	public EOSMode getEosMode() {
		return this.eosMode;
	}

	/**
	 * Set the exactly once semantics mode. When {@link EOSMode#V1} a producer per
	 * group/topic/partition is used (enabling 'transactional.id fencing`).
	 * {@link EOSMode#V2} enables fetch-offset-request fencing, and requires brokers 2.5
	 * or later. With the 2.6 client, the default is now V2 because the 2.6 client can
	 * automatically fall back to ALPHA.
	 * IMPORTANT the 3.0 clients cannot be used with {@link EOSMode#V2} unless the broker
	 * is 2.5 or higher.
	 * @param eosMode the mode; default V2.
	 * @since 2.5
	 */
	public void setEosMode(EOSMode eosMode) {
		Assert.notNull(eosMode, "'eosMode' cannot be null");
		this.eosMode = eosMode;
	}

	/**
	 * Get the transaction definition.
	 * @return the definition.
	 * @since 2.5.4
	 */
	@Nullable
	public TransactionDefinition getTransactionDefinition() {
		return this.transactionDefinition;
	}

	/**
	 * Set a transaction definition with properties (e.g. timeout) that will be copied to
	 * the container's transaction template. Note that this is only generally useful when
	 * used with a {@link #setTransactionManager(PlatformTransactionManager)
	 * PlatformTransactionManager} that supports a custom definition; this does NOT
	 * include the {@link org.springframework.kafka.transaction.KafkaTransactionManager}
	 * which has no concept of transaction timeout. It can be useful to start, for example
	 * a database transaction, in the container, rather than using {@code @Transactional}
	 * on the listener, because then a record interceptor, or filter in a listener adapter
	 * can participate in the transaction.
	 * @param transactionDefinition the definition.
	 * @since 2.5.4
	 * @see #setTransactionManager(PlatformTransactionManager)
	 */
	public void setTransactionDefinition(@Nullable TransactionDefinition transactionDefinition) {
		this.transactionDefinition = transactionDefinition;
	}

	/**
	 * A chain of listener {@link Advice}s.
	 * @return the adviceChain.
	 * @since 2.5.6
	 */
	public Advice[] getAdviceChain() {
		return this.adviceChain.toArray(new Advice[0]);
	}

	/**
	 * Set a chain of listener {@link Advice}s; must not be null or have null elements.
	 * @param adviceChain the adviceChain to set.
	 * @since 2.5.6
	 */
	public void setAdviceChain(Advice... adviceChain) {
		Assert.notNull(adviceChain, "'adviceChain' cannot be null");
		Assert.noNullElements(adviceChain, "'adviceChain' cannot have null elements");
		this.adviceChain.clear();
		this.adviceChain.addAll(Arrays.asList(adviceChain));
		if (this.messageListener != null) {
			adviseListenerIfNeeded();
		}
	}

	/**
	 * When true, the container will stop after a
	 * {@link org.apache.kafka.common.errors.ProducerFencedException}.
	 * @return the stopContainerWhenFenced
	 * @since 2.5.8
	 */
	public boolean isStopContainerWhenFenced() {
		return this.stopContainerWhenFenced;
	}

	/**
	 * Set to true to stop the container when a
	 * {@link org.apache.kafka.common.errors.ProducerFencedException} is thrown.
	 * Currently, there is no way to determine if such an exception is thrown due to a
	 * rebalance Vs. a timeout. We therefore cannot call the after rollback processor. The
	 * best solution is to ensure that the {@code transaction.timeout.ms} is large enough
	 * so that transactions don't time out.
	 * @param stopContainerWhenFenced true to stop the container.
	 * @since 2.5.8
	 */
	public void setStopContainerWhenFenced(boolean stopContainerWhenFenced) {
		this.stopContainerWhenFenced = stopContainerWhenFenced;
	}

	/**
	 * When true, the container will be stopped immediately after processing the current record.
	 * @return true to stop immediately.
	 * @since 2.5.11
	 */
	public boolean isStopImmediate() {
		return this.stopImmediate;
	}

	/**
	 * Set to true to stop the container after processing the current record (when stop()
	 * is called). When false (default), the container will stop after all the results of
	 * the previous poll are processed.
	 * @param stopImmediate true to stop after the current record.
	 * @since 2.5.11
	 */
	public void setStopImmediate(boolean stopImmediate) {
		this.stopImmediate = stopImmediate;
	}

	/**
	 * When true, async manual acknowledgments are supported.
	 * @return true for async ack support.
	 * @since 2.8
	 */
	public boolean isAsyncAcks() {
		return this.asyncAcks;
	}

	/**
	 * Set to true to support asynchronous record acknowledgments. Only applies with
	 * {@link AckMode#MANUAL} or {@link AckMode#MANUAL_IMMEDIATE}. Out of order offset
	 * commits are deferred until all previous offsets in the partition have been
	 * committed. The consumer is paused, if necessary, until all acks have been
	 * completed.
	 * @param asyncAcks true to use async acks.
	 * @since 2.8
	 */
	public void setAsyncAcks(boolean asyncAcks) {
		this.asyncAcks = asyncAcks;
	}

	/**
	 * Apply the configured advice chain to the current message listener. If the
	 * listener is already an AOP proxy, the advices are re-applied (removed first to
	 * avoid duplicates); otherwise the listener is replaced by a proxy wrapping it.
	 */
	private void adviseListenerIfNeeded() {
		if (!CollectionUtils.isEmpty(this.adviceChain)) {
			if (AopUtils.isAopProxy(this.messageListener)) {
				Advised advised = (Advised) this.messageListener;
				this.adviceChain.forEach(advised::removeAdvice);
				this.adviceChain.forEach(advised::addAdvice);
			}
			else {
				ProxyFactory pf = new ProxyFactory(this.messageListener);
				this.adviceChain.forEach(pf::addAdvice);
				this.messageListener = pf.getProxy();
			}
		}
	}

	@Override
	public String toString() {
		return "ContainerProperties ["
				+ renderProperties()
				+ "\n ackMode=" + this.ackMode
				+ "\n ackCount=" + this.ackCount
				+ "\n ackTime=" + this.ackTime
				+ "\n messageListener=" + this.messageListener
				+ (this.consumerTaskExecutor != null
						? "\n consumerTaskExecutor=" + this.consumerTaskExecutor
						: "")
				+ "\n shutdownTimeout=" + this.shutdownTimeout
				+ "\n idleEventInterval="
				+ (this.idleEventInterval == null ? "not enabled" : this.idleEventInterval)
				+ "\n idlePartitionEventInterval="
				+ (this.idlePartitionEventInterval == null ? "not enabled" : this.idlePartitionEventInterval)
				+ (this.transactionManager != null
						? "\n transactionManager=" + this.transactionManager
						: "")
				+ "\n monitorInterval=" + this.monitorInterval
				+ (this.scheduler != null ? "\n scheduler=" + this.scheduler : "")
				+ "\n noPollThreshold=" + this.noPollThreshold
				+ "\n subBatchPerPartition=" + this.subBatchPerPartition
				+ "\n assignmentCommitOption=" + this.assignmentCommitOption
				+ "\n deliveryAttemptHeader=" + this.deliveryAttemptHeader
				+ "\n eosMode=" + this.eosMode
				+ "\n transactionDefinition=" + this.transactionDefinition
				+ "\n stopContainerWhenFenced=" + this.stopContainerWhenFenced
				+ "\n stopImmediate=" + this.stopImmediate
				+ "\n asyncAcks=" + this.asyncAcks
				// fixed: missing '=' made this render as e.g. "idleBeforeDataMultiplier5.0"
				+ "\n idleBeforeDataMultiplier=" + this.idleBeforeDataMultiplier
				+ "\n]";
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerStoppingBatchErrorHandler.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.concurrent.Executor;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* A container error handler that stops the container after an exception
* is thrown by the listener.
*
* @author Gary Russell
* @since 2.1
* @deprecated in favor of {@link CommonContainerStoppingErrorHandler}.
*
*/
@Deprecated
public class ContainerStoppingBatchErrorHandler extends KafkaExceptionLogLevelAware
		implements ContainerAwareBatchErrorHandler {

	// Runs container.stop() off the listener thread; stop() would otherwise
	// deadlock waiting for the very thread that invoked this handler.
	private final Executor executor;

	/**
	 * Construct an instance with a {@link SimpleAsyncTaskExecutor}.
	 */
	public ContainerStoppingBatchErrorHandler() {
		this(new SimpleAsyncTaskExecutor());
	}

	/**
	 * Construct an instance with the provided {@link Executor}.
	 * @param executor the executor.
	 */
	public ContainerStoppingBatchErrorHandler(Executor executor) {
		Assert.notNull(executor, "'executor' cannot be null");
		this.executor = executor;
	}

	@Override
	public void handle(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		this.executor.execute(container::stop);
		// isRunning() flips to false before container.stop() finishes waiting for the
		// listener thread, so poll it for a bounded time (100 * 100ms = 10s max).
		for (int attempt = 0; attempt < 100 && container.isRunning(); attempt++) { // NOSONAR magic #
			try {
				Thread.sleep(100); // NOSONAR magic #
			}
			catch (InterruptedException e) {
				Thread.currentThread().interrupt();
				break;
			}
		}
		throw new KafkaException("Stopped container", getLogLevel(), thrownException);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ContainerStoppingErrorHandler.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import java.util.concurrent.Executor;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.kafka.KafkaException;
import org.springframework.util.Assert;
/**
* A container error handler that stops the container after an exception
* is thrown by the listener.
*
* @author Gary Russell
* @since 2.1
* @deprecated in favor of {@link CommonContainerStoppingErrorHandler}.
*
*/
@Deprecated
public class ContainerStoppingErrorHandler extends KafkaExceptionLogLevelAware implements ContainerAwareErrorHandler {

    private static final int MAX_STOP_CHECKS = 100; // NOSONAR magic #

    private static final long STOP_CHECK_SLEEP_MS = 100; // NOSONAR magic #

    private final Executor executor;

    /**
     * Construct an instance with a default {@link SimpleAsyncTaskExecutor}.
     */
    public ContainerStoppingErrorHandler() {
        this.executor = new SimpleAsyncTaskExecutor();
    }

    /**
     * Construct an instance with the provided {@link Executor}.
     * @param executor the executor.
     */
    public ContainerStoppingErrorHandler(Executor executor) {
        Assert.notNull(executor, "'executor' cannot be null");
        this.executor = executor;
    }

    @Override
    public void handle(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
            MessageListenerContainer container) {

        // The stop must run asynchronously: container.stop() waits for the
        // listener thread, which is the current thread.
        this.executor.execute(container::stop);
        // isRunning() is false before stop() completes its wait; poll until the
        // flag flips, bounded at MAX_STOP_CHECKS * STOP_CHECK_SLEEP_MS.
        int attempts = 0;
        while (container.isRunning() && attempts++ < MAX_STOP_CHECKS) {
            try {
                Thread.sleep(STOP_CHECK_SLEEP_MS);
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            }
        }
        throw new KafkaException("Stopped container", getLogLevel(), thrownException);
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BiFunction;
import java.util.function.Function;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.serializer.DeserializationException;
import org.springframework.kafka.support.serializer.SerializationUtils;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.concurrent.ListenableFuture;
/**
* A {@link ConsumerRecordRecoverer} that publishes a failed record to a dead-letter
* topic.
*
* @author Gary Russell
* @author Tomaz Fernandes
* @since 2.2
*
*/
public class DeadLetterPublishingRecoverer extends ExceptionClassifier implements ConsumerAwareRecordRecoverer {
// Suppression token shared by members that call deprecated ListenerUtils methods.
private static final String DEPRECATION = "deprecation";
// Default "no extra headers" function; compared in set/addHeadersFunction() to
// detect whether a custom function was already configured.
private static final BiFunction<ConsumerRecord<?, ?>, Exception, Headers> DEFAULT_HEADERS_FUNCTION =
(rec, ex) -> null;
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR
// Default destination: "<original topic>.DLT" with the same partition as the failed record.
private static final BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition>
DEFAULT_DESTINATION_RESOLVER = (cr, e) -> new TopicPartition(cr.topic() + ".DLT", cr.partition());
private static final long FIVE = 5L;
private static final long THIRTY = 30L;
// NOTE(review): getHeaderNames() is overridable and is invoked here during field
// initialization, i.e. before any subclass constructor body has run; overriders
// must not depend on their own instance state.
private final HeaderNames headerNames = getHeaderNames();
// True when the first (and, per constructor assertion, every) template is transactional.
private final boolean transactional;
// Maps (failed record, exception) -> dead-letter TopicPartition.
private final BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver;
// Maps an outgoing record to the KafkaOperations used to publish it.
private final Function<ProducerRecord<?, ?>, KafkaOperations<?, ?>> templateResolver;
// Inclusion set for the DLT_* headers; managed by includeHeader()/excludeHeader().
private final EnumSet<HeaderNames.HeadersToAdd> whichHeaders = EnumSet.allOf(HeaderNames.HeadersToAdd.class);
private boolean retainExceptionHeader;
// Supplier of additional user headers; see setHeadersFunction()/addHeadersFunction().
private BiFunction<ConsumerRecord<?, ?>, Exception, Headers> headersFunction = DEFAULT_HEADERS_FUNCTION;
private boolean verifyPartition = true;
private Duration partitionInfoTimeout = Duration.ofSeconds(FIVE);
private Duration waitForSendResultTimeout = Duration.ofSeconds(THIRTY);
private boolean appendOriginalHeaders = true;
private boolean failIfSendResultIsError = true;
private boolean throwIfNoDestinationReturned = false;
private long timeoutBuffer = Duration.ofSeconds(FIVE).toMillis();
private boolean stripPreviousExceptionHeaders = true;
private boolean skipSameTopicFatalExceptions = true;
// Strategy for writing exception headers; defaults to this class's own implementation.
private ExceptionHeadersCreator exceptionHeadersCreator = this::addExceptionInfoHeaders;
/**
* Create an instance with the provided template and a default destination resolving
* function that returns a TopicPartition based on the original topic (appended with ".DLT")
* from the failed record, and the same partition as the failed record. Therefore the
* dead-letter topic must have at least as many partitions as the original topic.
* @param template the {@link KafkaOperations} to use for publishing.
*/
public DeadLetterPublishingRecoverer(KafkaOperations<? extends Object, ? extends Object> template) {
// Delegate with the "<topic>.DLT", same-partition default resolver.
this(template, DEFAULT_DESTINATION_RESOLVER);
}
/**
* Create an instance with the provided template and destination resolving function,
* that receives the failed consumer record and the exception and returns a
* {@link TopicPartition}. If the partition in the {@link TopicPartition} is less than
* 0, no partition is set when publishing to the topic.
* @param template the {@link KafkaOperations} to use for publishing.
* @param destinationResolver the resolving function.
*/
public DeadLetterPublishingRecoverer(KafkaOperations<? extends Object, ? extends Object> template,
BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {
// A one-entry map keyed by Object: this template handles all record value types.
this(Collections.singletonMap(Object.class, template), destinationResolver);
}
/**
* Create an instance with the provided templates and a default destination resolving
* function that returns a TopicPartition based on the original topic (appended with
* ".DLT") from the failed record, and the same partition as the failed record.
* Therefore the dead-letter topic must have at least as many partitions as the
* original topic. The templates map keys are classes and the value the corresponding
* template to use for objects (producer record values) of that type. A
* {@link java.util.LinkedHashMap} is recommended when there is more than one
* template, to ensure the map is traversed in order. To send records with a null
* value, add a template with the {@link Void} class as a key; otherwise the first
* template from the map values iterator will be used.
* @param templates the {@link KafkaOperations}s to use for publishing.
*/
public DeadLetterPublishingRecoverer(Map<Class<?>, KafkaOperations<? extends Object, ? extends Object>> templates) {
// Delegate with the "<topic>.DLT", same-partition default resolver.
this(templates, DEFAULT_DESTINATION_RESOLVER);
}
/**
* Create an instance with the provided templates and destination resolving function,
* that receives the failed consumer record and the exception and returns a
* {@link TopicPartition}. If the partition in the {@link TopicPartition} is less than
* 0, no partition is set when publishing to the topic. The templates map keys are
* classes and the value the corresponding template to use for objects (producer
* record values) of that type. A {@link java.util.LinkedHashMap} is recommended when
* there is more than one template, to ensure the map is traversed in order. To send
* records with a null value, add a template with the {@link Void} class as a key;
* otherwise the first template from the map values iterator will be used.
* @param templates the {@link KafkaOperations}s to use for publishing.
* @param destinationResolver the resolving function.
*/
@SuppressWarnings("unchecked")
public DeadLetterPublishingRecoverer(Map<Class<?>, KafkaOperations<? extends Object, ? extends Object>> templates,
        BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {

    Assert.isTrue(!ObjectUtils.isEmpty(templates), "At least one template is required");
    Assert.notNull(destinationResolver, "The destinationResolver cannot be null");
    KafkaOperations<?, ?> firstTemplate = templates.values().iterator().next();
    // Single template: resolve unconditionally; otherwise match on the record's value type.
    this.templateResolver = templates.size() == 1
            ? producerRecord -> firstTemplate
            : producerRecord -> findTemplateForValue(producerRecord.value(), templates);
    this.transactional = firstTemplate.isTransactional();
    // Mixing transactional and non-transactional templates is not supported; compare
    // with a primitive (no Boolean boxing/equals needed).
    boolean tx = this.transactional;
    Assert.isTrue(templates.values()
            .stream()
            .allMatch(t -> t.isTransactional() == tx),
            "All templates must have the same setting for transactional");
    this.destinationResolver = destinationResolver;
}
/**
* Create an instance taking a template resolving function (failed producer record to
* {@link KafkaOperations}), an explicit flag saying whether publishing from this
* instance is transactional, and a destination resolving function (failed consumer
* record plus exception to {@link TopicPartition}). A negative partition in the
* returned {@link TopicPartition} means no partition is set when publishing.
*
* @param templateResolver the function that resolver the {@link KafkaOperations} to use for publishing.
* @param transactional whether or not publishing by this instance should be transactional
* @param destinationResolver the resolving function.
* @since 2.7
*/
public DeadLetterPublishingRecoverer(Function<ProducerRecord<?, ?>, KafkaOperations<?, ?>> templateResolver,
boolean transactional,
BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {
Assert.notNull(templateResolver, "The templateResolver cannot be null");
Assert.notNull(destinationResolver, "The destinationResolver cannot be null");
// Unlike the map-based constructors, transactionality is supplied explicitly here.
this.templateResolver = templateResolver;
this.destinationResolver = destinationResolver;
this.transactional = transactional;
}
/**
* Set to true to retain a Java serialized {@link DeserializationException} header. By
* default, such headers are removed from the published record, unless both key and
* value deserialization exceptions occur, in which case, the DLT_* headers are
* created from the value exception and the key exception header is retained.
* @param retainExceptionHeader true to retain the serialized header.
* @since 2.5
*/
public void setRetainExceptionHeader(boolean retainExceptionHeader) {
this.retainExceptionHeader = retainExceptionHeader;
}
/**
* Set a function which will be called to obtain additional headers to add to the
* published record. Replaces any previously configured function.
* @param headersFunction the headers function.
* @since 2.5.4
* @see #addHeadersFunction(BiFunction)
*/
public void setHeadersFunction(BiFunction<ConsumerRecord<?, ?>, Exception, Headers> headersFunction) {
Assert.notNull(headersFunction, "'headersFunction' cannot be null");
// Warn when a previously configured custom function is being silently replaced.
if (!this.headersFunction.equals(DEFAULT_HEADERS_FUNCTION)) {
this.logger.warn(() -> "Replacing custom headers function: " + this.headersFunction
+ ", consider using addHeadersFunction() if you need multiple functions");
}
this.headersFunction = headersFunction;
}
/**
* Set to false to disable partition verification. When true, verify that the
* partition returned by the resolver actually exists. If not, set the
* {@link ProducerRecord#partition()} to null, allowing the producer to determine the
* destination partition.
* @param verifyPartition false to disable.
* @since 2.7
* @see #setPartitionInfoTimeout(Duration)
*/
public void setVerifyPartition(boolean verifyPartition) {
// Verification itself is performed by checkPartition() during accept().
this.verifyPartition = verifyPartition;
}
/**
* Time to wait for partition information when verifying. Default is 5 seconds.
* @param partitionInfoTimeout the timeout.
* @since 2.7
* @see #setVerifyPartition(boolean)
*/
public void setPartitionInfoTimeout(Duration partitionInfoTimeout) {
Assert.notNull(partitionInfoTimeout, "'partitionInfoTimeout' cannot be null");
// Used as the timeout for Consumer.partitionsFor() in checkPartition().
this.partitionInfoTimeout = partitionInfoTimeout;
}
/**
* Set to false if you don't want to append the current "original" headers (topic,
* partition etc.) if they are already present. When false, only the first "original"
* headers are retained.
* @param replaceOriginalHeaders set to false not to replace.
* @since 2.7
* @deprecated in favor of {@link #setAppendOriginalHeaders(boolean)}.
*/
@Deprecated
public void setReplaceOriginalHeaders(boolean replaceOriginalHeaders) {
// Writes the same flag as setAppendOriginalHeaders(); retained for backwards compatibility.
this.appendOriginalHeaders = replaceOriginalHeaders;
}
/**
* Set to false if you don't want to append the current "original" headers (topic,
* partition etc.) if they are already present. When false, only the first "original"
* headers are retained.
* @param appendOriginalHeaders set to false not to append.
* @since 2.7.9
*/
public void setAppendOriginalHeaders(boolean appendOriginalHeaders) {
this.appendOriginalHeaders = appendOriginalHeaders;
}
/**
* Set to true to throw an exception if the destination resolver function returns
* a null TopicPartition. When false (the default), a null destination merely logs
* and skips recovery (see the handling in {@code maybeThrow}).
* @param throwIfNoDestinationReturned true to enable.
* @since 2.7
*/
public void setThrowIfNoDestinationReturned(boolean throwIfNoDestinationReturned) {
this.throwIfNoDestinationReturned = throwIfNoDestinationReturned;
}
/**
* Set to true to enable waiting for the send result and throw an exception if it fails.
* It will wait for the milliseconds specified in waitForSendResultTimeout for the result.
* @param failIfSendResultIsError true to enable.
* @since 2.7
* @see #setWaitForSendResultTimeout(Duration)
*/
public void setFailIfSendResultIsError(boolean failIfSendResultIsError) {
// Checked in publish(): when true, the send future is awaited in verifySendResult().
this.failIfSendResultIsError = failIfSendResultIsError;
}
/**
* Set the minimum time to wait for message sending. Default is the producer
* configuration {@code delivery.timeout.ms} plus the {@link #setTimeoutBuffer(long)}.
* @param waitForSendResultTimeout the timeout.
* @since 2.7
* @see #setFailIfSendResultIsError(boolean)
* @see #setTimeoutBuffer(long)
*/
public void setWaitForSendResultTimeout(Duration waitForSendResultTimeout) {
this.waitForSendResultTimeout = waitForSendResultTimeout;
}
/**
* Set the number of milliseconds to add to the producer configuration {@code delivery.timeout.ms}
* property to avoid timing out before the Kafka producer. Default 5000.
* @param buffer the buffer.
* @since 2.7
* @see #setWaitForSendResultTimeout(Duration)
*/
public void setTimeoutBuffer(long buffer) {
// Passed to KafkaUtils.determineSendTimeout() when computing the wait in determineSendTimeout().
this.timeoutBuffer = buffer;
}
/**
* Set to false to retain previous exception headers as well as headers for the
* current exception. Default is true, which means only the current headers are
* retained; setting it to false can cause a growth in record size when a record
* is republished many times.
* @param stripPreviousExceptionHeaders false to retain all.
* @since 2.7.9
*/
public void setStripPreviousExceptionHeaders(boolean stripPreviousExceptionHeaders) {
this.stripPreviousExceptionHeaders = stripPreviousExceptionHeaders;
}
/**
* Set to false if you want to forward the record to the same topic even though
* the exception is fatal by this class' classification (see
* {@code getClassifier().classify(...)} in {@code accept}), e.g. to handle this
* scenario in a different layer.
* @param skipSameTopicFatalExceptions false to forward in this scenario.
*/
public void setSkipSameTopicFatalExceptions(boolean skipSameTopicFatalExceptions) {
this.skipSameTopicFatalExceptions = skipSameTopicFatalExceptions;
}
/**
* Set a {@link ExceptionHeadersCreator} implementation to completely take over
* setting the exception headers in the output record. Disables all headers that are
* set by default.
* @param headersCreator the creator.
* @since 2.8.4
*/
public void setExceptionHeadersCreator(ExceptionHeadersCreator headersCreator) {
Assert.notNull(headersCreator, "'headersCreator' cannot be null");
// Replaces the default this::addExceptionInfoHeaders strategy.
this.exceptionHeadersCreator = headersCreator;
}
/**
* Clear the header inclusion bit for the header name, so that header is no longer
* added to published records.
* @param headers the headers to clear.
* @since 2.8.4
*/
public void excludeHeader(HeaderNames.HeadersToAdd... headers) {
Assert.notNull(headers, "'headers' cannot be null");
Assert.noNullElements(headers, "'headers' cannot include null elements");
for (HeaderNames.HeadersToAdd header : headers) {
this.whichHeaders.remove(header);
}
}
/**
* Set the header inclusion bit for the header name, so that header is added to
* published records (all headers are included by default).
* @param headers the headers to set.
* @since 2.8.4
*/
public void includeHeader(HeaderNames.HeadersToAdd... headers) {
Assert.notNull(headers, "'headers' cannot be null");
Assert.noNullElements(headers, "'headers' cannot include null elements");
for (HeaderNames.HeadersToAdd header : headers) {
this.whichHeaders.add(header);
}
}
/**
* Add a function which will be called to obtain additional headers to add to the
* published record. Functions are called in the order that they are added, and after
* any function passed into {@link #setHeadersFunction(BiFunction)}.
* @param headersFunction the headers function.
* @since 2.8.4
* @see #setHeadersFunction(BiFunction)
*/
public void addHeadersFunction(BiFunction<ConsumerRecord<?, ?>, Exception, Headers> headersFunction) {
Assert.notNull(headersFunction, "'headersFunction' cannot be null");
// If only the default (null-returning) function is in place, just install the new one.
if (this.headersFunction.equals(DEFAULT_HEADERS_FUNCTION)) {
this.headersFunction = headersFunction;
}
else {
// Compose: invoke the existing function first, then merge in the new function's headers.
BiFunction<ConsumerRecord<?, ?>, Exception, Headers> toCompose = this.headersFunction;
this.headersFunction = (rec, ex) -> {
Headers headers1 = toCompose.apply(rec, ex);
if (headers1 == null) {
headers1 = new RecordHeaders();
}
Headers headers2 = headersFunction.apply(rec, ex);
try {
if (headers2 != null) {
headers2.forEach(headers1::add);
}
}
catch (IllegalStateException isex) {
// headers1 may be a read-only Headers implementation; copy it to a
// mutable RecordHeaders and retry the merge.
headers1 = new RecordHeaders(headers1);
headers2.forEach(headers1::add); // NOSONAR, never null here
}
return headers1;
};
}
}
/**
* Recover the failed record by publishing it to the dead-letter destination returned
* by the destination resolver, carrying the original-record and exception headers.
*/
@SuppressWarnings({ "unchecked", DEPRECATION })
@Override
public void accept(ConsumerRecord<?, ?> record, @Nullable Consumer<?, ?> consumer, Exception exception) {
// 1) Resolve the destination; a null result means "skip" (or throw, if configured).
TopicPartition tp = this.destinationResolver.apply(record, exception);
if (tp == null) {
maybeThrow(record, exception);
this.logger.debug(() -> "Recovery of " + ListenerUtils.recordToString(record, true)
+ " skipped because destination resolver returned null");
return;
}
// 2) Refuse to republish a fatally-classified record back to its own topic (unless disabled).
if (this.skipSameTopicFatalExceptions
&& tp.topic().equals(record.topic())
&& !getClassifier().classify(exception)) {
this.logger.error("Recovery of " + ListenerUtils.recordToString(record, true)
+ " skipped because not retryable exception " + exception.toString()
+ " and the destination resolver routed back to the same topic");
return;
}
// 3) Optionally verify the resolved partition actually exists.
if (consumer != null && this.verifyPartition) {
tp = checkPartition(tp, consumer);
}
// 4) Recover the original serialized key/value when deserialization failed;
// the raw data travels in the DeserializationException headers.
DeserializationException vDeserEx = ListenerUtils.getExceptionFromHeader(record,
SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, this.logger);
DeserializationException kDeserEx = ListenerUtils.getExceptionFromHeader(record,
SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER, this.logger);
// Work on a mutable copy of the record's headers.
Headers headers = new RecordHeaders(record.headers().toArray());
addAndEnhanceHeaders(record, exception, vDeserEx, kDeserEx, headers);
ProducerRecord<Object, Object> outRecord = createProducerRecord(record, tp, headers,
kDeserEx == null ? null : kDeserEx.getData(), vDeserEx == null ? null : vDeserEx.getData());
// 5) Pick the template for this record's value type and publish.
KafkaOperations<Object, Object> kafkaTemplate =
(KafkaOperations<Object, Object>) this.templateResolver.apply(outRecord);
sendOrThrow(outRecord, kafkaTemplate, record);
}
// Write the DLT exception headers (key and/or value deserialization exceptions when
// present, otherwise the listener exception) and then the "original record" headers
// plus any user-supplied headers.
private void addAndEnhanceHeaders(ConsumerRecord<?, ?> record, Exception exception,
@Nullable DeserializationException vDeserEx, @Nullable DeserializationException kDeserEx, Headers headers) {
if (kDeserEx != null) {
// Drop the serialized key exception header unless configured to retain it.
if (!this.retainExceptionHeader) {
headers.remove(SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER);
}
this.exceptionHeadersCreator.create(headers, kDeserEx, true, this.headerNames);
}
if (vDeserEx != null) {
if (!this.retainExceptionHeader) {
headers.remove(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER);
}
this.exceptionHeadersCreator.create(headers, vDeserEx, false, this.headerNames);
}
// No deserialization failure: record the listener exception itself.
if (kDeserEx == null && vDeserEx == null) {
this.exceptionHeadersCreator.create(headers, exception, false, this.headerNames);
}
enhanceHeaders(headers, record, exception); // NOSONAR headers are never null
}
// Publish via the resolved template; a null template is a configuration error.
private void sendOrThrow(ProducerRecord<Object, Object> outRecord,
@Nullable KafkaOperations<Object, Object> kafkaTemplate, ConsumerRecord<?, ?> inRecord) {
if (kafkaTemplate == null) {
throw new IllegalArgumentException("No kafka template returned for record " + outRecord);
}
send(outRecord, kafkaTemplate, inRecord);
}
// Log (and optionally throw, per throwIfNoDestinationReturned) when the destination
// resolver returned null.
// NOTE(review): the message text says "failIfNoDestinationReturned" while the actual
// property is "throwIfNoDestinationReturned" - looks like a leftover name; confirm
// before changing the user-visible text.
private void maybeThrow(ConsumerRecord<?, ?> record, Exception exception) {
@SuppressWarnings(DEPRECATION)
String message = String.format("No destination returned for record %s and exception %s. " +
"failIfNoDestinationReturned: %s", ListenerUtils.recordToString(record), exception,
this.throwIfNoDestinationReturned);
this.logger.warn(message);
if (this.throwIfNoDestinationReturned) {
throw new IllegalArgumentException(message);
}
}
/**
* Send the record.
* @param outRecord the record.
* @param kafkaTemplate the template.
* @param inRecord the consumer record.
* @since 2.7
*/
protected void send(ProducerRecord<Object, Object> outRecord, KafkaOperations<Object, Object> kafkaTemplate,
ConsumerRecord<?, ?> inRecord) {
// A transactional template needs a transaction: start one unless we are already
// inside a transaction or the template permits non-transactional sends.
if (this.transactional && !kafkaTemplate.inTransaction() && !kafkaTemplate.isAllowNonTransactional()) {
kafkaTemplate.executeInTransaction(t -> {
publish(outRecord, t, inRecord);
return null;
});
}
else {
publish(outRecord, kafkaTemplate, inRecord);
}
}
// Verify that the resolved partition exists; if it does not (or cannot be checked),
// fall back appropriately. Returns the original TopicPartition, or one with
// partition -1 so the producer chooses the partition.
private TopicPartition checkPartition(TopicPartition tp, Consumer<?, ?> consumer) {
// A negative partition already means "let the producer choose" - nothing to verify.
if (tp.partition() < 0) {
return tp;
}
try {
List<PartitionInfo> partitions = consumer.partitionsFor(tp.topic(), this.partitionInfoTimeout);
if (partitions == null) {
this.logger.debug(() -> "Could not obtain partition info for " + tp.topic());
return tp;
}
boolean exists = partitions.stream()
.anyMatch(info -> info.partition() == tp.partition());
if (exists) {
return tp;
}
this.logger.warn(() -> "Destination resolver returned non-existent partition " + tp
+ ", KafkaProducer will determine partition to use for this topic");
return new TopicPartition(tp.topic(), -1);
}
catch (Exception ex) {
// Metadata lookup failed; proceed with the resolved partition as-is.
this.logger.debug(ex, () -> "Could not obtain partition info for " + tp.topic());
return tp;
}
}
// Choose the template whose key class accepts the record value's type; map iteration
// order decides ties (hence the LinkedHashMap recommendation in the constructors).
@SuppressWarnings("unchecked")
private KafkaOperations<Object, Object> findTemplateForValue(@Nullable Object value,
Map<Class<?>, KafkaOperations<?, ?>> templates) {
if (value == null) {
// Null values go to the Void-keyed template when present, else the first template.
KafkaOperations<?, ?> voidTemplate = templates.get(Void.class);
return (KafkaOperations<Object, Object>) (voidTemplate != null
? voidTemplate
: templates.values().iterator().next());
}
for (Map.Entry<Class<?>, KafkaOperations<?, ?>> entry : templates.entrySet()) {
if (entry.getKey().isAssignableFrom(value.getClass())) {
return (KafkaOperations<Object, Object>) entry.getValue();
}
}
this.logger.warn(() -> "Failed to find a template for " + value.getClass() + " attempting to use the last entry");
// No match: fall back to the last template in iteration order.
return (KafkaOperations<Object, Object>) templates.values()
.stream()
.reduce((first, second) -> second)
.get();
}
/**
* Subclasses can override this method to customize the producer record to send to the
* DLQ. The default implementation simply copies the key and value from the consumer
* record and adds the headers. The timestamp is not set (the original timestamp is in
* one of the headers). IMPORTANT: if the partition in the {@link TopicPartition} is
* less than 0, it must be set to null in the {@link ProducerRecord}.
* @param record the failed record
* @param topicPartition the {@link TopicPartition} returned by the destination
* resolver.
* @param headers the headers - original record headers plus DLT headers.
* @param key the key to use instead of the consumer record key.
* @param value the value to use instead of the consumer record value.
* @return the producer record to send.
* @see KafkaHeaders
*/
protected ProducerRecord<Object, Object> createProducerRecord(ConsumerRecord<?, ?> record,
TopicPartition topicPartition, Headers headers, @Nullable byte[] key, @Nullable byte[] value) {
// Negative partition -> null, so the producer picks the partition.
Integer partition = topicPartition.partition() < 0 ? null : topicPartition.partition();
// Prefer the recovered raw key/value (from deserialization failures) over the record's.
Object outKey = key != null ? key : record.key();
Object outValue = value != null ? value : record.value();
return new ProducerRecord<>(topicPartition.topic(), partition, outKey, outValue, headers);
}
/**
* Override this if you want more than just logging of the send result.
* @param outRecord the record to send.
* @param kafkaTemplate the template.
* @param inRecord the consumer record.
* @since 2.2.5
*/
@SuppressWarnings(DEPRECATION)
protected void publish(ProducerRecord<Object, Object> outRecord, KafkaOperations<Object, Object> kafkaTemplate,
ConsumerRecord<?, ?> inRecord) {
ListenableFuture<SendResult<Object, Object>> sendResult = null;
try {
sendResult = kafkaTemplate.send(outRecord);
// Asynchronous outcome logging; failures are also surfaced below when
// failIfSendResultIsError is enabled.
sendResult.addCallback(result -> {
this.logger.debug(() -> "Successful dead-letter publication: "
+ ListenerUtils.recordToString(inRecord, true) + " to " + result.getRecordMetadata());
}, ex -> {
this.logger.error(ex, () -> pubFailMessage(outRecord, inRecord));
});
}
catch (Exception e) {
// A synchronous send failure is logged here; sendResult stays null and
// verifySendResult() (if enabled) converts that into a KafkaException.
this.logger.error(e, () -> pubFailMessage(outRecord, inRecord));
}
if (this.failIfSendResultIsError) {
verifySendResult(kafkaTemplate, outRecord, sendResult, inRecord);
}
}
// Block until the send future completes (bounded by determineSendTimeout()) and
// convert any failure - including a null future from a synchronous send error -
// into a KafkaException.
private void verifySendResult(KafkaOperations<Object, Object> kafkaTemplate,
ProducerRecord<Object, Object> outRecord,
@Nullable ListenableFuture<SendResult<Object, Object>> sendResult, ConsumerRecord<?, ?> inRecord) {
Duration sendTimeout = determineSendTimeout(kafkaTemplate);
if (sendResult == null) {
// send() threw before a future was obtained.
throw new KafkaException(pubFailMessage(outRecord, inRecord));
}
try {
sendResult.get(sendTimeout.toMillis(), TimeUnit.MILLISECONDS);
}
catch (InterruptedException e) {
// Restore the interrupt flag before surfacing the failure.
Thread.currentThread().interrupt();
throw new KafkaException(pubFailMessage(outRecord, inRecord), e);
}
catch (ExecutionException | TimeoutException e) {
throw new KafkaException(pubFailMessage(outRecord, inRecord), e);
}
}
// Build the failure message used in logs and exceptions.
// Fix: a space was missing before "failed", producing e.g. "to my.topic.DLTfailed for:".
@SuppressWarnings(DEPRECATION)
private String pubFailMessage(ProducerRecord<Object, Object> outRecord, ConsumerRecord<?, ?> inRecord) {
return "Dead-letter publication to "
+ outRecord.topic() + " failed for: " + ListenerUtils.recordToString(inRecord, true);
}
// Compute how long to wait for a send result: delivery.timeout.ms from the producer
// configuration plus the configured buffer (via KafkaUtils), falling back to 30s
// when the producer factory/properties are unavailable (mock tests).
private Duration determineSendTimeout(KafkaOperations<?, ?> template) {
ProducerFactory<? extends Object, ? extends Object> producerFactory = template.getProducerFactory();
if (producerFactory != null) { // NOSONAR - will only occur in mock tests
Map<String, Object> props = producerFactory.getConfigurationProperties();
if (props != null) { // NOSONAR - will only occur in mock tests
return KafkaUtils.determineSendTimeout(props, this.timeoutBuffer,
this.waitForSendResultTimeout.toMillis());
}
}
return Duration.ofSeconds(THIRTY);
}
// Add the "original record" headers first, then any user-supplied extra headers.
private void enhanceHeaders(Headers kafkaHeaders, ConsumerRecord<?, ?> record, Exception exception) {
maybeAddOriginalHeaders(kafkaHeaders, record, exception);
Headers extra = this.headersFunction.apply(record, exception);
if (extra != null) {
extra.forEach(kafkaHeaders::add);
}
}
// Record provenance headers: original topic, partition (4-byte int), offset and
// timestamp (8-byte longs, ByteBuffer default big-endian), timestamp type, and -
// when available from a ListenerExecutionFailedException - the consumer group.
// Each is subject to the include/exclude bits and the append-vs-first policy.
private void maybeAddOriginalHeaders(Headers kafkaHeaders, ConsumerRecord<?, ?> record, Exception ex) {
maybeAddHeader(kafkaHeaders, this.headerNames.original.topicHeader,
record.topic().getBytes(StandardCharsets.UTF_8), HeaderNames.HeadersToAdd.TOPIC);
maybeAddHeader(kafkaHeaders, this.headerNames.original.partitionHeader,
ByteBuffer.allocate(Integer.BYTES).putInt(record.partition()).array(),
HeaderNames.HeadersToAdd.PARTITION);
maybeAddHeader(kafkaHeaders, this.headerNames.original.offsetHeader,
ByteBuffer.allocate(Long.BYTES).putLong(record.offset()).array(), HeaderNames.HeadersToAdd.OFFSET);
maybeAddHeader(kafkaHeaders, this.headerNames.original.timestampHeader,
ByteBuffer.allocate(Long.BYTES).putLong(record.timestamp()).array(), HeaderNames.HeadersToAdd.TS);
maybeAddHeader(kafkaHeaders, this.headerNames.original.timestampTypeHeader,
record.timestampType().toString().getBytes(StandardCharsets.UTF_8), HeaderNames.HeadersToAdd.TS_TYPE);
if (ex instanceof ListenerExecutionFailedException) {
String consumerGroup = ((ListenerExecutionFailedException) ex).getGroupId();
if (consumerGroup != null) {
maybeAddHeader(kafkaHeaders, this.headerNames.original.consumerGroup,
consumerGroup.getBytes(StandardCharsets.UTF_8), HeaderNames.HeadersToAdd.GROUP);
}
}
}
// Add the header unless the user excluded it; when appending is disabled, only
// add it if no header with that name is present yet.
private void maybeAddHeader(Headers kafkaHeaders, String header, byte[] value, HeaderNames.HeadersToAdd hta) {
if (!this.whichHeaders.contains(hta)) {
return;
}
if (this.appendOriginalHeaders || kafkaHeaders.lastHeader(header) == null) {
kafkaHeaders.add(header, value);
}
}
// Default ExceptionHeadersCreator: write the exception class name, cause class name,
// message, and stack trace as DLT headers (key- or value-flavored names per isKey),
// each subject to the include/exclude bits and strip-previous policy.
private void addExceptionInfoHeaders(Headers kafkaHeaders, Exception exception, boolean isKey,
HeaderNames names) {
appendOrReplace(kafkaHeaders, new RecordHeader(isKey ? names.exceptionInfo.keyExceptionFqcn
: names.exceptionInfo.exceptionFqcn,
exception.getClass().getName().getBytes(StandardCharsets.UTF_8)), HeaderNames.HeadersToAdd.EXCEPTION);
if (exception.getCause() != null) {
appendOrReplace(kafkaHeaders, new RecordHeader(names.exceptionInfo.exceptionCauseFqcn,
exception.getCause().getClass().getName().getBytes(StandardCharsets.UTF_8)),
HeaderNames.HeadersToAdd.EX_CAUSE);
}
String message = exception.getMessage();
if (message != null) {
// Use the null-checked local rather than calling getMessage() a second time.
appendOrReplace(kafkaHeaders, new RecordHeader(isKey
? names.exceptionInfo.keyExceptionMessage
: names.exceptionInfo.exceptionMessage,
message.getBytes(StandardCharsets.UTF_8)), HeaderNames.HeadersToAdd.EX_MSG);
}
appendOrReplace(kafkaHeaders, new RecordHeader(isKey
? names.exceptionInfo.keyExceptionStacktrace
: names.exceptionInfo.exceptionStacktrace,
getStackTraceAsString(exception).getBytes(StandardCharsets.UTF_8)),
HeaderNames.HeadersToAdd.EX_STACKTRACE);
}
// Add the exception header if it is not excluded, first removing any headers of
// the same name when stripping previous exception headers is enabled.
private void appendOrReplace(Headers headers, RecordHeader header, HeaderNames.HeadersToAdd hta) {
if (!this.whichHeaders.contains(hta)) {
return;
}
if (this.stripPreviousExceptionHeaders) {
headers.remove(header.key());
}
headers.add(header);
}
// Render the full stack trace of the given throwable into an in-memory string.
private String getStackTraceAsString(Throwable cause) {
StringWriter buffer = new StringWriter();
cause.printStackTrace(new PrintWriter(buffer, true));
return buffer.toString();
}
/**
* Override this if you want different header names to be used
* in the sent record. Note: invoked once, during field initialization of this
* class, so overrides cannot depend on subclass instance state.
* @return the header names.
* @since 2.7
*/
protected HeaderNames getHeaderNames() {
return HeaderNames.Builder
.original()
.offsetHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET)
.timestampHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP)
.timestampTypeHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE)
.topicHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC)
.partitionHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION)
.consumerGroupHeader(KafkaHeaders.DLT_ORIGINAL_CONSUMER_GROUP)
.exception()
.keyExceptionFqcn(KafkaHeaders.DLT_KEY_EXCEPTION_FQCN)
.exceptionFqcn(KafkaHeaders.DLT_EXCEPTION_FQCN)
.exceptionCauseFqcn(KafkaHeaders.DLT_EXCEPTION_CAUSE_FQCN)
.keyExceptionMessage(KafkaHeaders.DLT_KEY_EXCEPTION_MESSAGE)
.exceptionMessage(KafkaHeaders.DLT_EXCEPTION_MESSAGE)
.keyExceptionStacktrace(KafkaHeaders.DLT_KEY_EXCEPTION_STACKTRACE)
.exceptionStacktrace(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)
.build();
}
/**
* Container class for the name of the headers that will
* be added to the produced record.
* @since 2.7
*/
public static class HeaderNames {
/**
* Bits representing which headers to add.
* @since 2.8.4
*/
public enum HeadersToAdd {
/**
* The offset of the failed record.
*/
OFFSET,
/**
* The timestamp of the failed record.
*/
TS,
/**
* The timestamp type of the failed record.
*/
TS_TYPE,
/**
* The original topic of the failed record.
*/
TOPIC,
/**
* The partition from which the failed record was received.
*/
PARTITION,
/**
* The consumer group that received the failed record.
*/
GROUP,
/**
* The exception class name.
*/
EXCEPTION,
/**
* The exception cause class name.
*/
EX_CAUSE,
/**
* The exception message.
*/
EX_MSG,
/**
* The exception stack trace.
*/
EX_STACKTRACE;
}
// Header names for the original record's metadata (topic, partition, offset, ...).
private final HeaderNames.Original original;
// Header names for the exception-related headers.
private final ExceptionInfo exceptionInfo;
HeaderNames(HeaderNames.Original original, ExceptionInfo exceptionInfo) {
this.original = original;
this.exceptionInfo = exceptionInfo;
}
/**
* The header names for the original record headers.
* @return the original.
* @since 2.8.4
*/
public HeaderNames.Original getOriginal() {
return this.original;
}
/**
* The header names for the exception headers.
* @return the exceptionInfo
* @since 2.8.4
*/
public ExceptionInfo getExceptionInfo() {
return this.exceptionInfo;
}
/**
* Header names for original record property headers.
*
* @since 2.8.4
*/
public static class Original {
final String offsetHeader; // NOSONAR
final String timestampHeader; // NOSONAR
final String timestampTypeHeader; // NOSONAR
final String topicHeader; // NOSONAR
final String partitionHeader; // NOSONAR
final String consumerGroup; // NOSONAR
Original(String offsetHeader,
String timestampHeader,
String timestampTypeHeader,
String topicHeader,
String partitionHeader,
String consumerGroup) {
this.offsetHeader = offsetHeader;
this.timestampHeader = timestampHeader;
this.timestampTypeHeader = timestampTypeHeader;
this.topicHeader = topicHeader;
this.partitionHeader = partitionHeader;
this.consumerGroup = consumerGroup;
}
/**
* The header name for the offset.
* @return the offsetHeader.
*/
public String getOffsetHeader() {
return this.offsetHeader;
}
/**
* The header name for the timestamp.
* @return the timestampHeader.
*/
public String getTimestampHeader() {
return this.timestampHeader;
}
/**
* The header name for the timestamp type.
* @return the timestampTypeHeader.
*/
public String getTimestampTypeHeader() {
return this.timestampTypeHeader;
}
/**
* The header name for the topic.
* @return the topicHeader.
*/
public String getTopicHeader() {
return this.topicHeader;
}
/**
* The header name for the partition.
* @return the partitionHeader
*/
public String getPartitionHeader() {
return this.partitionHeader;
}
/**
* The header name for the consumer group.
* @return the consumerGroup
*/
public String getConsumerGroup() {
return this.consumerGroup;
}
}
/**
* Header names for exception headers.
*
* @since 2.8.4
*/
public static class ExceptionInfo {
final String keyExceptionFqcn; // NOSONAR
final String exceptionFqcn; // NOSONAR
final String exceptionCauseFqcn; // NOSONAR
final String keyExceptionMessage; // NOSONAR
final String exceptionMessage; // NOSONAR
final String keyExceptionStacktrace; // NOSONAR
final String exceptionStacktrace; // NOSONAR
ExceptionInfo(String keyExceptionFqcn,
String exceptionFqcn,
String exceptionCauseFqcn,
String keyExceptionMessage,
String exceptionMessage,
String keyExceptionStacktrace,
String exceptionStacktrace) {
this.keyExceptionFqcn = keyExceptionFqcn;
this.exceptionFqcn = exceptionFqcn;
this.exceptionCauseFqcn = exceptionCauseFqcn;
this.keyExceptionMessage = keyExceptionMessage;
this.exceptionMessage = exceptionMessage;
this.keyExceptionStacktrace = keyExceptionStacktrace;
this.exceptionStacktrace = exceptionStacktrace;
}
/**
* The header name for the key exception class.
* @return the keyExceptionFqcn.
*/
public String getKeyExceptionFqcn() {
return this.keyExceptionFqcn;
}
/**
* The header name for the value exception class.
* @return the exceptionFqcn.
*/
public String getExceptionFqcn() {
return this.exceptionFqcn;
}
/**
* The header name for the exception cause.
* @return the exceptionCauseFqcn.
*/
public String getExceptionCauseFqcn() {
return this.exceptionCauseFqcn;
}
/**
* The header name for the key exception message.
* @return the keyExceptionMessage.
*/
public String getKeyExceptionMessage() {
return this.keyExceptionMessage;
}
/**
* The header name for the exception message.
* @return the exceptionMessage.
*/
public String getExceptionMessage() {
return this.exceptionMessage;
}
/**
* The header name for the key exception stack trace.
* @return the keyExceptionStacktrace
*/
public String getKeyExceptionStacktrace() {
return this.keyExceptionStacktrace;
}
/**
* The header name for the exception stack trace.
* @return the exceptionStacktrace
*/
public String getExceptionStacktrace() {
return this.exceptionStacktrace;
}
}
/**
* Provides a convenient API for creating
* {@link DeadLetterPublishingRecoverer.HeaderNames}.
*
* @author Tomaz Fernandes
* @since 2.7
* @see HeaderNames
*/
public static class Builder {
// The inner Original and ExceptionInfo builders below are non-static, so both
// share this single Builder instance's state; exception() hands the fluent
// chain over from the Original stage to the ExceptionInfo stage.
private final Original original = new Original();
private final ExceptionInfo exceptionInfo = new ExceptionInfo();
// Entry point of the fluent API: creates a Builder and starts with the
// original-record header names.
public static Builder.Original original() {
return new Builder().original;
}
/**
* Headers for data relative to the original record.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public class Original {
private String offsetHeader;
private String timestampHeader;
private String timestampTypeHeader;
private String topicHeader;
private String partitionHeader;
private String consumerGroupHeader;
/**
* Sets the name of the header that will be used to store the offset
* of the original record.
* @param offsetHeader the offset header name.
* @return the Original builder instance
* @since 2.7
*/
public Builder.Original offsetHeader(String offsetHeader) {
this.offsetHeader = offsetHeader;
return this;
}
/**
* Sets the name of the header that will be used to store the timestamp
* of the original record.
* @param timestampHeader the timestamp header name.
* @return the Original builder instance
* @since 2.7
*/
public Builder.Original timestampHeader(String timestampHeader) {
this.timestampHeader = timestampHeader;
return this;
}
/**
* Sets the name of the header that will be used to store the timestampType
* of the original record.
* @param timestampTypeHeader the timestampType header name.
* @return the Original builder instance
* @since 2.7
*/
public Builder.Original timestampTypeHeader(String timestampTypeHeader) {
this.timestampTypeHeader = timestampTypeHeader;
return this;
}
/**
* Sets the name of the header that will be used to store the topic
* of the original record.
* @param topicHeader the topic header name.
* @return the Original builder instance
* @since 2.7
*/
public Builder.Original topicHeader(String topicHeader) {
this.topicHeader = topicHeader;
return this;
}
/**
* Sets the name of the header that will be used to store the partition
* of the original record.
* @param partitionHeader the partition header name.
* @return the Original builder instance
* @since 2.7
*/
public Builder.Original partitionHeader(String partitionHeader) {
this.partitionHeader = partitionHeader;
return this;
}
/**
* Sets the name of the header that will be used to store the consumer
* group that failed to consume the original record.
* @param consumerGroupHeader the consumer group header name.
* @return the Original builder instance
* @since 2.8
*/
public Builder.Original consumerGroupHeader(String consumerGroupHeader) {
this.consumerGroupHeader = consumerGroupHeader;
return this;
}
/**
* Returns the exception builder.
* @return the exception builder.
* @since 2.7
*/
public ExceptionInfo exception() {
return Builder.this.exceptionInfo;
}
/**
* Builds the Original header names, asserting that none of them is null.
* @return the Original header names.
* @since 2.7
*/
private DeadLetterPublishingRecoverer.HeaderNames.Original build() {
Assert.notNull(this.offsetHeader, "offsetHeader cannot be null");
Assert.notNull(this.timestampHeader, "timestampHeader cannot be null");
Assert.notNull(this.timestampTypeHeader, "timestampTypeHeader cannot be null");
Assert.notNull(this.topicHeader, "topicHeader cannot be null");
Assert.notNull(this.partitionHeader, "partitionHeader cannot be null");
Assert.notNull(this.consumerGroupHeader, "consumerGroupHeader cannot be null");
return new DeadLetterPublishingRecoverer.HeaderNames.Original(this.offsetHeader,
this.timestampHeader,
this.timestampTypeHeader,
this.topicHeader,
this.partitionHeader,
this.consumerGroupHeader);
}
}
/**
* Headers for data relative to the exception thrown.
*
* @author Tomaz Fernandes
* @since 2.7
*/
public class ExceptionInfo {
private String keyExceptionFqcn;
private String exceptionFqcn;
private String exceptionCauseFqcn;
private String keyExceptionMessage;
private String exceptionMessage;
private String keyExceptionStacktrace;
private String exceptionStacktrace;
/**
* Sets the name of the header that will be used to store the keyExceptionFqcn
* of the original record.
* @param keyExceptionFqcn the keyExceptionFqcn header name.
* @return the Exception builder instance
* @since 2.7
*/
public ExceptionInfo keyExceptionFqcn(String keyExceptionFqcn) {
this.keyExceptionFqcn = keyExceptionFqcn;
return this;
}
/**
* Sets the name of the header that will be used to store the exceptionFqcn
* of the original record.
* @param exceptionFqcn the exceptionFqcn header name.
* @return the Exception builder instance
* @since 2.7
*/
public ExceptionInfo exceptionFqcn(String exceptionFqcn) {
this.exceptionFqcn = exceptionFqcn;
return this;
}
/**
* Sets the name of the header that will be used to store the exceptionCauseFqcn
* of the original record.
* @param exceptionCauseFqcn the exceptionCauseFqcn header name.
* @return the Exception builder instance
* @since 2.8
*/
public ExceptionInfo exceptionCauseFqcn(String exceptionCauseFqcn) {
this.exceptionCauseFqcn = exceptionCauseFqcn;
return this;
}
/**
* Sets the name of the header that will be used to store the keyExceptionMessage
* of the original record.
* @param keyExceptionMessage the keyExceptionMessage header name.
* @return the Exception builder instance
* @since 2.7
*/
public ExceptionInfo keyExceptionMessage(String keyExceptionMessage) {
this.keyExceptionMessage = keyExceptionMessage;
return this;
}
/**
* Sets the name of the header that will be used to store the exceptionMessage
* of the original record.
* @param exceptionMessage the exceptionMessage header name.
* @return the Exception builder instance
* @since 2.7
*/
public ExceptionInfo exceptionMessage(String exceptionMessage) {
this.exceptionMessage = exceptionMessage;
return this;
}
/**
* Sets the name of the header that will be used to store the
* keyExceptionStacktrace of the original record.
* @param keyExceptionStacktrace the keyExceptionStacktrace header name.
* @return the Exception builder instance
* @since 2.7
*/
public ExceptionInfo keyExceptionStacktrace(String keyExceptionStacktrace) {
this.keyExceptionStacktrace = keyExceptionStacktrace;
return this;
}
/**
* Sets the name of the header that will be used to store the
* exceptionStacktrace of the original record.
* @param exceptionStacktrace the exceptionStacktrace header name.
* @return the Exception builder instance
* @since 2.7
*/
public ExceptionInfo exceptionStacktrace(String exceptionStacktrace) {
this.exceptionStacktrace = exceptionStacktrace;
return this;
}
/**
* Builds the Header Names, asserting that none of them is null.
* @return the HeaderNames instance.
* @since 2.7
*/
public DeadLetterPublishingRecoverer.HeaderNames build() {
Assert.notNull(this.keyExceptionFqcn, "keyExceptionFqcn header cannot be null");
Assert.notNull(this.exceptionFqcn, "exceptionFqcn header cannot be null");
Assert.notNull(this.exceptionCauseFqcn, "exceptionCauseFqcn header cannot be null");
Assert.notNull(this.keyExceptionMessage, "keyExceptionMessage header cannot be null");
Assert.notNull(this.exceptionMessage, "exceptionMessage header cannot be null");
Assert.notNull(this.keyExceptionStacktrace, "keyExceptionStacktrace header cannot be null");
Assert.notNull(this.exceptionStacktrace, "exceptionStacktrace header cannot be null");
return new DeadLetterPublishingRecoverer.HeaderNames(Builder.this.original.build(),
new HeaderNames.ExceptionInfo(this.keyExceptionFqcn,
this.exceptionFqcn,
this.exceptionCauseFqcn,
this.keyExceptionMessage,
this.exceptionMessage,
this.keyExceptionStacktrace,
this.exceptionStacktrace));
}
}
}
}
/**
* Use this to provide a custom implementation to take complete control over exception
* header creation for the output record.
*
* @since 2.8.4
*/
public interface ExceptionHeadersCreator {
/**
* Create exception headers.
* @param kafkaHeaders the {@link Headers} to add the header(s) to.
* @param exception The exception.
* @param isKey whether the exception is for a key or value.
* @param headerNames the header names to use.
*/
void create(Headers kafkaHeaders, Exception exception, boolean isKey, HeaderNames headerNames);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/DefaultAfterRollbackProcessor.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collections;
import java.util.List;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.listener.ContainerProperties.EOSMode;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.BackOffExecution;
/**
 * Default implementation of {@link AfterRollbackProcessor}. Seeks all
 * topic/partitions so the records will be re-fetched, including the failed
 * record. Starting with version 2.2 after a configurable number of failures
 * for the same topic/partition/offset, that record will be skipped after
 * calling a {@link BiConsumer} recoverer. The default recoverer simply logs
 * the failed record.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 *
 * @since 1.3.5
 *
 */
public class DefaultAfterRollbackProcessor<K, V> extends FailedRecordProcessor
		implements AfterRollbackProcessor<K, V> {

	// Back off state is per consumer thread; intentionally not static so each
	// processor instance tracks its own executions.
	private final ThreadLocal<BackOffExecution> backOffs = new ThreadLocal<>(); // Intentionally not static

	private final ThreadLocal<Long> lastIntervals = new ThreadLocal<>(); // Intentionally not static

	private final BackOff backOff;

	// Assigned only in the constructor; final so the dependency cannot be swapped
	// later. May be null when 'commitRecovered' is false (enforced by checkConfig()).
	private final KafkaOperations<?, ?> kafkaTemplate;

	/**
	 * Construct an instance with the default recoverer which simply logs the record after
	 * {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) have occurred for a
	 * topic/partition/offset.
	 * @since 2.2
	 */
	public DefaultAfterRollbackProcessor() {
		this(null, SeekUtils.DEFAULT_BACK_OFF);
	}

	/**
	 * Construct an instance with the default recoverer which simply logs the record after
	 * the backOff returns STOP for a topic/partition/offset.
	 * @param backOff the {@link BackOff}.
	 * @since 2.3
	 */
	public DefaultAfterRollbackProcessor(BackOff backOff) {
		this(null, backOff);
	}

	/**
	 * Construct an instance with the provided recoverer which will be called after
	 * {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) have occurred for a
	 * topic/partition/offset.
	 * @param recoverer the recoverer.
	 * @since 2.2
	 */
	public DefaultAfterRollbackProcessor(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer) {
		this(recoverer, SeekUtils.DEFAULT_BACK_OFF);
	}

	/**
	 * Construct an instance with the provided recoverer which will be called after
	 * the backOff returns STOP for a topic/partition/offset.
	 * @param recoverer the recoverer; if null, the default (logging) recoverer is used.
	 * @param backOff the {@link BackOff}.
	 * @since 2.3
	 */
	public DefaultAfterRollbackProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer,
			BackOff backOff) {

		this(recoverer, backOff, null, false);
	}

	/**
	 * Construct an instance with the provided recoverer which will be called after the
	 * backOff returns STOP for a topic/partition/offset.
	 * @param recoverer the recoverer; if null, the default (logging) recoverer is used.
	 * @param backOff the {@link BackOff}.
	 * @param kafkaOperations for sending the recovered offset to the transaction.
	 * @param commitRecovered true to commit the recovered record's offset; requires a
	 * {@link KafkaOperations}.
	 * @since 2.5.3
	 */
	public DefaultAfterRollbackProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer,
			BackOff backOff, @Nullable KafkaOperations<?, ?> kafkaOperations, boolean commitRecovered) {

		super(recoverer, backOff);
		this.kafkaTemplate = kafkaOperations;
		super.setCommitRecovered(commitRecovered);
		checkConfig();
		this.backOff = backOff;
	}

	// Fail fast when 'commitRecovered' was requested without a template to send the
	// recovered offset to the transaction.
	private void checkConfig() {
		Assert.isTrue(!isCommitRecovered() || this.kafkaTemplate != null,
				"A KafkaOperations is required when 'commitRecovered' is true");
	}

	@SuppressWarnings({ "unchecked", "rawtypes", "deprecation" })
	@Override
	public void process(List<ConsumerRecord<K, V>> records, Consumer<K, V> consumer,
			@Nullable MessageListenerContainer container, Exception exception, boolean recoverable, EOSMode eosMode) {

		// doSeeks() returns true when the first record was recovered (skipped); in that
		// case, when configured, commit its offset + 1 as part of the transaction.
		if (SeekUtils.doSeeks(((List) records), consumer, exception, recoverable,
				getRecoveryStrategy((List) records, exception), container, this.logger)
				&& isCommitRecovered() && this.kafkaTemplate.isTransactional()) {
			ConsumerRecord<K, V> skipped = records.get(0);
			TopicPartition topicPartition = new TopicPartition(skipped.topic(), skipped.partition());
			OffsetAndMetadata offset = new OffsetAndMetadata(skipped.offset() + 1);
			if (EOSMode.V1.equals(eosMode.getMode())) {
				this.kafkaTemplate.sendOffsetsToTransaction(Collections.singletonMap(topicPartition, offset));
			}
			else {
				// V2 and later require the consumer group metadata.
				this.kafkaTemplate.sendOffsetsToTransaction(Collections.singletonMap(topicPartition, offset),
						consumer.groupMetadata());
			}
		}
		if (!recoverable && this.backOff != null) {
			try {
				ListenerUtils.unrecoverableBackOff(this.backOff, this.backOffs, this.lastIntervals, container);
			}
			catch (InterruptedException e) {
				// Preserve the interrupt for the container thread.
				Thread.currentThread().interrupt();
			}
		}
	}

	@Override
	public boolean isProcessInTransaction() {
		// Processing must run in the transaction only when we will send offsets to it.
		return isCommitRecovered();
	}

	@Override
	public void clearThreadState() {
		super.clearThreadState();
		this.backOffs.remove();
		this.lastIntervals.remove();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/DefaultErrorHandler.java | /*
* Copyright 2021-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.errors.SerializationException;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
/**
 * An error handler that seeks to the current offset for each topic in the
 * remaining records (record listeners), or in the batch (batch listeners), so
 * that failed deliveries can be replayed. When a batch listener throws a
 * {@link BatchListenerFailedException} identifying the failed record, the
 * records before it have their offsets committed and the partitions for the
 * remaining records are repositioned, or the failed record is recovered and
 * skipped (its offset is committed). For any other exception, or when no valid
 * record is provided in the exception, handling is delegated to a
 * {@link FallbackBatchErrorHandler} using this handler's {@link BackOff}. This
 * replaces the legacy {@link SeekToCurrentErrorHandler} and
 * {@link SeekToCurrentBatchErrorHandler} (but the fallback can send the
 * messages to a recoverer after retries are completed instead of retrying
 * indefinitely).
 *
 * @author Gary Russell
 *
 * @since 2.8
 *
 */
public class DefaultErrorHandler extends FailedBatchProcessor implements CommonErrorHandler {

	private boolean ackAfterHandle = true;

	/**
	 * Create an instance with the default (logging) recoverer, invoked after
	 * {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) attempts for a
	 * topic/partition/offset, with the default back off (9 retries, no delay).
	 */
	public DefaultErrorHandler() {
		this(null, SeekUtils.DEFAULT_BACK_OFF);
	}

	/**
	 * Create an instance with the default (logging) recoverer, invoked once the
	 * supplied back off returns STOP for a topic/partition/offset.
	 * @param backOff the {@link BackOff}.
	 */
	public DefaultErrorHandler(BackOff backOff) {
		this(null, backOff);
	}

	/**
	 * Create an instance with the supplied recoverer, invoked after
	 * {@value SeekUtils#DEFAULT_MAX_FAILURES} (maxFailures) attempts for a
	 * topic/partition/offset.
	 * @param recoverer the recoverer.
	 */
	public DefaultErrorHandler(ConsumerRecordRecoverer recoverer) {
		this(recoverer, SeekUtils.DEFAULT_BACK_OFF);
	}

	/**
	 * Create an instance with the supplied recoverer, invoked once the supplied
	 * back off returns STOP for a topic/partition/offset.
	 * @param recoverer the recoverer; if null, the default (logging) recoverer is used.
	 * @param backOff the {@link BackOff}.
	 */
	public DefaultErrorHandler(@Nullable ConsumerRecordRecoverer recoverer, BackOff backOff) {
		super(recoverer, backOff, createFallback(backOff, recoverer));
	}

	// Batch exceptions that are not BatchListenerFailedException are routed to this
	// legacy-style batch handler, adapted to the CommonErrorHandler contract.
	private static CommonErrorHandler createFallback(BackOff backOff, @Nullable ConsumerRecordRecoverer recoverer) {
		return new ErrorHandlerAdapter(new FallbackBatchErrorHandler(backOff, recoverer));
	}

	/**
	 * {@inheritDoc}
	 * The container must be configured with
	 * {@link org.springframework.kafka.listener.ContainerProperties.AckMode#MANUAL_IMMEDIATE}.
	 * Whether or not the commit is sync or async depends on the container's syncCommits
	 * property.
	 * @param commitRecovered true to commit.
	 */
	@Override
	public void setCommitRecovered(boolean commitRecovered) { // NOSONAR enhanced javadoc
		super.setCommitRecovered(commitRecovered);
	}

	@Override
	public boolean isAckAfterHandle() {
		return this.ackAfterHandle;
	}

	@Override
	public void setAckAfterHandle(boolean ackAfterHandle) {
		this.ackAfterHandle = ackAfterHandle;
	}

	@Override
	public boolean remainingRecords() {
		// Ask the container to pass the failed record plus all remaining records.
		return true;
	}

	@Override
	public boolean deliveryAttemptHeader() {
		return true;
	}

	@Override
	public void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records,
			Consumer<?, ?> consumer, MessageListenerContainer container) {

		SeekUtils.seekOrRecover(thrownException, records, consumer, container, isCommitRecovered(), // NOSONAR
				getRecoveryStrategy(records, consumer, thrownException), this.logger, getLogLevel());
	}

	@Override
	public void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container, Runnable invokeListener) {

		doHandle(thrownException, data, consumer, container, invokeListener);
	}

	@Override
	public void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
			MessageListenerContainer container, boolean batchListener) {

		// These exceptions arrive without record context, so they cannot be seeked
		// past or recovered; fail fast with a diagnostic message.
		String message = thrownException instanceof SerializationException
				? "This error handler cannot process 'SerializationException's directly; "
						+ "please consider configuring an 'ErrorHandlingDeserializer' in the value and/or key "
						+ "deserializer"
				: "This error handler cannot process '"
						+ thrownException.getClass().getName()
						+ "'s; no record information is available";
		throw new IllegalStateException(message, thrownException);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/DelegatingMessageListener.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
/**
* Classes implementing this interface allow containers to determine the type of the
* ultimate listener.
*
* @param <T> the type received by the listener.
*
* @author Gary Russell
* @since 2.0
*
*/
public interface DelegatingMessageListener<T> {
/**
* Return the immediate delegate (which may itself be a delegating listener
* that callers can unwrap further).
* @return the delegate.
*/
T getDelegate();
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/DeliveryAttemptAware.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import org.springframework.kafka.support.TopicPartitionOffset;
/**
* A component implementing this interface can provide the next delivery attempt.
*
* @author Gary Russell
* @since 2.5
*
*/
@FunctionalInterface
public interface DeliveryAttemptAware {
/**
* Return the next delivery attempt for the topic/partition/offset identified
* by the supplied {@link TopicPartitionOffset}.
* @param topicPartitionOffset the topic/partition/offset.
* @return the next delivery attempt.
*/
int deliveryAttempt(TopicPartitionOffset topicPartitionOffset);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ErrorHandler.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* Handles errors thrown during the execution of a {@link MessageListener}.
*
* @author Marius Bogoevici
* @author Gary Russell
*/
public interface ErrorHandler extends GenericErrorHandler<ConsumerRecord<?, ?>> {
/**
* Handle the exception.
* <p>The default implementation delegates to {@code handle(Exception, ConsumerRecord)}
* with a {@code null} record, discarding the remaining records, the consumer and
* the container; implementations that need them must override this method.
* @param thrownException the exception.
* @param records the remaining records including the one that failed.
* @param consumer the consumer.
* @param container the container.
*/
default void handle(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
MessageListenerContainer container) {
handle(thrownException, null);
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ErrorHandlerAdapter.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.util.Assert;
/**
 * Adapts a legacy {@link ErrorHandler} or {@link BatchErrorHandler} to the
 * {@link CommonErrorHandler} contract. Exactly one of the two delegates is
 * non-null; every method dispatches to whichever delegate is present.
 *
 * @author Gary Russell
 * @since 2.7.4
 *
 */
class ErrorHandlerAdapter implements CommonErrorHandler {

	// Shared immutable empty batch, used when no records are associated with a failure.
	@SuppressWarnings({ "rawtypes", "unchecked" })
	private static final ConsumerRecords EMPTY_BATCH = new ConsumerRecords(Collections.emptyMap());

	private final ErrorHandler errorHandler;

	private final BatchErrorHandler batchErrorHandler;

	/**
	 * Adapt an {@link ErrorHandler}.
	 * @param errorHandler the handler.
	 */
	ErrorHandlerAdapter(ErrorHandler errorHandler) {
		Assert.notNull(errorHandler, "'errorHandler' cannot be null");
		this.errorHandler = errorHandler;
		this.batchErrorHandler = null;
	}

	/**
	 * Adapt a {@link BatchErrorHandler}.
	 * @param batchErrorHandler the handler.
	 */
	ErrorHandlerAdapter(BatchErrorHandler batchErrorHandler) {
		Assert.notNull(batchErrorHandler, "'batchErrorHandler' cannot be null");
		this.errorHandler = null;
		this.batchErrorHandler = batchErrorHandler;
	}

	@Override
	public boolean remainingRecords() {
		return this.errorHandler instanceof RemainingRecordsErrorHandler;
	}

	@Override
	public boolean deliveryAttemptHeader() {
		return this.errorHandler instanceof DeliveryAttemptAware;
	}

	@Override
	public void clearThreadState() {
		if (this.errorHandler != null) {
			this.errorHandler.clearThreadState();
		}
		else {
			this.batchErrorHandler.clearThreadState();
		}
	}

	@Override
	public boolean isAckAfterHandle() {
		if (this.errorHandler != null) {
			return this.errorHandler.isAckAfterHandle();
		}
		else {
			return this.batchErrorHandler.isAckAfterHandle();
		}
	}

	@Override
	public void setAckAfterHandle(boolean ack) {
		if (this.errorHandler != null) {
			this.errorHandler.setAckAfterHandle(ack);
		}
		else {
			this.batchErrorHandler.setAckAfterHandle(ack);
		}
	}

	@Override
	public int deliveryAttempt(TopicPartitionOffset topicPartitionOffset) {
		// Only callable when deliveryAttemptHeader() is true, i.e. the record
		// delegate implements DeliveryAttemptAware; the cast below is then safe.
		Assert.state(deliveryAttemptHeader(), "This method should not be called by the container");
		return ((DeliveryAttemptAware) this.errorHandler).deliveryAttempt(topicPartitionOffset);
	}

	@SuppressWarnings("unchecked")
	@Override
	public void handleOtherException(Exception thrownException, Consumer<?, ?> consumer,
			MessageListenerContainer container, boolean batchListener) {

		if (this.errorHandler != null) {
			// Type-safe empty list (was the raw Collections.EMPTY_LIST).
			this.errorHandler.handle(thrownException, Collections.emptyList(), consumer, container);
		}
		else {
			this.batchErrorHandler.handle(thrownException, EMPTY_BATCH, consumer, container, () -> { });
		}
	}

	@Override
	public void handleRecord(Exception thrownException, ConsumerRecord<?, ?> record, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		if (this.errorHandler != null) {
			this.errorHandler.handle(thrownException, record, consumer);
		}
		else {
			// Batch delegate cannot handle a single record; use the default behavior.
			CommonErrorHandler.super.handleRecord(thrownException, record, consumer, container);
		}
	}

	@Override
	public void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
			MessageListenerContainer container) {

		if (this.errorHandler != null) {
			this.errorHandler.handle(thrownException, records, consumer, container);
		}
		else {
			CommonErrorHandler.super.handleRemaining(thrownException, records, consumer, container);
		}
	}

	@Override
	public void handleBatch(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container, Runnable invokeListener) {

		if (this.batchErrorHandler != null) {
			this.batchErrorHandler.handle(thrownException, data, consumer, container, invokeListener);
		}
		else {
			CommonErrorHandler.super.handleBatch(thrownException, data, consumer, container, invokeListener);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ErrorHandlingUtils.java | /*
* Copyright 2021-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Duration;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.BackOffExecution;
/**
 * Utilities for error handling.
 *
 * @author Gary Russell
 * @since 2.8
 *
 */
public final class ErrorHandlingUtils {

	private ErrorHandlingUtils() {
	}

	/**
	 * Retry a complete batch by pausing the consumer and then, in a loop, poll the
	 * consumer, wait for the next back off, then call the listener. When retries are
	 * exhausted, call the recoverer with the {@link ConsumerRecords}.
	 * @param thrownException the exception.
	 * @param records the records.
	 * @param consumer the consumer.
	 * @param container the container.
	 * @param invokeListener the {@link Runnable} to run (call the listener).
	 * @param backOff the backOff.
	 * @param seeker the common error handler that re-seeks the entire batch.
	 * @param recoverer the recoverer.
	 * @param logger the logger.
	 * @param logLevel the log level.
	 */
	public static void retryBatch(Exception thrownException, ConsumerRecords<?, ?> records, Consumer<?, ?> consumer,
			MessageListenerContainer container, Runnable invokeListener, BackOff backOff,
			CommonErrorHandler seeker, BiConsumer<ConsumerRecords<?, ?>, Exception> recoverer, LogAccessor logger,
			KafkaException.Level logLevel) {

		BackOffExecution execution = backOff.start();
		long nextBackOff = execution.nextBackOff();
		String failed = null;
		// Pause so the polls below (which keep the consumer alive in the group) return no data.
		consumer.pause(consumer.assignment());
		try {
			while (nextBackOff != BackOffExecution.STOP) {
				consumer.poll(Duration.ZERO);
				try {
					ListenerUtils.stoppableSleep(container, nextBackOff);
				}
				catch (InterruptedException e1) {
					// Re-interrupt, give the seeker a chance to reposition, then abort the retries.
					Thread.currentThread().interrupt();
					seeker.handleBatch(thrownException, records, consumer, container, () -> { });
					throw new KafkaException("Interrupted during retry", logLevel, e1);
				}
				if (!container.isRunning()) {
					throw new KafkaException("Container stopped during retries");
				}
				try {
					invokeListener.run();
					return; // the retry succeeded
				}
				catch (Exception e) {
					if (failed == null) {
						failed = recordsToString(records); // compute the (expensive) summary once
					}
					String toLog = failed;
					logger.debug(e, () -> "Retry failed for: " + toLog);
				}
				nextBackOff = execution.nextBackOff();
			}
			// Retries exhausted; hand the whole batch to the recoverer.
			try {
				recoverer.accept(records, thrownException);
			}
			catch (Exception e) {
				logger.error(e, () -> "Recoverer threw an exception; re-seeking batch");
				seeker.handleBatch(thrownException, records, consumer, container, () -> { });
			}
		}
		finally {
			consumer.resume(consumer.assignment());
		}
	}

	/**
	 * Represent the records as a comma-delimited String of {@code topic-part@offset}.
	 * Returns an empty String for an empty batch.
	 * @param records the records.
	 * @return the String.
	 */
	@SuppressWarnings("deprecation")
	public static String recordsToString(ConsumerRecords<?, ?> records) {
		StringBuilder sb = new StringBuilder();
		records.forEach(rec -> sb
				.append(ListenerUtils.recordToString(rec, true))
				.append(','));
		if (sb.length() > 0) {
			sb.deleteCharAt(sb.length() - 1); // drop the trailing comma
		}
		return sb.toString();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/ExceptionClassifier.java | /*
* Copyright 2021-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.util.HashMap;
import java.util.Map;
import org.springframework.classify.BinaryExceptionClassifier;
import org.springframework.kafka.support.converter.ConversionException;
import org.springframework.kafka.support.serializer.DeserializationException;
import org.springframework.lang.Nullable;
import org.springframework.messaging.converter.MessageConversionException;
import org.springframework.messaging.handler.invocation.MethodArgumentResolutionException;
import org.springframework.util.Assert;
/**
 * Supports exception classification.
 *
 * @author Gary Russell
 * @since 2.8
 *
 */
public abstract class ExceptionClassifier extends KafkaExceptionLogLevelAware {

	private ExtendedBinaryExceptionClassifier classifier;

	/**
	 * Construct the instance.
	 */
	public ExceptionClassifier() {
		this.classifier = configureDefaultClassifier();
	}

	// Fatal-by-default exception types; unmatched types classify as true (retryable).
	private static ExtendedBinaryExceptionClassifier configureDefaultClassifier() {
		Map<Class<? extends Throwable>, Boolean> classified = new HashMap<>();
		classified.put(DeserializationException.class, false);
		classified.put(MessageConversionException.class, false);
		classified.put(ConversionException.class, false);
		classified.put(MethodArgumentResolutionException.class, false);
		classified.put(NoSuchMethodException.class, false);
		classified.put(ClassCastException.class, false);
		return new ExtendedBinaryExceptionClassifier(classified, true);
	}

	/**
	 * By default, unmatched types classify as true. Call this method to make the default
	 * false, and remove types explicitly classified as false. This should be called before
	 * calling any of the classification modification methods.
	 * @since 2.8.4
	 */
	public void defaultFalse() {
		this.classifier = new ExtendedBinaryExceptionClassifier(new HashMap<>(), false);
	}

	/**
	 * Return the exception classifier.
	 * @return the classifier.
	 */
	protected BinaryExceptionClassifier getClassifier() {
		return this.classifier;
	}

	/**
	 * Set an exception classifications to determine whether the exception should cause a retry
	 * (until exhaustion) or not. If not, we go straight to the recoverer. By default,
	 * the following exceptions will not be retried:
	 * <ul>
	 * <li>{@link DeserializationException}</li>
	 * <li>{@link MessageConversionException}</li>
	 * <li>{@link ConversionException}</li>
	 * <li>{@link MethodArgumentResolutionException}</li>
	 * <li>{@link NoSuchMethodException}</li>
	 * <li>{@link ClassCastException}</li>
	 * </ul>
	 * All others will be retried.
	 * When calling this method, the defaults will not be applied.
	 * @param classifications the classifications.
	 * @param defaultValue whether or not to retry non-matching exceptions.
	 * @see BinaryExceptionClassifier#BinaryExceptionClassifier(Map, boolean)
	 * @see #addNotRetryableExceptions(Class...)
	 */
	public void setClassifications(Map<Class<? extends Throwable>, Boolean> classifications, boolean defaultValue) {
		Assert.notNull(classifications, "'classifications' cannot be null");
		this.classifier = new ExtendedBinaryExceptionClassifier(classifications, defaultValue);
	}

	/**
	 * Add exception types to the default list. By default, the following exceptions will
	 * not be retried:
	 * <ul>
	 * <li>{@link DeserializationException}</li>
	 * <li>{@link MessageConversionException}</li>
	 * <li>{@link ConversionException}</li>
	 * <li>{@link MethodArgumentResolutionException}</li>
	 * <li>{@link NoSuchMethodException}</li>
	 * <li>{@link ClassCastException}</li>
	 * </ul>
	 * All others will be retried, unless {@link #defaultFalse()} has been called.
	 * @param exceptionTypes the exception types.
	 * @see #removeClassification(Class)
	 * @see #setClassifications(Map, boolean)
	 */
	@SafeVarargs
	@SuppressWarnings("varargs")
	public final void addNotRetryableExceptions(Class<? extends Exception>... exceptionTypes) {
		add(false, exceptionTypes);
	}

	/**
	 * Add exception types that can be retried. Call this after {@link #defaultFalse()} to
	 * specify those exception types that should be classified as true.
	 * All others will be retried, unless {@link #defaultFalse()} has been called.
	 * @param exceptionTypes the exception types.
	 * @since 2.8.4
	 * @see #removeClassification(Class)
	 * @see #setClassifications(Map, boolean)
	 */
	@SafeVarargs
	@SuppressWarnings("varargs")
	public final void addRetryableExceptions(Class<? extends Exception>... exceptionTypes) {
		add(true, exceptionTypes);
	}

	// Register the given types with the supplied classification (true = retryable).
	@SafeVarargs
	@SuppressWarnings("varargs")
	private final void add(boolean classified, Class<? extends Exception>... exceptionTypes) {
		Assert.notNull(exceptionTypes, "'exceptionTypes' cannot be null");
		Assert.noNullElements(exceptionTypes, "'exceptionTypes' cannot contain nulls");
		for (Class<? extends Exception> exceptionType : exceptionTypes) {
			Assert.isTrue(Exception.class.isAssignableFrom(exceptionType),
					() -> "exceptionType " + exceptionType + " must be an Exception");
			this.classifier.getClassified().put(exceptionType, classified);
		}
	}

	/**
	 * Remove an exception type from the configured list. By default, the following
	 * exceptions will not be retried:
	 * <ul>
	 * <li>{@link DeserializationException}</li>
	 * <li>{@link MessageConversionException}</li>
	 * <li>{@link ConversionException}</li>
	 * <li>{@link MethodArgumentResolutionException}</li>
	 * <li>{@link NoSuchMethodException}</li>
	 * <li>{@link ClassCastException}</li>
	 * </ul>
	 * All others will be retried, unless {@link #defaultFalse()} has been called.
	 * @param exceptionType the exception type.
	 * @return true if the removal was successful.
	 * @deprecated in favor of {@link #removeClassification(Class)}
	 * @see #addNotRetryableExceptions(Class...)
	 * @see #setClassifications(Map, boolean)
	 * @see #defaultFalse()
	 */
	@Deprecated
	public boolean removeNotRetryableException(Class<? extends Exception> exceptionType) {
		return Boolean.TRUE.equals(removeClassification(exceptionType));
	}

	/**
	 * Remove an exception type from the configured list. By default, the following
	 * exceptions will not be retried:
	 * <ul>
	 * <li>{@link DeserializationException}</li>
	 * <li>{@link MessageConversionException}</li>
	 * <li>{@link ConversionException}</li>
	 * <li>{@link MethodArgumentResolutionException}</li>
	 * <li>{@link NoSuchMethodException}</li>
	 * <li>{@link ClassCastException}</li>
	 * </ul>
	 * All others will be retried, unless {@link #defaultFalse()} has been called.
	 * @param exceptionType the exception type.
	 * @return the classification of the exception if removal was successful;
	 * null otherwise.
	 * @since 2.8.4
	 * @see #addNotRetryableExceptions(Class...)
	 * @see #setClassifications(Map, boolean)
	 */
	@Nullable
	public Boolean removeClassification(Class<? extends Exception> exceptionType) {
		return this.classifier.getClassified().remove(exceptionType);
	}

	/**
	 * Extended to provide visibility to the current classified exceptions.
	 *
	 * @author Gary Russell
	 *
	 */
	@SuppressWarnings("serial")
	private static final class ExtendedBinaryExceptionClassifier extends BinaryExceptionClassifier {

		ExtendedBinaryExceptionClassifier(Map<Class<? extends Throwable>, Boolean> typeMap, boolean defaultValue) {
			super(typeMap, defaultValue);
			// Classify by cause chain as well as the top-level exception.
			setTraverseCauses(true);
		}

		@Override
		protected Map<Class<? extends Throwable>, Boolean> getClassified() { // NOSONAR worthless override
			return super.getClassified();
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/listener/FailedBatchProcessor.java | /*
* Copyright 2021-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
import org.springframework.util.backoff.BackOff;
/**
 * Subclass of {@link FailedRecordProcessor} that can process (and recover) a batch. If
 * the listener throws a {@link BatchListenerFailedException}, the offsets prior to the
 * failed record are committed and the remaining records have seeks performed. When the
 * retries are exhausted, the failed record is sent to the recoverer instead of being
 * included in the seeks. If other exceptions are thrown processing is delegated to the
 * fallback handler.
 *
 * @author Gary Russell
 * @since 2.8
 *
 */
public abstract class FailedBatchProcessor extends FailedRecordProcessor {

	private static final LoggingCommitCallback LOGGING_COMMIT_CALLBACK = new LoggingCommitCallback();

	private final CommonErrorHandler fallbackBatchHandler;

	/**
	 * Construct an instance with the provided properties.
	 * @param recoverer the recoverer.
	 * @param backOff the back off.
	 * @param fallbackHandler the fall back handler.
	 */
	public FailedBatchProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff,
			CommonErrorHandler fallbackHandler) {

		super(recoverer, backOff);
		this.fallbackBatchHandler = fallbackHandler;
	}

	protected void doHandle(Exception thrownException, ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container, Runnable invokeListener) {

		BatchListenerFailedException batchListenerFailedException = getBatchListenerFailedException(thrownException);
		if (batchListenerFailedException == null) {
			this.logger.debug(thrownException, "Expected a BatchListenerFailedException; re-seeking batch");
			this.fallbackBatchHandler.handleBatch(thrownException, data, consumer, container, invokeListener);
		}
		else {
			ConsumerRecord<?, ?> record = batchListenerFailedException.getRecord();
			int index = record != null ? findIndex(data, record) : batchListenerFailedException.getIndex();
			if (index < 0 || index >= data.count()) {
				// The exception may identify the record by index only, so guard against a null record here.
				this.logger.warn(batchListenerFailedException, () ->
						String.format("Record not found in batch: %s; re-seeking batch",
								record != null
										? record.topic() + "-" + record.partition() + "@" + record.offset()
										: "index " + batchListenerFailedException.getIndex()));
				this.fallbackBatchHandler.handleBatch(thrownException, data, consumer, container, invokeListener);
			}
			else {
				seekOrRecover(thrownException, data, consumer, container, index);
			}
		}
	}

	// Locate the record in the batch; returns data.count() (out of range) when not found.
	private int findIndex(ConsumerRecords<?, ?> data, ConsumerRecord<?, ?> record) {
		if (record == null) {
			return -1;
		}
		int i = 0;
		Iterator<?> iterator = data.iterator();
		while (iterator.hasNext()) {
			ConsumerRecord<?, ?> candidate = (ConsumerRecord<?, ?>) iterator.next();
			if (candidate.topic().equals(record.topic()) && candidate.partition() == record.partition()
					&& candidate.offset() == record.offset()) {
				break;
			}
			i++;
		}
		return i;
	}

	// Commit records before the failed index, then seek or recover the remainder.
	private void seekOrRecover(Exception thrownException, @Nullable ConsumerRecords<?, ?> data, Consumer<?, ?> consumer,
			MessageListenerContainer container, int indexArg) {

		if (data == null) {
			return;
		}
		Iterator<?> iterator = data.iterator();
		List<ConsumerRecord<?, ?>> toCommit = new ArrayList<>();
		List<ConsumerRecord<?, ?>> remaining = new ArrayList<>();
		int index = indexArg;
		while (iterator.hasNext()) {
			ConsumerRecord<?, ?> record = (ConsumerRecord<?, ?>) iterator.next();
			if (index-- > 0) {
				toCommit.add(record);
			}
			else {
				remaining.add(record);
			}
		}
		// Keep only the latest offset per partition (+1 because committed offset is the next to consume).
		Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
		toCommit.forEach(rec -> offsets.compute(new TopicPartition(rec.topic(), rec.partition()),
				(key, val) -> new OffsetAndMetadata(rec.offset() + 1)));
		if (offsets.size() > 0) {
			commit(consumer, container, offsets);
		}
		if (remaining.size() > 0) {
			SeekUtils.seekOrRecover(thrownException, remaining, consumer, container, false,
					getRecoveryStrategy(remaining, thrownException), this.logger, getLogLevel());
			// The failed record was recovered; commit past it.
			ConsumerRecord<?, ?> recovered = remaining.get(0);
			commit(consumer, container,
					Collections.singletonMap(new TopicPartition(recovered.topic(), recovered.partition()),
							new OffsetAndMetadata(recovered.offset() + 1)));
			if (remaining.size() > 1) {
				// Force a re-seek of the records after the recovered one.
				throw new KafkaException("Seek to current after exception", getLogLevel(), thrownException);
			}
		}
	}

	// Commit synchronously or asynchronously per the container properties.
	private void commit(Consumer<?, ?> consumer, MessageListenerContainer container,
			Map<TopicPartition, OffsetAndMetadata> offsets) {

		boolean syncCommits = container.getContainerProperties().isSyncCommits();
		Duration timeout = container.getContainerProperties().getSyncCommitTimeout();
		if (syncCommits) {
			consumer.commitSync(offsets, timeout);
		}
		else {
			OffsetCommitCallback commitCallback = container.getContainerProperties().getCommitCallback();
			if (commitCallback == null) {
				commitCallback = LOGGING_COMMIT_CALLBACK;
			}
			consumer.commitAsync(offsets, commitCallback);
		}
	}

	// Walk the cause chain (cycle-safe) looking for a BatchListenerFailedException.
	@Nullable
	private BatchListenerFailedException getBatchListenerFailedException(Throwable throwableArg) {
		if (throwableArg == null || throwableArg instanceof BatchListenerFailedException) {
			return (BatchListenerFailedException) throwableArg;
		}
		BatchListenerFailedException target = null;
		Throwable throwable = throwableArg;
		Set<Throwable> checked = new HashSet<>();
		while (throwable.getCause() != null && !checked.contains(throwable.getCause())) {
			throwable = throwable.getCause();
			checked.add(throwable);
			if (throwable instanceof BatchListenerFailedException) {
				target = (BatchListenerFailedException) throwable;
				break;
			}
		}
		return target;
	}

}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.