index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/BufferExhaustedException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import org.apache.kafka.common.errors.TimeoutException;

/**
 * Thrown when the producer fails to allocate buffer memory for a record within
 * {@code max.block.ms} because the accumulator's buffer pool is full.
 *
 * <p>Historically a plain {@link TimeoutException} was thrown in this situation;
 * this class extends it so that pre-existing {@code catch (TimeoutException e)}
 * clauses continue to work unchanged.
 */
public class BufferExhaustedException extends TimeoutException {

    // Fixed serial version so serialized instances stay compatible across releases.
    private static final long serialVersionUID = 1L;

    /**
     * @param message human-readable description of the allocation failure
     */
    public BufferExhaustedException(String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/Callback.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

/**
 * User-supplied hook invoked when a produce request completes. Implementations
 * generally run on the producer's background I/O thread, so they must return
 * quickly and must not block.
 */
public interface Callback {

    /**
     * Invoked once the record sent to the server has been acknowledged (or has
     * failed permanently). Exactly one of the two arguments carries useful
     * information: when {@code exception} is non-null, {@code metadata} contains
     * the special value -1 for all of its fields (including the partition, if no
     * topic-partition could be chosen).
     *
     * @param metadata  the metadata for the record that was sent (i.e. the
     *                  partition and offset); an empty metadata with -1 for all
     *                  fields is supplied if an error occurred
     * @param exception the exception thrown while processing this record, or
     *                  {@code null} if no error occurred.
     *                  <p>
     *                  Non-retriable exceptions (fatal, the message will never be sent):
     *                  <ul>
     *                  <li>InvalidTopicException
     *                  <li>OffsetMetadataTooLargeException
     *                  <li>RecordBatchTooLargeException
     *                  <li>RecordTooLargeException
     *                  <li>UnknownServerException
     *                  <li>UnknownProducerIdException
     *                  <li>InvalidProducerEpochException
     *                  </ul>
     *                  Retriable exceptions (transient, may be covered by increasing {@code retries}):
     *                  <ul>
     *                  <li>CorruptRecordException
     *                  <li>InvalidMetadataException
     *                  <li>NotEnoughReplicasAfterAppendException
     *                  <li>NotEnoughReplicasException
     *                  <li>OffsetOutOfRangeException
     *                  <li>TimeoutException
     *                  <li>UnknownTopicOrPartitionException
     *                  </ul>
     */
    void onCompletion(RecordMetadata metadata, Exception exception);
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/KafkaProducer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.NetworkClient; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.clients.producer.internals.BufferPool; import org.apache.kafka.clients.producer.internals.BuiltInPartitioner; import org.apache.kafka.clients.producer.internals.KafkaProducerMetrics; import org.apache.kafka.clients.producer.internals.ProducerInterceptors; import org.apache.kafka.clients.producer.internals.ProducerMetadata; import org.apache.kafka.clients.producer.internals.ProducerMetrics; import org.apache.kafka.clients.producer.internals.RecordAccumulator; import org.apache.kafka.clients.producer.internals.Sender; import org.apache.kafka.clients.producer.internals.TransactionManager; import org.apache.kafka.clients.producer.internals.TransactionalRequestResult; 
import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.AuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsContext; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.network.Selector; import org.apache.kafka.common.record.AbstractRecords; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.superstream.Consts; import org.apache.kafka.common.superstream.Superstream; import org.apache.kafka.common.utils.AppInfoParser; import org.apache.kafka.common.utils.KafkaThread; import 
org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.net.InetSocketAddress; import java.time.Duration; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Properties; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static org.apache.kafka.common.superstream.Consts.SUPERSTREAM_RESPONSE_TIMEOUT_ENV_VAR; /** * A Kafka client that publishes records to the Kafka cluster. * <P> * The producer is <i>thread safe</i> and sharing a single producer instance across threads will generally be faster than * having multiple instances. * <p> * Here is a simple example of using the producer to send records with strings containing sequential numbers as the key/value * pairs. * <pre> * {@code * Properties props = new Properties(); * props.put("bootstrap.servers", "localhost:9092"); * props.put("linger.ms", 1); * props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); * props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); * * Producer<String, String> producer = new KafkaProducer<>(props); * for (int i = 0; i < 100; i++) * producer.send(new ProducerRecord<String, String>("my-topic", Integer.toString(i), Integer.toString(i))); * * producer.close(); * }</pre> * <p> * The producer consists of a pool of buffer space that holds records that haven't yet been transmitted to the server * as well as a background I/O thread that is responsible for turning these records into requests and transmitting them * to the cluster. Failure to close the producer after use will leak these resources. * <p> * The {@link #send(ProducerRecord) send()} method is asynchronous. 
When called, it adds the record to a buffer of pending record sends * and immediately returns. This allows the producer to batch together individual records for efficiency. * <p> * The <code>acks</code> config controls the criteria under which requests are considered complete. The default setting "all" * will result in blocking on the full commit of the record, the slowest but most durable setting. * <p> * If the request fails, the producer can automatically retry. The <code>retries</code> setting defaults to <code>Integer.MAX_VALUE</code>, and * it's recommended to use <code>delivery.timeout.ms</code> to control retry behavior, instead of <code>retries</code>. * <p> * The producer maintains buffers of unsent records for each partition. These buffers are of a size specified by * the <code>batch.size</code> config. Making this larger can result in more batching, but requires more memory (since we will * generally have one of these buffers for each active partition). * <p> * By default a buffer is available to send immediately even if there is additional unused space in the buffer. However if you * want to reduce the number of requests you can set <code>linger.ms</code> to something greater than 0. This will * instruct the producer to wait up to that number of milliseconds before sending a request in hope that more records will * arrive to fill up the same batch. This is analogous to Nagle's algorithm in TCP. For example, in the code snippet above, * likely all 100 records would be sent in a single request since we set our linger time to 1 millisecond. However this setting * would add 1 millisecond of latency to our request waiting for more records to arrive if we didn't fill up the buffer. Note that * records that arrive close together in time will generally batch together even with <code>linger.ms=0</code>. 
So, under heavy load, * batching will occur regardless of the linger configuration; however setting this to something larger than 0 can lead to fewer, more * efficient requests when not under maximal load at the cost of a small amount of latency. * <p> * The <code>buffer.memory</code> controls the total amount of memory available to the producer for buffering. If records * are sent faster than they can be transmitted to the server then this buffer space will be exhausted. When the buffer space is * exhausted additional send calls will block. The threshold for time to block is determined by <code>max.block.ms</code> after which it throws * a TimeoutException. * <p> * The <code>key.serializer</code> and <code>value.serializer</code> instruct how to turn the key and value objects the user provides with * their <code>ProducerRecord</code> into bytes. You can use the included {@link org.apache.kafka.common.serialization.ByteArraySerializer} or * {@link org.apache.kafka.common.serialization.StringSerializer} for simple byte or string types. * <p> * From Kafka 0.11, the KafkaProducer supports two additional modes: the idempotent producer and the transactional producer. * The idempotent producer strengthens Kafka's delivery semantics from at least once to exactly once delivery. In particular * producer retries will no longer introduce duplicates. The transactional producer allows an application to send messages * to multiple partitions (and topics!) atomically. * </p> * <p> * From Kafka 3.0, the <code>enable.idempotence</code> configuration defaults to true. When enabling idempotence, * <code>retries</code> config will default to <code>Integer.MAX_VALUE</code> and the <code>acks</code> config will * default to <code>all</code>. There are no API changes for the idempotent producer, so existing applications will * not need to be modified to take advantage of this feature. 
* </p> * <p> * To take advantage of the idempotent producer, it is imperative to avoid application level re-sends since these cannot * be de-duplicated. As such, if an application enables idempotence, it is recommended to leave the <code>retries</code> * config unset, as it will be defaulted to <code>Integer.MAX_VALUE</code>. Additionally, if a {@link #send(ProducerRecord)} * returns an error even with infinite retries (for instance if the message expires in the buffer before being sent), * then it is recommended to shut down the producer and check the contents of the last produced message to ensure that * it is not duplicated. Finally, the producer can only guarantee idempotence for messages sent within a single session. * </p> * <p>To use the transactional producer and the attendant APIs, you must set the <code>transactional.id</code> * configuration property. If the <code>transactional.id</code> is set, idempotence is automatically enabled along with * the producer configs which idempotence depends on. Further, topics which are included in transactions should be configured * for durability. In particular, the <code>replication.factor</code> should be at least <code>3</code>, and the * <code>min.insync.replicas</code> for these topics should be set to 2. Finally, in order for transactional guarantees * to be realized from end-to-end, the consumers must be configured to read only committed messages as well. * </p> * <p> * The purpose of the <code>transactional.id</code> is to enable transaction recovery across multiple sessions of a * single producer instance. It would typically be derived from the shard identifier in a partitioned, stateful, application. * As such, it should be unique to each producer instance running within a partitioned application. * </p> * <p>All the new transactional APIs are blocking and will throw exceptions on failure. The example * below illustrates how the new APIs are meant to be used. 
It is similar to the example above, except that all * 100 messages are part of a single transaction. * </p> * <p> * <pre> * {@code * Properties props = new Properties(); * props.put("bootstrap.servers", "localhost:9092"); * props.put("transactional.id", "my-transactional-id"); * Producer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); * * producer.initTransactions(); * * try { * producer.beginTransaction(); * for (int i = 0; i < 100; i++) * producer.send(new ProducerRecord<>("my-topic", Integer.toString(i), Integer.toString(i))); * producer.commitTransaction(); * } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) { * // We can't recover from these exceptions, so our only option is to close the producer and exit. * producer.close(); * } catch (KafkaException e) { * // For all other exceptions, just abort the transaction and try again. * producer.abortTransaction(); * } * producer.close(); * } </pre> * </p> * <p> * As is hinted at in the example, there can be only one open transaction per producer. All messages sent between the * {@link #beginTransaction()} and {@link #commitTransaction()} calls will be part of a single transaction. When the * <code>transactional.id</code> is specified, all messages sent by the producer must be part of a transaction. * </p> * <p> * The transactional producer uses exceptions to communicate error states. In particular, it is not required * to specify callbacks for <code>producer.send()</code> or to call <code>.get()</code> on the returned Future: a * <code>KafkaException</code> would be thrown if any of the * <code>producer.send()</code> or transactional calls hit an irrecoverable error during a transaction. See the {@link #send(ProducerRecord)} * documentation for more details about detecting errors from a transactional send. 
* </p> * </p>By calling * <code>producer.abortTransaction()</code> upon receiving a <code>KafkaException</code> we can ensure that any * successful writes are marked as aborted, hence keeping the transactional guarantees. * </p> * <p> * This client can communicate with brokers that are version 0.10.0 or newer. Older or newer brokers may not support * certain client features. For instance, the transactional APIs need broker versions 0.11.0 or later. You will receive an * <code>UnsupportedVersionException</code> when invoking an API that is not available in the running broker version. * </p> */ public class KafkaProducer<K, V> implements Producer<K, V> { private final Logger log; private static final String JMX_PREFIX = "kafka.producer"; public static final String NETWORK_THREAD_PREFIX = "kafka-producer-network-thread"; public static final String PRODUCER_METRIC_GROUP_NAME = "producer-metrics"; private final String clientId; // Visible for testing final Metrics metrics; private final KafkaProducerMetrics producerMetrics; private final Partitioner partitioner; private final int maxRequestSize; private final long totalMemorySize; private final ProducerMetadata metadata; private final RecordAccumulator accumulator; private final Sender sender; private final Thread ioThread; private CompressionType compressionType; // ** Changed by Superstream - removed final private final Sensor errors; private final Time time; private final Serializer<K> keySerializer; private final Serializer<V> valueSerializer; private final ProducerConfig producerConfig; private final long maxBlockTimeMs; private final boolean partitionerIgnoreKeys; private final ProducerInterceptors<K, V> interceptors; private final ApiVersions apiVersions; private final TransactionManager transactionManager; //** added by Superstream Superstream superstreamConnection; // added by Superstream ** /** * A producer is instantiated by providing a set of key-value pairs as configuration. 
Valid configuration strings * are documented <a href="http://kafka.apache.org/documentation.html#producerconfigs">here</a>. Values can be * either strings or Objects of the appropriate type (for example a numeric configuration would accept either the * string "42" or the integer 42). * <p> * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. * @param configs The producer configs * */ public KafkaProducer(final Map<String, Object> configs) { this(configs, null, null); } /** * A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value {@link Serializer}. * Valid configuration strings are documented <a href="http://kafka.apache.org/documentation.html#producerconfigs">here</a>. * Values can be either strings or Objects of the appropriate type (for example a numeric configuration would accept * either the string "42" or the integer 42). * <p> * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. * @param configs The producer configs * @param keySerializer The serializer for key that implements {@link Serializer}. The configure() method won't be * called in the producer when the serializer is passed in directly. * @param valueSerializer The serializer for value that implements {@link Serializer}. The configure() method won't * be called in the producer when the serializer is passed in directly. */ public KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer) { this(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)), keySerializer, valueSerializer, null, null, null, Time.SYSTEM); } /** * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings * are documented <a href="http://kafka.apache.org/documentation.html#producerconfigs">here</a>. 
* <p> * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. * @param properties The producer configs */ public KafkaProducer(Properties properties) { this(properties, null, null); } /** * A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value {@link Serializer}. * Valid configuration strings are documented <a href="http://kafka.apache.org/documentation.html#producerconfigs">here</a>. * <p> * Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. * @param properties The producer configs * @param keySerializer The serializer for key that implements {@link Serializer}. The configure() method won't be * called in the producer when the serializer is passed in directly. * @param valueSerializer The serializer for value that implements {@link Serializer}. The configure() method won't * be called in the producer when the serializer is passed in directly. */ public KafkaProducer(Properties properties, Serializer<K> keySerializer, Serializer<V> valueSerializer) { this(Utils.propsToMap(properties), keySerializer, valueSerializer); } /** * Check if partitioner is deprecated and log a warning if it is. */ @SuppressWarnings("deprecation") private void warnIfPartitionerDeprecated() { // Using DefaultPartitioner and UniformStickyPartitioner is deprecated, see KIP-794. if (partitioner instanceof org.apache.kafka.clients.producer.internals.DefaultPartitioner) { log.warn("DefaultPartitioner is deprecated. Please clear " + ProducerConfig.PARTITIONER_CLASS_CONFIG + " configuration setting to get the default partitioning behavior"); } if (partitioner instanceof org.apache.kafka.clients.producer.UniformStickyPartitioner) { log.warn("UniformStickyPartitioner is deprecated. 
Please clear " + ProducerConfig.PARTITIONER_CLASS_CONFIG + " configuration setting and set " + ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG + " to 'true' to get the uniform sticky partitioning behavior"); } } // visible for testing @SuppressWarnings("unchecked") KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer, ProducerMetadata metadata, KafkaClient kafkaClient, ProducerInterceptors<K, V> interceptors, Time time) { try { this.producerConfig = config; this.time = time; String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG); this.clientId = config.getString(ProducerConfig.CLIENT_ID_CONFIG); LogContext logContext; if (transactionalId == null) logContext = new LogContext(String.format("[Producer clientId=%s] ", clientId)); else logContext = new LogContext(String.format("[Producer clientId=%s, transactionalId=%s] ", clientId, transactionalId)); log = logContext.logger(KafkaProducer.class); log.trace("Starting the Kafka producer"); Map<String, String> metricTags = Collections.singletonMap("client-id", clientId); MetricConfig metricConfig = new MetricConfig().samples(config.getInt(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG)) .timeWindow(config.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS) .recordLevel(Sensor.RecordingLevel.forName(config.getString(ProducerConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricTags); List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config); MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); this.metrics = new Metrics(metricConfig, reporters, time, metricsContext); // ** Added by Superstream Map<String, Object> originalsMap = config.originals(); Superstream superstreamConn = (Superstream) originalsMap.get(Consts.superstreamConnectionKey); if (superstreamConn != null) { this.superstreamConnection = superstreamConn; 
this.superstreamConnection.clientCounters.setMetrics(this.metrics); this.superstreamConnection.setFullClientConfigs(config.values()); try { this.superstreamConnection.waitForSuperstreamConfigs(config); }catch (InterruptedException e){ this.superstreamConnection.getSuperstreamPrintStream().println("Error while waiting for producer superstream configs"); } } // Added by Superstream ** this.producerMetrics = new KafkaProducerMetrics(metrics); this.partitioner = config.getConfiguredInstance( ProducerConfig.PARTITIONER_CLASS_CONFIG, Partitioner.class, Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); warnIfPartitionerDeprecated(); this.partitionerIgnoreKeys = config.getBoolean(ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG); long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); if (keySerializer == null) { this.keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, Serializer.class); this.keySerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true); } else { config.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); this.keySerializer = keySerializer; } if (valueSerializer == null) { this.valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, Serializer.class); this.valueSerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false); } else { config.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); this.valueSerializer = valueSerializer; } List<ProducerInterceptor<K, V>> interceptorList = (List) config.getConfiguredInstances( ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptor.class, Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); if (interceptors != null) this.interceptors = interceptors; else this.interceptors = new ProducerInterceptors<>(interceptorList); ClusterResourceListeners clusterResourceListeners = 
configureClusterResourceListeners(this.keySerializer, this.valueSerializer, interceptorList, reporters); this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); this.maxBlockTimeMs = config.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG); int deliveryTimeoutMs = configureDeliveryTimeout(config, log); this.apiVersions = new ApiVersions(); this.transactionManager = configureTransactionState(config, logContext); // There is no need to do work required for adaptive partitioning, if we use a custom partitioner. boolean enableAdaptivePartitioning = partitioner == null && config.getBoolean(ProducerConfig.PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG); RecordAccumulator.PartitionerConfig partitionerConfig = new RecordAccumulator.PartitionerConfig( enableAdaptivePartitioning, config.getLong(ProducerConfig.PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG) ); // As per Kafka producer configuration documentation batch.size may be set to 0 to explicitly disable // batching which in practice actually means using a batch size of 1. 
int batchSize = Math.max(1, config.getInt(ProducerConfig.BATCH_SIZE_CONFIG)); this.accumulator = new RecordAccumulator(logContext, batchSize, this.compressionType, lingerMs(config), retryBackoffMs, deliveryTimeoutMs, partitionerConfig, metrics, PRODUCER_METRIC_GROUP_NAME, time, apiVersions, transactionManager, new BufferPool(this.totalMemorySize, batchSize, metrics, time, PRODUCER_METRIC_GROUP_NAME)); List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses( config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG), config.getString(ProducerConfig.CLIENT_DNS_LOOKUP_CONFIG)); if (metadata != null) { this.metadata = metadata; } else { this.metadata = new ProducerMetadata(retryBackoffMs, config.getLong(ProducerConfig.METADATA_MAX_AGE_CONFIG), config.getLong(ProducerConfig.METADATA_MAX_IDLE_CONFIG), logContext, clusterResourceListeners, Time.SYSTEM); this.metadata.bootstrap(addresses); } this.errors = this.metrics.sensor("errors"); this.sender = newSender(logContext, kafkaClient, this.metadata); String ioThreadName = NETWORK_THREAD_PREFIX + " | " + clientId; this.ioThread = new KafkaThread(ioThreadName, this.sender, true); this.ioThread.start(); config.logUnused(); AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); log.debug("Kafka producer started"); } catch (Throwable t) { // call close methods if internal objects are already constructed this is to prevent resource leak. 
// see KAFKA-2121
close(Duration.ofMillis(0), true);
// now propagate the exception
throw new KafkaException("Failed to construct kafka producer", t);
}
}

// visible for testing
// Test-only constructor: every collaborator (metadata, accumulator, transaction manager, sender,
// interceptors, partitioner, I/O thread) is injected directly instead of being built from the
// config, so unit tests can substitute mocks. Unlike the public constructor, this one does not
// start the ioThread and does not register JMX app info.
KafkaProducer(ProducerConfig config,
              LogContext logContext,
              Metrics metrics,
              Serializer<K> keySerializer,
              Serializer<V> valueSerializer,
              ProducerMetadata metadata,
              RecordAccumulator accumulator,
              TransactionManager transactionManager,
              Sender sender,
              ProducerInterceptors<K, V> interceptors,
              Partitioner partitioner,
              Time time,
              KafkaThread ioThread) {
    this.producerConfig = config;
    this.time = time;
    this.clientId = config.getString(ProducerConfig.CLIENT_ID_CONFIG);
    this.log = logContext.logger(KafkaProducer.class);
    this.metrics = metrics;
    this.producerMetrics = new KafkaProducerMetrics(metrics);
    this.partitioner = partitioner;
    this.keySerializer = keySerializer;
    this.valueSerializer = valueSerializer;
    this.interceptors = interceptors;
    // Sizing and timeout settings are still read from the supplied config.
    this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG);
    this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG);
    this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG));
    this.maxBlockTimeMs = config.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG);
    this.partitionerIgnoreKeys = config.getBoolean(ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG);
    this.apiVersions = new ApiVersions();
    this.transactionManager = transactionManager;
    this.accumulator = accumulator;
    this.errors = this.metrics.sensor("errors");
    this.metadata = metadata;
    this.sender = sender;
    this.ioThread = ioThread;
}

// visible for testing
// Builds the Sender that drives the background network I/O loop. If a KafkaClient is supplied
// (tests), it is used as-is; otherwise a NetworkClient is wired up from the producer config.
Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadata metadata) {
    int maxInflightRequests = producerConfig.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION);
    int requestTimeoutMs = producerConfig.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG);
    ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(producerConfig, time, logContext);
    ProducerMetrics metricsRegistry = new ProducerMetrics(this.metrics);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(metricsRegistry.senderMetrics);
    // Use the injected client when present (testing); otherwise build the real network client.
    KafkaClient client = kafkaClient != null ? kafkaClient : new NetworkClient(
            new Selector(producerConfig.getLong(ProducerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
                    this.metrics, time, "producer", channelBuilder, logContext),
            metadata,
            clientId,
            maxInflightRequests,
            producerConfig.getLong(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG),
            producerConfig.getLong(ProducerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            producerConfig.getInt(ProducerConfig.SEND_BUFFER_CONFIG),
            producerConfig.getInt(ProducerConfig.RECEIVE_BUFFER_CONFIG),
            requestTimeoutMs,
            producerConfig.getLong(ProducerConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG),
            producerConfig.getLong(ProducerConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG),
            time,
            true,
            apiVersions,
            throttleTimeSensor,
            logContext);
    short acks = Short.parseShort(producerConfig.getString(ProducerConfig.ACKS_CONFIG));
    // guaranteeMessageOrder is only possible with a single in-flight request per connection.
    return new Sender(logContext,
            client,
            metadata,
            this.accumulator,
            maxInflightRequests == 1,
            producerConfig.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG),
            acks,
            producerConfig.getInt(ProducerConfig.RETRIES_CONFIG),
            metricsRegistry.senderMetrics,
            time,
            requestTimeoutMs,
            producerConfig.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG),
            this.transactionManager,
            apiVersions);
}

// linger.ms is a long config but used as an int internally; clamp to Integer.MAX_VALUE.
private static int lingerMs(ProducerConfig config) {
    return (int) Math.min(config.getLong(ProducerConfig.LINGER_MS_CONFIG), Integer.MAX_VALUE);
}

// Resolves delivery.timeout.ms, enforcing delivery.timeout.ms >= linger.ms + request.timeout.ms.
private static int configureDeliveryTimeout(ProducerConfig config, Logger log) {
    int deliveryTimeoutMs = config.getInt(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG);
    int lingerMs = lingerMs(config);
    int requestTimeoutMs = config.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG);
    // Sum as long first to avoid int overflow before clamping.
    int lingerAndRequestTimeoutMs = (int) Math.min((long) lingerMs + requestTimeoutMs, Integer.MAX_VALUE);
    if (deliveryTimeoutMs < lingerAndRequestTimeoutMs) {
        if
(config.originals().containsKey(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG)) {
            // throw an exception if the user explicitly set an inconsistent value
            throw new ConfigException(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG
                + " should be equal to or larger than " + ProducerConfig.LINGER_MS_CONFIG
                + " + " + ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG);
        } else {
            // override deliveryTimeoutMs default value to lingerMs + requestTimeoutMs for backward compatibility
            deliveryTimeoutMs = lingerAndRequestTimeoutMs;
            log.warn("{} should be equal to or larger than {} + {}. Setting it to {}.",
                ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, ProducerConfig.LINGER_MS_CONFIG,
                ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, deliveryTimeoutMs);
        }
    }
    return deliveryTimeoutMs;
}

// Creates a TransactionManager when idempotence is enabled; returns null otherwise.
// A non-null transactional.id upgrades the manager from idempotent to fully transactional.
private TransactionManager configureTransactionState(ProducerConfig config, LogContext logContext) {
    TransactionManager transactionManager = null;
    if (config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) {
        final String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
        final int transactionTimeoutMs = config.getInt(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
        final long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG);
        transactionManager = new TransactionManager(
            logContext,
            transactionalId,
            transactionTimeoutMs,
            retryBackoffMs,
            apiVersions
        );
        if (transactionManager.isTransactional())
            log.info("Instantiated a transactional producer.");
        else
            log.info("Instantiated an idempotent producer.");
    } else {
        // ignore unretrieved configurations related to producer transaction
        config.ignore(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
    }
    return transactionManager;
}

/**
 * Needs to be called before any other methods when the {@code transactional.id} is set in the configuration.
 * This method does the following:
 * <ol>
 * <li>Ensures any transactions initiated by previous instances of the producer with the same
 *     {@code transactional.id} are completed.
 *     If the previous instance had failed with a transaction in
 *     progress, it will be aborted. If the last transaction had begun completion,
 *     but not yet finished, this method awaits its completion.</li>
 * <li>Gets the internal producer id and epoch, used in all future transactional
 *     messages issued by the producer.</li>
 * </ol>
 * Note that this method will raise {@link TimeoutException} if the transactional state cannot
 * be initialized before expiration of {@code max.block.ms}. Additionally, it will raise {@link InterruptException}
 * if interrupted. It is safe to retry in either case, but once the transactional state has been successfully
 * initialized, this method should no longer be used.
 *
 * @throws IllegalStateException if no {@code transactional.id} has been configured
 * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker
 *         does not support transactions (i.e. if its version is lower than 0.11.0.0)
 * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured
 *         {@code transactional.id} is not authorized. See the exception for more details
 * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error
 * @throws TimeoutException if the time taken for initialize the transaction has surpassed <code>max.block.ms</code>.
 * @throws InterruptException if the thread is interrupted while blocked
 */
public void initTransactions() {
    throwIfNoTransactionManager();
    throwIfProducerClosed();
    long now = time.nanoseconds();
    TransactionalRequestResult result = transactionManager.initializeTransactions();
    // Wake the sender so it picks up the InitProducerId request; block up to max.block.ms.
    sender.wakeup();
    result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS);
    producerMetrics.recordInit(time.nanoseconds() - now);
}

/**
 * Should be called before the start of each new transaction. Note that prior to the first invocation
 * of this method, you must invoke {@link #initTransactions()} exactly one time.
 *
 * @throws IllegalStateException if no {@code transactional.id} has been configured or if {@link #initTransactions()}
 *         has not yet been invoked
 * @throws ProducerFencedException if another producer with the same transactional.id is active
 * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the producer has attempted to produce with an old epoch
 *         to the partition leader. See the exception for more details
 * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker
 *         does not support transactions (i.e. if its version is lower than 0.11.0.0)
 * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured
 *         {@code transactional.id} is not authorized. See the exception for more details
 * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error
 */
public void beginTransaction() throws ProducerFencedException {
    throwIfNoTransactionManager();
    throwIfProducerClosed();
    long now = time.nanoseconds();
    // Purely local state transition; no broker round-trip happens here.
    transactionManager.beginTransaction();
    producerMetrics.recordBeginTxn(time.nanoseconds() - now);
}

/**
 * Sends a list of specified offsets to the consumer group coordinator, and also marks
 * those offsets as part of the current transaction. These offsets will be considered
 * committed only if the transaction is committed successfully. The committed offset should
 * be the next message your application will consume, i.e. lastProcessedMessageOffset + 1.
 * <p>
 * This method should be used when you need to batch consumed and produced messages
 * together, typically in a consume-transform-produce pattern. Thus, the specified
 * {@code consumerGroupId} should be the same as config parameter {@code group.id} of the used
 * {@link KafkaConsumer consumer}.
 * Note, that the consumer should have {@code enable.auto.commit=false}
 * and should also not commit offsets manually (via {@link KafkaConsumer#commitSync(Map) sync} or
 * {@link KafkaConsumer#commitAsync(Map, OffsetCommitCallback) async} commits).
 *
 * <p>
 * This method is a blocking call that waits until the request has been received and acknowledged by the consumer group
 * coordinator; but the offsets are not considered as committed until the transaction itself is successfully committed later (via
 * the {@link #commitTransaction()} call).
 *
 * @throws IllegalStateException if no transactional.id has been configured, no transaction has been started
 * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active
 * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker
 *         does not support transactions (i.e. if its version is lower than 0.11.0.0)
 * @throws org.apache.kafka.common.errors.UnsupportedForMessageFormatException fatal error indicating the message
 *         format used for the offsets topic on the broker does not support transactions
 * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured
 *         transactional.id is not authorized, or the consumer group id is not authorized.
 * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the producer has attempted to produce with an old epoch
 *         to the partition leader. See the exception for more details
 * @throws TimeoutException if the time taken for sending the offsets has surpassed <code>max.block.ms</code>.
 * @throws KafkaException if the producer has encountered a previous fatal or abortable error, or for any
 *         other unexpected error
 *
 * @deprecated Since 3.0.0, please use {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} instead.
 */
@Deprecated
public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
                                     String consumerGroupId) throws ProducerFencedException {
    // Delegates to the ConsumerGroupMetadata overload with group-id-only metadata (weaker fencing).
    sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
}

/**
 * Sends a list of specified offsets to the consumer group coordinator, and also marks
 * those offsets as part of the current transaction. These offsets will be considered
 * committed only if the transaction is committed successfully. The committed offset should
 * be the next message your application will consume, i.e. lastProcessedMessageOffset + 1.
 * <p>
 * This method should be used when you need to batch consumed and produced messages
 * together, typically in a consume-transform-produce pattern. Thus, the specified
 * {@code groupMetadata} should be extracted from the used {@link KafkaConsumer consumer} via
 * {@link KafkaConsumer#groupMetadata()} to leverage consumer group metadata. This will provide
 * stronger fencing than just supplying the {@code consumerGroupId} and passing in {@code new ConsumerGroupMetadata(consumerGroupId)},
 * however note that the full set of consumer group metadata returned by {@link KafkaConsumer#groupMetadata()}
 * requires the brokers to be on version 2.5 or newer to understand.
 *
 * <p>
 * This method is a blocking call that waits until the request has been received and acknowledged by the consumer group
 * coordinator; but the offsets are not considered as committed until the transaction itself is successfully committed later (via
 * the {@link #commitTransaction()} call).
 *
 * <p>
 * Note, that the consumer should have {@code enable.auto.commit=false} and should
 * also not commit offsets manually (via {@link KafkaConsumer#commitSync(Map) sync} or
 * {@link KafkaConsumer#commitAsync(Map, OffsetCommitCallback) async} commits).
 * This method will raise {@link TimeoutException} if the producer cannot send offsets before expiration of {@code max.block.ms}.
 * Additionally, it will raise {@link InterruptException} if interrupted.
 *
 * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started.
 * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active
 * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker
 *         does not support transactions (i.e. if its version is lower than 0.11.0.0) or
 *         the broker doesn't support the latest version of transactional API with all consumer group metadata
 *         (i.e. if its version is lower than 2.5.0).
 * @throws org.apache.kafka.common.errors.UnsupportedForMessageFormatException fatal error indicating the message
 *         format used for the offsets topic on the broker does not support transactions
 * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured
 *         transactional.id is not authorized, or the consumer group id is not authorized.
 * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried
 *         (e.g. if the consumer has been kicked out of the group). Users should handle this by aborting the transaction.
 * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this producer instance gets fenced by broker due to a
 *         mis-configured consumer instance id within group metadata.
 * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the producer has attempted to produce with an old epoch
 *         to the partition leader. See the exception for more details
 * @throws KafkaException if the producer has encountered a previous fatal or abortable error, or for any
 *         other unexpected error
 * @throws TimeoutException if the time taken for sending the offsets has surpassed <code>max.block.ms</code>.
 * @throws InterruptException if the thread is interrupted while blocked
 */
public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
                                     ConsumerGroupMetadata groupMetadata) throws ProducerFencedException {
    throwIfInvalidGroupMetadata(groupMetadata);
    throwIfNoTransactionManager();
    throwIfProducerClosed();
    // An empty offsets map is a no-op; nothing is sent to the coordinator.
    if (!offsets.isEmpty()) {
        long start = time.nanoseconds();
        TransactionalRequestResult result = transactionManager.sendOffsetsToTransaction(offsets, groupMetadata);
        sender.wakeup();
        result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS);
        producerMetrics.recordSendOffsets(time.nanoseconds() - start);
    }
}

/**
 * Commits the ongoing transaction. This method will flush any unsent records before actually committing the transaction.
 * <p>
 * Further, if any of the {@link #send(ProducerRecord)} calls which were part of the transaction hit irrecoverable
 * errors, this method will throw the last received exception immediately and the transaction will not be committed.
 * So all {@link #send(ProducerRecord)} calls in a transaction must succeed in order for this method to succeed.
 * <p>
 * If the transaction is committed successfully and this method returns without throwing an exception, it is guaranteed
 * that all {@link Callback callbacks} for records in the transaction will have been invoked and completed.
 * Note that exceptions thrown by callbacks are ignored; the producer proceeds to commit the transaction in any case.
 * <p>
 * Note that this method will raise {@link TimeoutException} if the transaction cannot be committed before expiration
 * of {@code max.block.ms}, but this does not mean the request did not actually reach the broker. In fact, it only indicates
 * that we cannot get the acknowledgement response in time, so it's up to the application's logic
 * to decide how to handle timeouts.
 * Additionally, it will raise {@link InterruptException} if interrupted.
 * It is safe to retry in either case, but it is not possible to attempt a different operation (such as abortTransaction)
 * since the commit may already be in the progress of completing. If not retrying, the only option is to close the producer.
 *
 * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started
 * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active
 * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker
 *         does not support transactions (i.e. if its version is lower than 0.11.0.0)
 * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured
 *         transactional.id is not authorized. See the exception for more details
 * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the producer has attempted to produce with an old epoch
 *         to the partition leader. See the exception for more details
 * @throws KafkaException if the producer has encountered a previous fatal or abortable error, or for any
 *         other unexpected error
 * @throws TimeoutException if the time taken for committing the transaction has surpassed <code>max.block.ms</code>.
 * @throws InterruptException if the thread is interrupted while blocked
 */
public void commitTransaction() throws ProducerFencedException {
    throwIfNoTransactionManager();
    throwIfProducerClosed();
    long commitStart = time.nanoseconds();
    TransactionalRequestResult result = transactionManager.beginCommit();
    // Wake the sender to flush outstanding batches and issue EndTxn; block up to max.block.ms.
    sender.wakeup();
    result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS);
    producerMetrics.recordCommitTxn(time.nanoseconds() - commitStart);
}

/**
 * Aborts the ongoing transaction. Any unflushed produce messages will be aborted when this call is made.
 * This call will throw an exception immediately if any prior {@link #send(ProducerRecord)} calls failed with a
 * {@link ProducerFencedException} or an instance of {@link org.apache.kafka.common.errors.AuthorizationException}.
 * <p>
 * Note that this method will raise {@link TimeoutException} if the transaction cannot be aborted before expiration
 * of {@code max.block.ms}, but this does not mean the request did not actually reach the broker. In fact, it only indicates
 * that we cannot get the acknowledgement response in time, so it's up to the application's logic
 * to decide how to handle timeouts. Additionally, it will raise {@link InterruptException} if interrupted.
 * It is safe to retry in either case, but it is not possible to attempt a different operation (such as {@link #commitTransaction})
 * since the abort may already be in the progress of completing. If not retrying, the only option is to close the producer.
 *
 * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started
 * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active
 * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the producer has attempted to produce with an old epoch
 *         to the partition leader. See the exception for more details
 * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker
 *         does not support transactions (i.e. if its version is lower than 0.11.0.0)
 * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured
 *         transactional.id is not authorized. See the exception for more details
 * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error
 * @throws TimeoutException if the time taken for aborting the transaction has surpassed <code>max.block.ms</code>.
 * @throws InterruptException if the thread is interrupted while blocked
 */
public void abortTransaction() throws ProducerFencedException {
    throwIfNoTransactionManager();
    throwIfProducerClosed();
    log.info("Aborting incomplete transaction");
    long abortStart = time.nanoseconds();
    TransactionalRequestResult result = transactionManager.beginAbort();
    sender.wakeup();
    result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS);
    producerMetrics.recordAbortTxn(time.nanoseconds() - abortStart);
}

/**
 * Asynchronously send a record to a topic. Equivalent to <code>send(record, null)</code>.
 * See {@link #send(ProducerRecord, Callback)} for details.
 */
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record) {
    return send(record, null);
}

/**
 * Asynchronously send a record to a topic and invoke the provided callback when the send has been acknowledged.
 * <p>
 * The send is asynchronous and this method will return immediately once the record has been stored in the buffer of
 * records waiting to be sent. This allows sending many records in parallel without blocking to wait for the
 * response after each one.
 * <p>
 * The result of the send is a {@link RecordMetadata} specifying the partition the record was sent to, the offset
 * it was assigned and the timestamp of the record. If the producer is configured with acks = 0, the {@link RecordMetadata}
 * will have offset = -1 because the producer does not wait for the acknowledgement from the broker.
 * If {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME CreateTime} is used by the topic, the timestamp
 * will be the user provided timestamp or the record send time if the user did not specify a timestamp for the
 * record. If {@link org.apache.kafka.common.record.TimestampType#LOG_APPEND_TIME LogAppendTime} is used for the
 * topic, the timestamp will be the Kafka broker local time when the message is appended.
 * <p>
 * Since the send call is asynchronous it returns a {@link java.util.concurrent.Future Future} for the
 * {@link RecordMetadata} that will be assigned to this record. Invoking {@link java.util.concurrent.Future#get()
 * get()} on this future will block until the associated request completes and then return the metadata for the record
 * or throw any exception that occurred while sending the record.
 * <p>
 * If you want to simulate a simple blocking call you can call the <code>get()</code> method immediately:
 *
 * <pre>
 * {@code
 * byte[] key = "key".getBytes();
 * byte[] value = "value".getBytes();
 * ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("my-topic", key, value)
 * producer.send(record).get();
 * }</pre>
 * <p>
 * Fully non-blocking usage can make use of the {@link Callback} parameter to provide a callback that
 * will be invoked when the request is complete.
 *
 * <pre>
 * {@code
 * ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("the-topic", key, value);
 * producer.send(myRecord,
 *               new Callback() {
 *                   public void onCompletion(RecordMetadata metadata, Exception e) {
 *                       if(e != null) {
 *                          e.printStackTrace();
 *                       } else {
 *                          System.out.println("The offset of the record we just sent is: " + metadata.offset());
 *                       }
 *                   }
 *               });
 * }
 * </pre>
 *
 * Callbacks for records being sent to the same partition are guaranteed to execute in order. That is, in the
 * following example <code>callback1</code> is guaranteed to execute before <code>callback2</code>:
 *
 * <pre>
 * {@code
 * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key1, value1), callback1);
 * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key2, value2), callback2);
 * }
 * </pre>
 * <p>
 * When used as part of a transaction, it is not necessary to define a callback or check the result of the future
 * in order to detect errors from <code>send</code>. If any of the send calls failed with an irrecoverable error,
 * the final {@link #commitTransaction()} call will fail and throw the exception from the last failed send. When
 * this happens, your application should call {@link #abortTransaction()} to reset the state and continue to send
 * data.
 * </p>
 * <p>
 * Some transactional send errors cannot be resolved with a call to {@link #abortTransaction()}. In particular,
 * if a transactional send finishes with a {@link ProducerFencedException}, a {@link org.apache.kafka.common.errors.OutOfOrderSequenceException},
 * a {@link org.apache.kafka.common.errors.UnsupportedVersionException}, or an
 * {@link org.apache.kafka.common.errors.AuthorizationException}, then the only option left is to call {@link #close()}.
 * Fatal errors cause the producer to enter a defunct state in which future API calls will continue to raise
 * the same underlying error wrapped in a new {@link KafkaException}.
 * </p>
 * <p>
 * It is a similar picture when idempotence is enabled, but no <code>transactional.id</code> has been configured.
 * In this case, {@link org.apache.kafka.common.errors.UnsupportedVersionException} and
 * {@link org.apache.kafka.common.errors.AuthorizationException} are considered fatal errors. However,
 * {@link ProducerFencedException} does not need to be handled. Additionally, it is possible to continue
 * sending after receiving an {@link org.apache.kafka.common.errors.OutOfOrderSequenceException}, but doing so
 * can result in out of order delivery of pending messages. To ensure proper ordering, you should close the
 * producer and create a new instance.
 * </p>
 * <p>
 * If the message format of the destination topic is not upgraded to 0.11.0.0, idempotent and transactional
 * produce requests will fail with an {@link org.apache.kafka.common.errors.UnsupportedForMessageFormatException}
 * error. If this is encountered during a transaction, it is possible to abort and continue. But note that future
 * sends to the same topic will continue receiving the same exception until the topic is upgraded.
 * </p>
 * <p>
 * Note that callbacks will generally execute in the I/O thread of the producer and so should be reasonably fast or
 * they will delay the sending of messages from other threads. If you want to execute blocking or computationally
 * expensive callbacks it is recommended to use your own {@link java.util.concurrent.Executor} in the callback body
 * to parallelize processing.
 *
 * @param record The record to send
 * @param callback A user-supplied callback to execute when the record has been acknowledged by the server (null
 *        indicates no callback)
 *
 * @throws AuthenticationException if authentication fails. See the exception for more details
 * @throws AuthorizationException fatal error indicating that the producer is not allowed to write
 * @throws IllegalStateException if a transactional.id has been configured and no transaction has been started, or
 *                               when send is invoked after producer has been closed.
 * @throws InterruptException If the thread is interrupted while blocked
 * @throws SerializationException If the key or value are not valid objects given the configured serializers
 * @throws TimeoutException If the record could not be appended to the send buffer due to memory unavailable
 *                          or missing metadata within {@code max.block.ms}.
 * @throws KafkaException If a Kafka related error occurs that does not belong to the public API exceptions.
 */
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
    // intercept the record, which can be potentially modified; this method does not throw exceptions
    ProducerRecord<K, V> interceptedRecord = this.interceptors.onSend(record);
    return doSend(interceptedRecord, callback);
}

// Verify that this producer instance has not been closed. This method throws IllegalStateException if the producer
// has already been closed.
private void throwIfProducerClosed() { if (sender == null || !sender.isRunning()) throw new IllegalStateException("Cannot perform operation after producer has been closed"); } /** * Call deprecated {@link Partitioner#onNewBatch} */ @SuppressWarnings("deprecation") private void onNewBatch(String topic, Cluster cluster, int prevPartition) { assert partitioner != null; partitioner.onNewBatch(topic, cluster, prevPartition); } /** * Implementation of asynchronously send a record to a topic. */ private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) { // Append callback takes care of the following: // - call interceptors and user callback on completion // - remember partition that is calculated in RecordAccumulator.append AppendCallbacks<K, V> appendCallbacks = new AppendCallbacks<K, V>(callback, this.interceptors, record); try { throwIfProducerClosed(); // first make sure the metadata for the topic is available long nowMs = time.milliseconds(); ClusterAndWaitTime clusterAndWaitTime; try { clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), nowMs, maxBlockTimeMs); } catch (KafkaException e) { if (metadata.isClosed()) throw new KafkaException("Producer closed while send in progress", e); throw e; } nowMs += clusterAndWaitTime.waitedOnMetadataMs; long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs); Cluster cluster = clusterAndWaitTime.cluster; byte[] serializedKey; try { serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key()); } catch (ClassCastException cce) { throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer", cce); } byte[] serializedValue; try { serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value()); } catch (ClassCastException cce) { 
throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer", cce); } // Try to calculate partition, but note that after this call it can be RecordMetadata.UNKNOWN_PARTITION, // which means that the RecordAccumulator would pick a partition using built-in logic (which may // take into account broker load, the amount of data produced to each partition, etc.). int partition = partition(record, serializedKey, serializedValue, cluster); //** added by Superstream if (superstreamConnection != null) { if (superstreamConnection.compressionEnabled) { if (this.compressionType.name() != (superstreamConnection.compressionType)){ switch (superstreamConnection.compressionType.toLowerCase()) { case "gzip": accumulator.updateCompressionType(CompressionType.GZIP); this.compressionType = CompressionType.GZIP; break; case "snappy": accumulator.updateCompressionType(CompressionType.SNAPPY); this.compressionType = CompressionType.SNAPPY; break; case "lz4": accumulator.updateCompressionType(CompressionType.LZ4); this.compressionType = CompressionType.LZ4; break; case "zstd": accumulator.updateCompressionType(CompressionType.ZSTD); this.compressionType = CompressionType.ZSTD; break; default: this.superstreamConnection.getSuperstreamPrintStream().println("Superstream: unknown compression type: " + superstreamConnection.compressionType + ", defaulting to ZSTD"); accumulator.updateCompressionType(CompressionType.ZSTD); this.compressionType = CompressionType.ZSTD; break; } } } else { if (superstreamConnection.compressionTurnedOffBySuperstream) { accumulator.updateCompressionType(CompressionType.NONE); this.compressionType = CompressionType.NONE; } } } // added by Superstream ** setReadOnly(record.headers()); Header[] headers = record.headers().toArray(); int serializedSize = 
AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), compressionType, serializedKey, serializedValue, headers); ensureValidRecordSize(serializedSize); long timestamp = record.timestamp() == null ? nowMs : record.timestamp(); // A custom partitioner may take advantage on the onNewBatch callback. boolean abortOnNewBatch = partitioner != null; // Append the record to the accumulator. Note, that the actual partition may be // calculated there and can be accessed via appendCallbacks.topicPartition. RecordAccumulator.RecordAppendResult result = accumulator.append(record.topic(), partition, timestamp, serializedKey, serializedValue, headers, appendCallbacks, remainingWaitMs, abortOnNewBatch, nowMs, cluster); assert appendCallbacks.getPartition() != RecordMetadata.UNKNOWN_PARTITION; if (result.abortForNewBatch) { int prevPartition = partition; onNewBatch(record.topic(), cluster, prevPartition); partition = partition(record, serializedKey, serializedValue, cluster); if (log.isTraceEnabled()) { log.trace("Retrying append due to new batch creation for topic {} partition {}. The old partition was {}", record.topic(), partition, prevPartition); } result = accumulator.append(record.topic(), partition, timestamp, serializedKey, serializedValue, headers, appendCallbacks, remainingWaitMs, false, nowMs, cluster); } // Add the partition to the transaction (if in progress) after it has been successfully // appended to the accumulator. We cannot do it before because the partition may be // unknown or the initially selected partition may be changed when the batch is closed // (as indicated by `abortForNewBatch`). Note that the `Sender` will refuse to dequeue // batches from the accumulator until they have been added to the transaction. 
if (transactionManager != null) { transactionManager.maybeAddPartition(appendCallbacks.topicPartition()); } if (result.batchIsFull || result.newBatchCreated) { log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), appendCallbacks.getPartition()); this.sender.wakeup(); } return result.future; // handling exceptions and record the errors; // for API exceptions return them in the future, // for other exceptions throw directly } catch (ApiException e) { log.debug("Exception occurred during message send:", e); if (callback != null) { TopicPartition tp = appendCallbacks.topicPartition(); RecordMetadata nullMetadata = new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1); callback.onCompletion(nullMetadata, e); } this.errors.record(); this.interceptors.onSendError(record, appendCallbacks.topicPartition(), e); if (transactionManager != null) { transactionManager.maybeTransitionToErrorState(e); } return new FutureFailure(e); } catch (InterruptedException e) { this.errors.record(); this.interceptors.onSendError(record, appendCallbacks.topicPartition(), e); throw new InterruptException(e); } catch (KafkaException e) { this.errors.record(); this.interceptors.onSendError(record, appendCallbacks.topicPartition(), e); throw e; } catch (Exception e) { // we notify interceptor about all exceptions, since onSend is called before anything else in this method this.interceptors.onSendError(record, appendCallbacks.topicPartition(), e); throw e; } } private void setReadOnly(Headers headers) { if (headers instanceof RecordHeaders) { ((RecordHeaders) headers).setReadOnly(); } } /** * Wait for cluster metadata including partitions for the given topic to be available. 
     * @param topic The topic we want metadata for
     * @param partition A specific partition expected to exist in metadata, or null if there's no preference
     * @param nowMs The current time in ms
     * @param maxWaitMs The maximum time in ms for waiting on the metadata
     * @return The cluster containing topic metadata and the amount of time we waited in ms
     * @throws TimeoutException if metadata could not be refreshed within {@code max.block.ms}
     * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close
     */
    private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long nowMs, long maxWaitMs) throws InterruptedException {
        // add topic to metadata topic list if it is not there already and reset expiry
        Cluster cluster = metadata.fetch();

        if (cluster.invalidTopics().contains(topic))
            throw new InvalidTopicException(topic);

        metadata.add(topic, nowMs);

        Integer partitionsCount = cluster.partitionCountForTopic(topic);
        // Return cached metadata if we have it, and if the record's partition is either undefined
        // or within the known partition range
        if (partitionsCount != null && (partition == null || partition < partitionsCount))
            return new ClusterAndWaitTime(cluster, 0);

        long remainingWaitMs = maxWaitMs;
        long elapsed = 0;
        // Issue metadata requests until we have metadata for the topic and the requested partition,
        // or until maxWaitTimeMs is exceeded. This is necessary in case the metadata
        // is stale and the number of partitions for this topic has increased in the meantime.
        long nowNanos = time.nanoseconds();
        do {
            if (partition != null) {
                log.trace("Requesting metadata update for partition {} of topic {}.", partition, topic);
            } else {
                log.trace("Requesting metadata update for topic {}.", topic);
            }
            metadata.add(topic, nowMs + elapsed);
            int version = metadata.requestUpdateForTopic(topic);
            // Nudge the sender thread so it actually issues the metadata request.
            sender.wakeup();
            try {
                metadata.awaitUpdate(version, remainingWaitMs);
            } catch (TimeoutException ex) {
                // Rethrow with original maxWaitMs to prevent logging exception with remainingWaitMs
                throw new TimeoutException(
                        String.format("Topic %s not present in metadata after %d ms.",
                                topic, maxWaitMs));
            }
            cluster = metadata.fetch();
            elapsed = time.milliseconds() - nowMs;
            if (elapsed >= maxWaitMs) {
                // Distinguish "topic unknown" from "partition out of range" in the error message.
                throw new TimeoutException(partitionsCount == null ?
                        String.format("Topic %s not present in metadata after %d ms.",
                                topic, maxWaitMs) :
                        String.format("Partition %d of topic %s with partition count %d is not present in metadata after %d ms.",
                                partition, topic, partitionsCount, maxWaitMs));
            }
            metadata.maybeThrowExceptionForTopic(topic);
            remainingWaitMs = maxWaitMs - elapsed;
            partitionsCount = cluster.partitionCountForTopic(topic);
            // Loop until the topic is known AND (when a specific partition was requested) that
            // partition is within the known partition count.
        } while (partitionsCount == null || (partition != null && partition >= partitionsCount));

        producerMetrics.recordMetadataWait(time.nanoseconds() - nowNanos);

        return new ClusterAndWaitTime(cluster, elapsed);
    }

    /**
     * Validate that the record size isn't too large
     */
    private void ensureValidRecordSize(int size) {
        // Rejects records that exceed either the per-request cap or the whole producer buffer.
        if (size > maxRequestSize)
            throw new RecordTooLargeException("The message is " + size +
                    " bytes when serialized which is larger than " + maxRequestSize + ", which is the value of the " +
                    ProducerConfig.MAX_REQUEST_SIZE_CONFIG + " configuration.");
        if (size > totalMemorySize)
            throw new RecordTooLargeException("The message is " + size +
                    " bytes when serialized which is larger than the total memory buffer you have configured with the " +
                    ProducerConfig.BUFFER_MEMORY_CONFIG +
                    " configuration.");
    }

    /**
     * Invoking this method makes all
 buffered records immediately available to send (even if <code>linger.ms</code> is
     * greater than 0) and blocks on the completion of the requests associated with these records. The post-condition
     * of <code>flush()</code> is that any previously sent record will have completed (e.g. <code>Future.isDone() == true</code>).
     * A request is considered completed when it is successfully acknowledged
     * according to the <code>acks</code> configuration you have specified or else it results in an error.
     * <p>
     * Other threads can continue sending records while one thread is blocked waiting for a flush call to complete,
     * however no guarantee is made about the completion of records sent after the flush call begins.
     * <p>
     * This method can be useful when consuming from some input system and producing into Kafka. The <code>flush()</code> call
     * gives a convenient way to ensure all previously sent messages have actually completed.
     * <p>
     * This example shows how to consume from one Kafka topic and produce to another Kafka topic:
     * <pre>
     * {@code
     * for(ConsumerRecord<String, String> record: consumer.poll(100))
     *     producer.send(new ProducerRecord("my-topic", record.key(), record.value());
     * producer.flush();
     * consumer.commitSync();
     * }
     * </pre>
     *
     * Note that the above example may drop records if the produce request fails. If we want to ensure that this does not occur
     * we need to set <code>retries=&lt;large_number&gt;</code> in our config.
     * </p>
     * <p>
     * Applications don't need to call this method for transactional producers, since the {@link #commitTransaction()} will
     * flush all buffered records before performing the commit. This ensures that all the {@link #send(ProducerRecord)}
     * calls made since the previous {@link #beginTransaction()} are completed before the commit.
     * </p>
     *
     * @throws InterruptException If the thread is interrupted while blocked
     */
    @Override
    public void flush() {
        log.trace("Flushing accumulated records in producer.");
        long start = time.nanoseconds();
        this.accumulator.beginFlush();
        // Wake the sender so buffered batches are drained without waiting for linger.ms.
        this.sender.wakeup();
        try {
            this.accumulator.awaitFlushCompletion();
        } catch (InterruptedException e) {
            throw new InterruptException("Flush interrupted.", e);
        } finally {
            producerMetrics.recordFlush(time.nanoseconds() - start);
        }
    }

    /**
     * Get the partition metadata for the given topic. This can be used for custom partitioning.
     * @throws AuthenticationException if authentication fails. See the exception for more details
     * @throws AuthorizationException if not authorized to the specified topic. See the exception for more details
     * @throws InterruptException if the thread is interrupted while blocked
     * @throws TimeoutException if metadata could not be refreshed within {@code max.block.ms}
     * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close
     */
    @Override
    public List<PartitionInfo> partitionsFor(String topic) {
        Objects.requireNonNull(topic, "topic cannot be null");
        try {
            // May block up to max.block.ms while metadata is fetched.
            return waitOnMetadata(topic, null, time.milliseconds(), maxBlockTimeMs).cluster.partitionsForTopic(topic);
        } catch (InterruptedException e) {
            throw new InterruptException(e);
        }
    }

    /**
     * Get the full set of internal metrics maintained by the producer.
     */
    @Override
    public Map<MetricName, ? extends Metric> metrics() {
        return Collections.unmodifiableMap(this.metrics.metrics());
    }

    /**
     * Close this producer. This method blocks until all previously sent requests complete.
     * This method is equivalent to <code>close(Long.MAX_VALUE, TimeUnit.MILLISECONDS)</code>.
     * <p>
     * <strong>If close() is called from {@link Callback}, a warning message will be logged and close(0, TimeUnit.MILLISECONDS)
     * will be called instead. We do this because the sender thread would otherwise try to join itself and
     * block forever.</strong>
     * <p>
     *
     * @throws InterruptException If the thread is interrupted while blocked.
     * @throws KafkaException If an unexpected error occurs while trying to close the client, this error should be treated
     *                        as fatal and indicate the client is no longer usable.
     */
    @Override
    public void close() {
        close(Duration.ofMillis(Long.MAX_VALUE));
    }

    /**
     * This method waits up to <code>timeout</code> for the producer to complete the sending of all incomplete requests.
     * <p>
     * If the producer is unable to complete all requests before the timeout expires, this method will fail
     * any unsent and unacknowledged records immediately. It will also abort the ongoing transaction if it's not
     * already completing.
     * <p>
     * If invoked from within a {@link Callback} this method will not block and will be equivalent to
     * <code>close(Duration.ofMillis(0))</code>. This is done since no further sending will happen while
     * blocking the I/O thread of the producer.
     *
     * @param timeout The maximum time to wait for producer to complete any pending requests. The value should be
     *                non-negative. Specifying a timeout of zero means do not wait for pending send requests to complete.
     * @throws InterruptException If the thread is interrupted while blocked.
     * @throws KafkaException If an unexpected error occurs while trying to close the client, this error should be treated
     *                        as fatal and indicate the client is no longer usable.
     * @throws IllegalArgumentException If the <code>timeout</code> is negative.
     *
     */
    @Override
    public void close(Duration timeout) {
        close(timeout, false);
    }

    // Shared close path. When swallowException is true, any failure during shutdown is recorded
    // but not rethrown (used internally for best-effort cleanup).
    private void close(Duration timeout, boolean swallowException) {
        long timeoutMs = timeout.toMillis();
        if (timeoutMs < 0)
            throw new IllegalArgumentException("The timeout cannot be negative.");
        log.info("Closing the Kafka producer with timeoutMillis = {} ms.", timeoutMs);

        // this will keep track of the first encountered exception
        AtomicReference<Throwable> firstException = new AtomicReference<>();
        // Calling close from the I/O (sender) thread itself would self-join and deadlock,
        // so graceful waiting is skipped in that case.
        boolean invokedFromCallback = Thread.currentThread() == this.ioThread;
        if (timeoutMs > 0) {
            if (invokedFromCallback) {
                log.warn("Overriding close timeout {} ms to 0 ms in order to prevent useless blocking due to self-join. " +
                        "This means you have incorrectly invoked close with a non-zero timeout from the producer call-back.",
                        timeoutMs);
            } else {
                // Try to close gracefully.
                if (this.sender != null)
                    this.sender.initiateClose();
                if (this.ioThread != null) {
                    try {
                        this.ioThread.join(timeoutMs);
                    } catch (InterruptedException t) {
                        firstException.compareAndSet(null, new InterruptException(t));
                        log.error("Interrupted while joining ioThread", t);
                    }
                }
            }
        }

        // If the graceful path did not finish in time (or was skipped), force-close and fail
        // any remaining in-flight requests.
        if (this.sender != null && this.ioThread != null && this.ioThread.isAlive()) {
            log.info("Proceeding to force close the producer since pending requests could not be completed " +
                    "within timeout {} ms.", timeoutMs);
            this.sender.forceClose();
            // Only join the sender thread when not calling from callback.
            if (!invokedFromCallback) {
                try {
                    this.ioThread.join();
                } catch (InterruptedException e) {
                    firstException.compareAndSet(null, new InterruptException(e));
                }
            }
        }

        // Release auxiliary resources; closeQuietly records (not throws) failures into firstException.
        Utils.closeQuietly(interceptors, "producer interceptors", firstException);
        Utils.closeQuietly(producerMetrics, "producer metrics wrapper", firstException);
        Utils.closeQuietly(metrics, "producer metrics", firstException);
        Utils.closeQuietly(keySerializer, "producer keySerializer", firstException);
        Utils.closeQuietly(valueSerializer, "producer valueSerializer", firstException);
        Utils.closeQuietly(partitioner, "producer partitioner", firstException);
        AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
        Throwable exception = firstException.get();
        if (exception != null && !swallowException) {
            if (exception instanceof InterruptException) {
                throw (InterruptException) exception;
            }
            throw new KafkaException("Failed to close kafka producer", exception);
        }
        log.debug("Kafka producer has been closed");
    }

    // Collect every component (serializers, interceptors, metrics reporters, ...) that wants
    // cluster-metadata update notifications into one listener registry.
    private ClusterResourceListeners configureClusterResourceListeners(Serializer<K> keySerializer, Serializer<V> valueSerializer, List<?>... candidateLists) {
        ClusterResourceListeners clusterResourceListeners = new ClusterResourceListeners();
        for (List<?> candidateList: candidateLists)
            clusterResourceListeners.maybeAddAll(candidateList);

        clusterResourceListeners.maybeAdd(keySerializer);
        clusterResourceListeners.maybeAdd(valueSerializer);
        return clusterResourceListeners;
    }

    /**
     * computes partition for given record.
     * if the record has partition returns the value otherwise
     * if custom partitioner is specified, call it to compute partition
     * otherwise try to calculate partition based on key.
     * If there is no key or key should be ignored return
     * RecordMetadata.UNKNOWN_PARTITION to indicate any partition
     * can be used (the partition is then calculated by built-in
     * partitioning logic).
*/ private int partition(ProducerRecord<K, V> record, byte[] serializedKey, byte[] serializedValue, Cluster cluster) { if (record.partition() != null) return record.partition(); if (partitioner != null) { int customPartition = partitioner.partition( record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster); if (customPartition < 0) { throw new IllegalArgumentException(String.format( "The partitioner generated an invalid partition number: %d. Partition number should always be non-negative.", customPartition)); } return customPartition; } if (serializedKey != null && !partitionerIgnoreKeys) { // hash the keyBytes to choose a partition return BuiltInPartitioner.partitionForKey(serializedKey, cluster.partitionsForTopic(record.topic()).size()); } else { return RecordMetadata.UNKNOWN_PARTITION; } } private void throwIfInvalidGroupMetadata(ConsumerGroupMetadata groupMetadata) { if (groupMetadata == null) { throw new IllegalArgumentException("Consumer group metadata could not be null"); } else if (groupMetadata.generationId() > 0 && JoinGroupRequest.UNKNOWN_MEMBER_ID.equals(groupMetadata.memberId())) { throw new IllegalArgumentException("Passed in group metadata " + groupMetadata + " has generationId > 0 but member.id "); } } private void throwIfNoTransactionManager() { if (transactionManager == null) throw new IllegalStateException("Cannot use transactional methods without enabling transactions " + "by setting the " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " configuration property"); } // Visible for testing String getClientId() { return clientId; } private static class ClusterAndWaitTime { final Cluster cluster; final long waitedOnMetadataMs; ClusterAndWaitTime(Cluster cluster, long waitedOnMetadataMs) { this.cluster = cluster; this.waitedOnMetadataMs = waitedOnMetadataMs; } } private static class FutureFailure implements Future<RecordMetadata> { private final ExecutionException exception; public FutureFailure(Exception exception) { 
this.exception = new ExecutionException(exception); } @Override public boolean cancel(boolean interrupt) { return false; } @Override public RecordMetadata get() throws ExecutionException { throw this.exception; } @Override public RecordMetadata get(long timeout, TimeUnit unit) throws ExecutionException { throw this.exception; } @Override public boolean isCancelled() { return false; } @Override public boolean isDone() { return true; } } /** * Callbacks that are called by the RecordAccumulator append functions: * - user callback * - interceptor callbacks * - partition callback */ private class AppendCallbacks<K, V> implements RecordAccumulator.AppendCallbacks { private final Callback userCallback; private final ProducerInterceptors<K, V> interceptors; private final String topic; private final Integer recordPartition; private final String recordLogString; private volatile int partition = RecordMetadata.UNKNOWN_PARTITION; private volatile TopicPartition topicPartition; private AppendCallbacks(Callback userCallback, ProducerInterceptors<K, V> interceptors, ProducerRecord<K, V> record) { this.userCallback = userCallback; this.interceptors = interceptors; // Extract record info as we don't want to keep a reference to the record during // whole lifetime of the batch. // We don't want to have an NPE here, because the interceptors would not be notified (see .doSend). topic = record != null ? record.topic() : null; recordPartition = record != null ? record.partition() : null; recordLogString = log.isTraceEnabled() && record != null ? 
record.toString() : ""; } @Override public void onCompletion(RecordMetadata metadata, Exception exception) { if (metadata == null) { metadata = new RecordMetadata(topicPartition(), -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1); } this.interceptors.onAcknowledgement(metadata, exception); if (this.userCallback != null) this.userCallback.onCompletion(metadata, exception); } @Override public void setPartition(int partition) { assert partition != RecordMetadata.UNKNOWN_PARTITION; this.partition = partition; if (log.isTraceEnabled()) { // Log the message here, because we don't know the partition before that. log.trace("Attempting to append record {} with callback {} to topic {} partition {}", recordLogString, userCallback, topic, partition); } } public int getPartition() { return partition; } public TopicPartition topicPartition() { if (topicPartition == null && topic != null) { if (partition != RecordMetadata.UNKNOWN_PARTITION) topicPartition = new TopicPartition(topic, partition); else if (recordPartition != null) topicPartition = new TopicPartition(topic, recordPartition); else topicPartition = new TopicPartition(topic, RecordMetadata.UNKNOWN_PARTITION); } return topicPartition; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/MockProducer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.producer.internals.FutureRecordMetadata; import org.apache.kafka.clients.producer.internals.ProduceRequestResult; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.utils.Time; import java.time.Duration; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Deque; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.Future; /** * A mock of the producer interface you can use for testing code that uses Kafka. * <p> * By default this mock will synchronously complete each send call successfully. 
However it can be configured to allow * the user to control the completion of the call and supply an optional error for the producer to throw. */ public class MockProducer<K, V> implements Producer<K, V> { private final Cluster cluster; private final Partitioner partitioner; private final List<ProducerRecord<K, V>> sent; private final List<ProducerRecord<K, V>> uncommittedSends; private final Deque<Completion> completions; private final Map<TopicPartition, Long> offsets; private final List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsets; private Map<String, Map<TopicPartition, OffsetAndMetadata>> uncommittedConsumerGroupOffsets; private final Serializer<K> keySerializer; private final Serializer<V> valueSerializer; private boolean autoComplete; private boolean closed; private boolean transactionInitialized; private boolean transactionInFlight; private boolean transactionCommitted; private boolean transactionAborted; private boolean producerFenced; private boolean sentOffsets; private long commitCount = 0L; private final Map<MetricName, Metric> mockMetrics; public RuntimeException initTransactionException = null; public RuntimeException beginTransactionException = null; public RuntimeException sendOffsetsToTransactionException = null; public RuntimeException commitTransactionException = null; public RuntimeException abortTransactionException = null; public RuntimeException sendException = null; public RuntimeException flushException = null; public RuntimeException partitionsForException = null; public RuntimeException closeException = null; /** * Create a mock producer * * @param cluster The cluster holding metadata for this producer * @param autoComplete If true automatically complete all requests successfully and execute the callback. 
Otherwise
     *        the user must call {@link #completeNext()} or {@link #errorNext(RuntimeException)} after
     *        {@link #send(ProducerRecord) send()} to complete the call and unblock the {@link
     *        java.util.concurrent.Future Future&lt;RecordMetadata&gt;} that is returned.
     * @param partitioner The partition strategy
     * @param keySerializer The serializer for key that implements {@link Serializer}.
     * @param valueSerializer The serializer for value that implements {@link Serializer}.
     */
    public MockProducer(final Cluster cluster,
                        final boolean autoComplete,
                        final Partitioner partitioner,
                        final Serializer<K> keySerializer,
                        final Serializer<V> valueSerializer) {
        this.cluster = cluster;
        this.autoComplete = autoComplete;
        this.partitioner = partitioner;
        this.keySerializer = keySerializer;
        this.valueSerializer = valueSerializer;
        this.offsets = new HashMap<>();
        this.sent = new ArrayList<>();
        this.uncommittedSends = new ArrayList<>();
        this.consumerGroupOffsets = new ArrayList<>();
        this.uncommittedConsumerGroupOffsets = new HashMap<>();
        this.completions = new ArrayDeque<>();
        this.mockMetrics = new HashMap<>();
    }

    /**
     * Create a new mock producer with invented metadata the given autoComplete setting and key\value serializers.
     *
     * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)}
     */
    // DefaultPartitioner is deprecated, hence the suppression on these convenience constructors.
    @SuppressWarnings("deprecation")
    public MockProducer(final boolean autoComplete,
                        final Serializer<K> keySerializer,
                        final Serializer<V> valueSerializer) {
        this(Cluster.empty(), autoComplete, new org.apache.kafka.clients.producer.internals.DefaultPartitioner(), keySerializer, valueSerializer);
    }

    /**
     * Create a new mock producer with invented metadata the given autoComplete setting and key\value serializers.
     *
     * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(cluster, autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)}
     */
    @SuppressWarnings("deprecation")
    public MockProducer(final Cluster cluster,
                        final boolean autoComplete,
                        final Serializer<K> keySerializer,
                        final Serializer<V> valueSerializer) {
        this(cluster, autoComplete, new org.apache.kafka.clients.producer.internals.DefaultPartitioner(), keySerializer, valueSerializer);
    }

    /**
     * Create a new mock producer with invented metadata the given autoComplete setting, partitioner and key\value serializers.
     *
     * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), autoComplete, partitioner, keySerializer, valueSerializer)}
     */
    public MockProducer(final boolean autoComplete,
                        final Partitioner partitioner,
                        final Serializer<K> keySerializer,
                        final Serializer<V> valueSerializer) {
        this(Cluster.empty(), autoComplete, partitioner, keySerializer, valueSerializer);
    }

    /**
     * Create a new mock producer with invented metadata.
* * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), false, null, null, null)} */ public MockProducer() { this(Cluster.empty(), false, null, null, null); } @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } if (this.initTransactionException != null) { throw this.initTransactionException; } this.transactionInitialized = true; this.transactionInFlight = false; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); if (this.beginTransactionException != null) { throw this.beginTransactionException; } if (transactionInFlight) { throw new IllegalStateException("Transaction already started"); } this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } @Deprecated @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { Objects.requireNonNull(consumerGroupId); sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId)); } @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, ConsumerGroupMetadata groupMetadata) throws ProducerFencedException { Objects.requireNonNull(groupMetadata); verifyProducerState(); verifyTransactionsInitialized(); verifyTransactionInFlight(); if (this.sendOffsetsToTransactionException != null) { throw this.sendOffsetsToTransactionException; } if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.computeIfAbsent(groupMetadata.groupId(), k -> new HashMap<>()); 
uncommittedOffsets.putAll(offsets); this.sentOffsets = true; } @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyTransactionInFlight(); if (this.commitTransactionException != null) { throw this.commitTransactionException; } flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; } @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyTransactionInFlight(); if (this.abortTransactionException != null) { throw this.abortTransactionException; } flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; } private synchronized void verifyProducerState() { if (this.closed) { throw new IllegalStateException("MockProducer is already closed."); } if (this.producerFenced) { throw new ProducerFencedException("MockProducer is fenced."); } } private void verifyTransactionsInitialized() { if (!this.transactionInitialized) { throw new IllegalStateException("MockProducer hasn't been initialized for transactions."); } } private void verifyTransactionInFlight() { if (!this.transactionInFlight) { throw new IllegalStateException("There is no open transaction."); } } /** * Adds the record to the list of sent records. The {@link RecordMetadata} returned will be immediately satisfied. * * @see #history() */ @Override public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record) { return send(record, null); } /** * Adds the record to the list of sent records. 
* * @see #history() */ @Override public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) { if (this.closed) { throw new IllegalStateException("MockProducer is already closed."); } if (this.producerFenced) { throw new KafkaException("MockProducer is fenced.", new ProducerFencedException("Fenced")); } if (this.sendException != null) { throw this.sendException; } int partition = 0; if (!this.cluster.partitionsForTopic(record.topic()).isEmpty()) partition = partition(record, this.cluster); else { //just to throw ClassCastException if serializers are not the proper ones to serialize key/value keySerializer.serialize(record.topic(), record.key()); valueSerializer.serialize(record.topic(), record.value()); } TopicPartition topicPartition = new TopicPartition(record.topic(), partition); ProduceRequestResult result = new ProduceRequestResult(topicPartition); FutureRecordMetadata future = new FutureRecordMetadata(result, 0, RecordBatch.NO_TIMESTAMP, 0, 0, Time.SYSTEM); long offset = nextOffset(topicPartition); long baseOffset = Math.max(0, offset - Integer.MAX_VALUE); int batchIndex = (int) Math.min(Integer.MAX_VALUE, offset); Completion completion = new Completion(offset, new RecordMetadata(topicPartition, baseOffset, batchIndex, RecordBatch.NO_TIMESTAMP, 0, 0), result, callback, topicPartition); if (!this.transactionInFlight) this.sent.add(record); else this.uncommittedSends.add(record); if (autoComplete) completion.complete(null); else this.completions.addLast(completion); return future; } /** * Get the next offset for this topic/partition */ private long nextOffset(TopicPartition tp) { Long offset = this.offsets.get(tp); if (offset == null) { this.offsets.put(tp, 1L); return 0L; } else { Long next = offset + 1; this.offsets.put(tp, next); return offset; } } public synchronized void flush() { verifyProducerState(); if (this.flushException != null) { throw this.flushException; } while (!this.completions.isEmpty()) completeNext(); } 
public List<PartitionInfo> partitionsFor(String topic) { if (this.partitionsForException != null) { throw this.partitionsForException; } return this.cluster.partitionsForTopic(topic); } public Map<MetricName, Metric> metrics() { return mockMetrics; } /** * Set a mock metric for testing purpose */ public void setMockMetrics(MetricName name, Metric metric) { mockMetrics.put(name, metric); } @Override public void close() { close(Duration.ofMillis(0)); } @Override public void close(Duration timeout) { if (this.closeException != null) { throw this.closeException; } this.closed = true; } public boolean closed() { return this.closed; } public synchronized void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } public boolean transactionInitialized() { return this.transactionInitialized; } public boolean transactionInFlight() { return this.transactionInFlight; } public boolean transactionCommitted() { return this.transactionCommitted; } public boolean transactionAborted() { return this.transactionAborted; } public boolean flushed() { return this.completions.isEmpty(); } public boolean sentOffsets() { return this.sentOffsets; } public long commitCount() { return this.commitCount; } /** * Get the list of sent records since the last call to {@link #clear()} */ public synchronized List<ProducerRecord<K, V>> history() { return new ArrayList<>(this.sent); } public synchronized List<ProducerRecord<K, V>> uncommittedRecords() { return new ArrayList<>(this.uncommittedSends); } /** * * Get the list of committed consumer group offsets since the last call to {@link #clear()} */ public synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory() { return new ArrayList<>(this.consumerGroupOffsets); } public synchronized Map<String, Map<TopicPartition, OffsetAndMetadata>> uncommittedOffsets() { return this.uncommittedConsumerGroupOffsets; } /** * Clear the stored history of sent records, consumer 
group offsets */ public synchronized void clear() { this.sent.clear(); this.uncommittedSends.clear(); this.sentOffsets = false; this.completions.clear(); this.consumerGroupOffsets.clear(); this.uncommittedConsumerGroupOffsets.clear(); } /** * Complete the earliest uncompleted call successfully. * * @return true if there was an uncompleted call to complete */ public synchronized boolean completeNext() { return errorNext(null); } /** * Complete the earliest uncompleted call with the given error. * * @return true if there was an uncompleted call to complete */ public synchronized boolean errorNext(RuntimeException e) { Completion completion = this.completions.pollFirst(); if (completion != null) { completion.complete(e); return true; } else { return false; } } /** * computes partition for given record. */ private int partition(ProducerRecord<K, V> record, Cluster cluster) { Integer partition = record.partition(); String topic = record.topic(); if (partition != null) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); // they have given us a partition, use it if (partition < 0 || partition >= numPartitions) throw new IllegalArgumentException("Invalid partition given with record: " + partition + " is not in the range [0..." 
+ numPartitions + "]."); return partition; } byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key()); byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value()); return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster); } private static class Completion { private final long offset; private final RecordMetadata metadata; private final ProduceRequestResult result; private final Callback callback; private final TopicPartition tp; public Completion(long offset, RecordMetadata metadata, ProduceRequestResult result, Callback callback, TopicPartition tp) { this.metadata = metadata; this.offset = offset; this.result = result; this.callback = callback; this.tp = tp; } public void complete(RuntimeException e) { if (e == null) { result.set(offset, RecordBatch.NO_TIMESTAMP, null); } else { result.set(-1, RecordBatch.NO_TIMESTAMP, index -> e); } if (callback != null) { if (e == null) callback.onCompletion(metadata, null); else callback.onCompletion(new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1), e); } result.done(); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/Partitioner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.Cluster;

import java.io.Closeable;

/**
 * Partitioner Interface
 *
 * Pluggable strategy that maps each produced record to a partition of its topic.
 * Implementations are configured via {@link Configurable#configure} and closed
 * via {@link #close()} when the producer shuts down.
 */
public interface Partitioner extends Configurable, Closeable {

    /**
     * Compute the partition for the given record.
     *
     * @param topic The topic name
     * @param key The key to partition on (or null if no key)
     * @param keyBytes The serialized key to partition on (or null if no key)
     * @param value The value to partition on or null
     * @param valueBytes The serialized value to partition on or null
     * @param cluster The current cluster metadata
     * @return the partition index the record should be sent to
     */
    int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster);

    /**
     * This is called when partitioner is closed.
     */
    void close();

    /**
     * Note this method is only implemented in DefaultPartitioner and {@link UniformStickyPartitioner} which
     * are now deprecated. See <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-794%3A+Strictly+Uniform+Sticky+Partitioner">KIP-794</a> for more info.
     * <p>
     * Notifies the partitioner a new batch is about to be created. When using the sticky partitioner,
     * this method can change the chosen sticky partition for the new batch.
     *
     * @param topic The topic name
     * @param cluster The current cluster metadata
     * @param prevPartition The partition previously selected for the record that triggered a new batch
     * @deprecated Since 3.3.0
     */
    @Deprecated
    default void onNewBatch(String topic, Cluster cluster, int prevPartition) {
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/Producer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

import java.io.Closeable;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;

/**
 * The interface for the {@link KafkaProducer}
 *
 * Abstracts the producer API so tests can substitute {@link MockProducer} for the
 * real client. See the linked {@link KafkaProducer} methods for full behavioral
 * contracts; implementations must honor them.
 *
 * @see KafkaProducer
 * @see MockProducer
 */
public interface Producer<K, V> extends Closeable {

    /**
     * See {@link KafkaProducer#initTransactions()}
     */
    void initTransactions();

    /**
     * See {@link KafkaProducer#beginTransaction()}
     */
    void beginTransaction() throws ProducerFencedException;

    /**
     * See {@link KafkaProducer#sendOffsetsToTransaction(Map, String)}
     *
     * @deprecated use {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} instead
     */
    @Deprecated
    void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
                                  String consumerGroupId) throws ProducerFencedException;

    /**
     * See {@link KafkaProducer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)}
     */
    void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
                                  ConsumerGroupMetadata groupMetadata) throws ProducerFencedException;

    /**
     * See {@link KafkaProducer#commitTransaction()}
     */
    void commitTransaction() throws ProducerFencedException;

    /**
     * See {@link KafkaProducer#abortTransaction()}
     */
    void abortTransaction() throws ProducerFencedException;

    /**
     * See {@link KafkaProducer#send(ProducerRecord)}
     */
    Future<RecordMetadata> send(ProducerRecord<K, V> record);

    /**
     * See {@link KafkaProducer#send(ProducerRecord, Callback)}
     */
    Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback);

    /**
     * See {@link KafkaProducer#flush()}
     */
    void flush();

    /**
     * See {@link KafkaProducer#partitionsFor(String)}
     */
    List<PartitionInfo> partitionsFor(String topic);

    /**
     * See {@link KafkaProducer#metrics()}
     */
    Map<MetricName, ? extends Metric> metrics();

    /**
     * See {@link KafkaProducer#close()}
     */
    void close();

    /**
     * See {@link KafkaProducer#close(Duration)}
     */
    void close(Duration timeout);
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/ProducerConfig.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer; import org.apache.kafka.clients.ClientDnsLookup; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.Range.between; import static org.apache.kafka.common.config.ConfigDef.ValidString.in; /** * Configuration for the Kafka 
Producer. Documentation for these configurations can be found in the <a * href="http://kafka.apache.org/documentation.html#producerconfigs">Kafka documentation</a> */ public class ProducerConfig extends AbstractConfig { private static final Logger log = LoggerFactory.getLogger(ProducerConfig.class); /* * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS THESE ARE PART OF THE PUBLIC API AND * CHANGE WILL BREAK USER CODE. */ private static final ConfigDef CONFIG; /** <code>bootstrap.servers</code> */ public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; /** <code>client.dns.lookup</code> */ public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; /** <code>metadata.max.age.ms</code> */ public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG; private static final String METADATA_MAX_AGE_DOC = CommonClientConfigs.METADATA_MAX_AGE_DOC; /** <code>metadata.max.idle.ms</code> */ public static final String METADATA_MAX_IDLE_CONFIG = "metadata.max.idle.ms"; private static final String METADATA_MAX_IDLE_DOC = "Controls how long the producer will cache metadata for a topic that's idle. If the elapsed " + "time since a topic was last produced to exceeds the metadata idle duration, then the topic's " + "metadata is forgotten and the next access to it will force a metadata fetch request."; /** <code>batch.size</code> */ public static final String BATCH_SIZE_CONFIG = "batch.size"; private static final String BATCH_SIZE_DOC = "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent" + " to the same partition. This helps performance on both the client and the server. This configuration controls the " + "default batch size in bytes. " + "<p>" + "No attempt will be made to batch records larger than this size. 
" + "<p>" + "Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. " + "<p>" + "A small batch size will make batching less common and may reduce throughput (a batch size of zero will disable " + "batching entirely). A very large batch size may use memory a bit more wastefully as we will always allocate a " + "buffer of the specified batch size in anticipation of additional records." + "<p>" + "Note: This setting gives the upper bound of the batch size to be sent. If we have fewer than this many bytes accumulated " + "for this partition, we will 'linger' for the <code>linger.ms</code> time waiting for more records to show up. " + "This <code>linger.ms</code> setting defaults to 0, which means we'll immediately send out a record even the accumulated " + "batch size is under this <code>batch.size</code> setting."; /** <code>partitioner.adaptive.partitioning.enable</code> */ public static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG = "partitioner.adaptive.partitioning.enable"; private static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC = "When set to 'true', the producer will try to adapt to broker performance and produce more messages to partitions hosted on faster brokers. " + "If 'false', producer will try to distribute messages uniformly. Note: this setting has no effect if a custom partitioner is used"; /** <code>partitioner.availability.timeout.ms</code> */ public static final String PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG = "partitioner.availability.timeout.ms"; private static final String PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC = "If a broker cannot process produce requests from a partition for <code>" + PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG + "</code> time, " + "the partitioner treats that partition as not available. If the value is 0, this logic is disabled. 
" + "Note: this setting has no effect if a custom partitioner is used or <code>" + PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG + "</code> is set to 'false'"; /** <code>partitioner.ignore.keys</code> */ public static final String PARTITIONER_IGNORE_KEYS_CONFIG = "partitioner.ignore.keys"; private static final String PARTITIONER_IGNORE_KEYS_DOC = "When set to 'true' the producer won't use record keys to choose a partition. " + "If 'false', producer would choose a partition based on a hash of the key when a key is present. " + "Note: this setting has no effect if a custom partitioner is used."; /** <code>acks</code> */ public static final String ACKS_CONFIG = "acks"; private static final String ACKS_DOC = "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the " + " durability of records that are sent. The following settings are allowed: " + " <ul>" + " <li><code>acks=0</code> If set to zero then the producer will not wait for any acknowledgment from the" + " server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be" + " made that the server has received the record in this case, and the <code>retries</code> configuration will not" + " take effect (as the client won't generally know of any failures). The offset given back for each record will" + " always be set to <code>-1</code>." + " <li><code>acks=1</code> This will mean the leader will write the record to its local log but will respond" + " without awaiting full acknowledgement from all followers. In this case should the leader fail immediately after" + " acknowledging the record but before the followers have replicated it then the record will be lost." + " <li><code>acks=all</code> This means the leader will wait for the full set of in-sync replicas to" + " acknowledge the record. 
This guarantees that the record will not be lost as long as at least one in-sync replica" + " remains alive. This is the strongest available guarantee. This is equivalent to the acks=-1 setting." + "</ul>" + "<p>" + "Note that enabling idempotence requires this config value to be 'all'." + " If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled."; /** <code>linger.ms</code> */ public static final String LINGER_MS_CONFIG = "linger.ms"; private static final String LINGER_MS_DOC = "The producer groups together any records that arrive in between request transmissions into a single batched request. " + "Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to " + "reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount " + "of artificial delay&mdash;that is, rather than immediately sending out a record, the producer will wait for up to " + "the given delay to allow other records to be sent so that the sends can be batched together. This can be thought " + "of as analogous to Nagle's algorithm in TCP. This setting gives the upper bound on the delay for batching: once " + "we get <code>" + BATCH_SIZE_CONFIG + "</code> worth of records for a partition it will be sent immediately regardless of this " + "setting, however if we have fewer than this many bytes accumulated for this partition we will 'linger' for the " + "specified time waiting for more records to show up. This setting defaults to 0 (i.e. no delay). 
Setting <code>" + LINGER_MS_CONFIG + "=5</code>, " + "for example, would have the effect of reducing the number of requests sent but would add up to 5ms of latency to records sent in the absence of load."; /** <code>request.timeout.ms</code> */ public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG; private static final String REQUEST_TIMEOUT_MS_DOC = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC + " This should be larger than <code>replica.lag.time.max.ms</code> (a broker configuration)" + " to reduce the possibility of message duplication due to unnecessary producer retries."; /** <code>delivery.timeout.ms</code> */ public static final String DELIVERY_TIMEOUT_MS_CONFIG = "delivery.timeout.ms"; private static final String DELIVERY_TIMEOUT_MS_DOC = "An upper bound on the time to report success or failure " + "after a call to <code>send()</code> returns. This limits the total time that a record will be delayed " + "prior to sending, the time to await acknowledgement from the broker (if expected), and the time allowed " + "for retriable send failures. The producer may report failure to send a record earlier than this config if " + "either an unrecoverable error is encountered, the retries have been exhausted, " + "or the record is added to a batch which reached an earlier delivery expiration deadline. 
" + "The value of this config should be greater than or equal to the sum of <code>" + REQUEST_TIMEOUT_MS_CONFIG + "</code> " + "and <code>" + LINGER_MS_CONFIG + "</code>."; /** <code>client.id</code> */ public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG; /** <code>send.buffer.bytes</code> */ public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG; /** <code>receive.buffer.bytes</code> */ public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG; /** <code>max.request.size</code> */ public static final String MAX_REQUEST_SIZE_CONFIG = "max.request.size"; private static final String MAX_REQUEST_SIZE_DOC = "The maximum size of a request in bytes. This setting will limit the number of record " + "batches the producer will send in a single request to avoid sending huge requests. " + "This is also effectively a cap on the maximum uncompressed record batch size. Note that the server " + "has its own cap on the record batch size (after compression if compression is enabled) which may be different from this."; /** <code>reconnect.backoff.ms</code> */ public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG; /** <code>reconnect.backoff.max.ms</code> */ public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG; /** <code>max.block.ms</code> */ public static final String MAX_BLOCK_MS_CONFIG = "max.block.ms"; private static final String MAX_BLOCK_MS_DOC = "The configuration controls how long the <code>KafkaProducer</code>'s <code>send()</code>, <code>partitionsFor()</code>, " + "<code>initTransactions()</code>, <code>sendOffsetsToTransaction()</code>, <code>commitTransaction()</code> " + "and <code>abortTransaction()</code> methods will block. 
" + "For <code>send()</code> this timeout bounds the total time waiting for both metadata fetch and buffer allocation " + "(blocking in the user-supplied serializers or partitioner is not counted against this timeout). " + "For <code>partitionsFor()</code> this timeout bounds the time spent waiting for metadata if it is unavailable. " + "The transaction-related methods always block, but may timeout if " + "the transaction coordinator could not be discovered or did not respond within the timeout."; /** <code>buffer.memory</code> */ public static final String BUFFER_MEMORY_CONFIG = "buffer.memory"; private static final String BUFFER_MEMORY_DOC = "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are " + "sent faster than they can be delivered to the server the producer will block for <code>" + MAX_BLOCK_MS_CONFIG + "</code> after which it will throw an exception." + "<p>" + "This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since " + "not all memory the producer uses is used for buffering. Some additional memory will be used for compression (if " + "compression is enabled) as well as for maintaining in-flight requests."; /** <code>retry.backoff.ms</code> */ public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG; /** <code>compression.type</code> */ public static final String COMPRESSION_TYPE_CONFIG = "compression.type"; private static final String COMPRESSION_TYPE_DOC = "The compression type for all data generated by the producer. The default is none (i.e. no compression). Valid " + " values are <code>none</code>, <code>gzip</code>, <code>snappy</code>, <code>lz4</code>, or <code>zstd</code>. 
" + "Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression)."; /** <code>metrics.sample.window.ms</code> */ public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG; /** <code>metrics.num.samples</code> */ public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG; /** * <code>metrics.recording.level</code> */ public static final String METRICS_RECORDING_LEVEL_CONFIG = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG; /** <code>metric.reporters</code> */ public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG; /** <code>auto.include.jmx.reporter</code> */ @Deprecated public static final String AUTO_INCLUDE_JMX_REPORTER_CONFIG = CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_CONFIG; // max.in.flight.requests.per.connection should be less than or equal to 5 when idempotence producer enabled to ensure message ordering private static final int MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_FOR_IDEMPOTENCE = 5; /** <code>max.in.flight.requests.per.connection</code> */ public static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION = "max.in.flight.requests.per.connection"; private static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC = "The maximum number of unacknowledged requests the client will send on a single connection before blocking." + " Note that if this configuration is set to be greater than 1 and <code>enable.idempotence</code> is set to false, there is a risk of" + " message reordering after a failed send due to retries (i.e., if retries are enabled); " + " if retries are disabled or if <code>enable.idempotence</code> is set to true, ordering will be preserved." 
+ " Additionally, enabling idempotence requires the value of this configuration to be less than or equal to " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_FOR_IDEMPOTENCE + "." + " If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled. "; /** <code>retries</code> */ public static final String RETRIES_CONFIG = CommonClientConfigs.RETRIES_CONFIG; private static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error." + " Note that this retry is no different than if the client resent the record upon receiving the error." + " Produce requests will be failed before the number of retries has been exhausted if the timeout configured by" + " <code>" + DELIVERY_TIMEOUT_MS_CONFIG + "</code> expires first before successful acknowledgement. Users should generally" + " prefer to leave this config unset and instead use <code>" + DELIVERY_TIMEOUT_MS_CONFIG + "</code> to control" + " retry behavior." + "<p>" + "Enabling idempotence requires this config value to be greater than 0." + " If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled." 
+ "<p>" + "Allowing retries while setting <code>enable.idempotence</code> to <code>false</code> and <code>" + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "</code> to greater than 1 will potentially change the" + " ordering of records because if two batches are sent to a single partition, and the first fails and is retried but the second" + " succeeds, then the records in the second batch may appear first."; /** <code>key.serializer</code> */ public static final String KEY_SERIALIZER_CLASS_CONFIG = "key.serializer"; public static final String KEY_SERIALIZER_CLASS_DOC = "Serializer class for key that implements the <code>org.apache.kafka.common.serialization.Serializer</code> interface."; /** <code>value.serializer</code> */ public static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"; public static final String VALUE_SERIALIZER_CLASS_DOC = "Serializer class for value that implements the <code>org.apache.kafka.common.serialization.Serializer</code> interface."; /** <code>socket.connection.setup.timeout.ms</code> */ public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG; /** <code>socket.connection.setup.timeout.max.ms</code> */ public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG; /** <code>connections.max.idle.ms</code> */ public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; /** <code>partitioner.class</code> */ public static final String PARTITIONER_CLASS_CONFIG = "partitioner.class"; private static final String PARTITIONER_CLASS_DOC = "A class to use to determine which partition to be send to when produce the records. Available options are:" + "<ul>" + "<li>If not set, the default partitioning logic is used. 
" + "This strategy will try sticking to a partition until at least " + BATCH_SIZE_CONFIG + " bytes is produced to the partition. It works with the strategy:" + "<ul>" + "<li>If no partition is specified but a key is present, choose a partition based on a hash of the key</li>" + "<li>If no partition or key is present, choose the sticky partition that changes when at least " + BATCH_SIZE_CONFIG + " bytes are produced to the partition.</li>" + "</ul>" + "</li>" + "<li><code>org.apache.kafka.clients.producer.RoundRobinPartitioner</code>: This partitioning strategy is that " + "each record in a series of consecutive records will be sent to a different partition(no matter if the 'key' is provided or not), " + "until we run out of partitions and start over again. Note: There's a known issue that will cause uneven distribution when new batch is created. " + "Please check KAFKA-9965 for more detail." + "</li>" + "</ul>" + "<p>Implementing the <code>org.apache.kafka.clients.producer.Partitioner</code> interface allows you to plug in a custom partitioner."; /** <code>interceptor.classes</code> */ public static final String INTERCEPTOR_CLASSES_CONFIG = "interceptor.classes"; public static final String INTERCEPTOR_CLASSES_DOC = "A list of classes to use as interceptors. " + "Implementing the <code>org.apache.kafka.clients.producer.ProducerInterceptor</code> interface allows you to intercept (and possibly mutate) the records " + "received by the producer before they are published to the Kafka cluster. By default, there are no interceptors."; /** <code>enable.idempotence</code> */ public static final String ENABLE_IDEMPOTENCE_CONFIG = "enable.idempotence"; public static final String ENABLE_IDEMPOTENCE_DOC = "When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer " + "retries due to broker failures, etc., may write duplicates of the retried message in the stream. 
" + "Note that enabling idempotence requires <code>" + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "</code> to be less than or equal to " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_FOR_IDEMPOTENCE + " (with message ordering preserved for any allowable value), <code>" + RETRIES_CONFIG + "</code> to be greater than 0, and <code>" + ACKS_CONFIG + "</code> must be 'all'. " + "<p>" + "Idempotence is enabled by default if no conflicting configurations are set. " + "If conflicting configurations are set and idempotence is not explicitly enabled, idempotence is disabled. " + "If idempotence is explicitly enabled and conflicting configurations are set, a <code>ConfigException</code> is thrown."; /** <code> transaction.timeout.ms </code> */ public static final String TRANSACTION_TIMEOUT_CONFIG = "transaction.timeout.ms"; public static final String TRANSACTION_TIMEOUT_DOC = "The maximum amount of time in milliseconds that a transaction will remain open before the coordinator proactively aborts it. " + "The start of the transaction is set at the time that the first partition is added to it. " + "If this value is larger than the <code>transaction.max.timeout.ms<code> setting in the broker, the request will fail with a <code>InvalidTxnTimeoutException</code> error."; /** <code> transactional.id </code> */ public static final String TRANSACTIONAL_ID_CONFIG = "transactional.id"; public static final String TRANSACTIONAL_ID_DOC = "The TransactionalId to use for transactional delivery. This enables reliability semantics which span multiple producer sessions since it allows the client to guarantee that transactions using the same TransactionalId have been completed prior to starting any new transactions. If no TransactionalId is provided, then the producer is limited to idempotent delivery. " + "If a TransactionalId is configured, <code>enable.idempotence</code> is implied. " + "By default the TransactionId is not configured, which means transactions cannot be used. 
" + "Note that, by default, transactions require a cluster of at least three brokers which is the recommended setting for production; for development you can change this, by adjusting broker setting <code>transaction.state.log.replication.factor</code>."; /** * <code>security.providers</code> */ public static final String SECURITY_PROVIDERS_CONFIG = SecurityConfig.SECURITY_PROVIDERS_CONFIG; private static final String SECURITY_PROVIDERS_DOC = SecurityConfig.SECURITY_PROVIDERS_DOC; private static final AtomicInteger PRODUCER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, Type.STRING, ClientDnsLookup.USE_ALL_DNS_IPS.toString(), in(ClientDnsLookup.USE_ALL_DNS_IPS.toString(), ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY.toString()), Importance.MEDIUM, CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC) .define(BUFFER_MEMORY_CONFIG, Type.LONG, 32 * 1024 * 1024L, atLeast(0L), Importance.HIGH, BUFFER_MEMORY_DOC) .define(RETRIES_CONFIG, Type.INT, Integer.MAX_VALUE, between(0, Integer.MAX_VALUE), Importance.HIGH, RETRIES_DOC) .define(ACKS_CONFIG, Type.STRING, "all", in("all", "-1", "0", "1"), Importance.LOW, ACKS_DOC) .define(COMPRESSION_TYPE_CONFIG, Type.STRING, CompressionType.NONE.name, in(Utils.enumOptions(CompressionType.class)), Importance.HIGH, COMPRESSION_TYPE_DOC) .define(BATCH_SIZE_CONFIG, Type.INT, 16384, atLeast(0), Importance.MEDIUM, BATCH_SIZE_DOC) .define(PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC) .define(PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.LOW, PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC) .define(PARTITIONER_IGNORE_KEYS_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, PARTITIONER_IGNORE_KEYS_DOC) 
.define(LINGER_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.MEDIUM, LINGER_MS_DOC)
            .define(DELIVERY_TIMEOUT_MS_CONFIG, Type.INT, 120 * 1000, atLeast(0), Importance.MEDIUM, DELIVERY_TIMEOUT_MS_DOC)
            .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CommonClientConfigs.CLIENT_ID_DOC)
            .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(CommonClientConfigs.SEND_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC)
            .define(RECEIVE_BUFFER_CONFIG, Type.INT, 32 * 1024, atLeast(CommonClientConfigs.RECEIVE_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.RECEIVE_BUFFER_DOC)
            .define(MAX_REQUEST_SIZE_CONFIG, Type.INT, 1024 * 1024, atLeast(0), Importance.MEDIUM, MAX_REQUEST_SIZE_DOC)
            .define(RECONNECT_BACKOFF_MS_CONFIG, Type.LONG, 50L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC)
            .define(RECONNECT_BACKOFF_MAX_MS_CONFIG, Type.LONG, 1000L, atLeast(0L), Importance.LOW, CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC)
            .define(RETRY_BACKOFF_MS_CONFIG, Type.LONG, 100L, atLeast(0L), Importance.LOW, CommonClientConfigs.RETRY_BACKOFF_MS_DOC)
            .define(MAX_BLOCK_MS_CONFIG, Type.LONG, 60 * 1000, atLeast(0), Importance.MEDIUM, MAX_BLOCK_MS_DOC)
            .define(REQUEST_TIMEOUT_MS_CONFIG, Type.INT, 30 * 1000, atLeast(0), Importance.MEDIUM, REQUEST_TIMEOUT_MS_DOC)
            .define(METADATA_MAX_AGE_CONFIG, Type.LONG, 5 * 60 * 1000, atLeast(0), Importance.LOW, METADATA_MAX_AGE_DOC)
            .define(METADATA_MAX_IDLE_CONFIG,
                Type.LONG,
                5 * 60 * 1000,
                atLeast(5000),
                Importance.LOW,
                METADATA_MAX_IDLE_DOC)
            .define(METRICS_SAMPLE_WINDOW_MS_CONFIG,
                Type.LONG,
                30000,
                atLeast(0),
                Importance.LOW,
                CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC)
            .define(METRICS_NUM_SAMPLES_CONFIG, Type.INT, 2, atLeast(1), Importance.LOW, CommonClientConfigs.METRICS_NUM_SAMPLES_DOC)
            .define(METRICS_RECORDING_LEVEL_CONFIG,
                Type.STRING,
                Sensor.RecordingLevel.INFO.toString(),
                in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString(), Sensor.RecordingLevel.TRACE.toString()),
                Importance.LOW,
                CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC)
            .define(METRIC_REPORTER_CLASSES_CONFIG,
                Type.LIST,
                Collections.emptyList(),
                new ConfigDef.NonNullValidator(),
                Importance.LOW,
                CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC)
            .define(AUTO_INCLUDE_JMX_REPORTER_CONFIG,
                Type.BOOLEAN,
                true,
                Importance.LOW,
                CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_DOC)
            .define(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION,
                Type.INT,
                5,
                atLeast(1),
                Importance.LOW,
                MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC)
            .define(KEY_SERIALIZER_CLASS_CONFIG,
                Type.CLASS,
                Importance.HIGH,
                KEY_SERIALIZER_CLASS_DOC)
            .define(VALUE_SERIALIZER_CLASS_CONFIG,
                Type.CLASS,
                Importance.HIGH,
                VALUE_SERIALIZER_CLASS_DOC)
            .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG,
                Type.LONG,
                CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS,
                Importance.MEDIUM,
                CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC)
            .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG,
                Type.LONG,
                CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS,
                Importance.MEDIUM,
                CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC)
            /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */
            .define(CONNECTIONS_MAX_IDLE_MS_CONFIG,
                Type.LONG,
                9 * 60 * 1000,
                Importance.MEDIUM,
                CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC)
            .define(PARTITIONER_CLASS_CONFIG,
                Type.CLASS,
                null,
                Importance.MEDIUM, PARTITIONER_CLASS_DOC)
            .define(INTERCEPTOR_CLASSES_CONFIG,
                Type.LIST,
                Collections.emptyList(),
                new ConfigDef.NonNullValidator(),
                Importance.LOW,
                INTERCEPTOR_CLASSES_DOC)
            .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
                Type.STRING,
                CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL,
                ConfigDef.CaseInsensitiveValidString.in(Utils.enumOptions(SecurityProtocol.class)),
                Importance.MEDIUM,
                CommonClientConfigs.SECURITY_PROTOCOL_DOC)
            .define(SECURITY_PROVIDERS_CONFIG,
                Type.STRING,
                null,
                Importance.LOW,
                SECURITY_PROVIDERS_DOC)
            .withClientSslSupport()
            .withClientSaslSupport()
            .define(ENABLE_IDEMPOTENCE_CONFIG,
                Type.BOOLEAN,
                true,
                Importance.LOW,
                ENABLE_IDEMPOTENCE_DOC)
            .define(TRANSACTION_TIMEOUT_CONFIG,
                Type.INT,
                60000,
                Importance.LOW,
                TRANSACTION_TIMEOUT_DOC)
            .define(TRANSACTIONAL_ID_CONFIG,
                Type.STRING,
                null,
                new ConfigDef.NonEmptyString(),
                Importance.LOW,
                TRANSACTIONAL_ID_DOC);
    }

    /**
     * Hook run by AbstractConfig after parsing: validates SASL settings, normalizes the
     * reconnect-backoff pair, resolves the idempotence-related configs, and fills in a
     * client.id if the user did not supply one.
     *
     * @param parsedValues the parsed configuration values
     * @return the refined configuration map actually used by the producer
     */
    @Override
    protected Map<String, Object> postProcessParsedConfig(final Map<String, Object> parsedValues) {
        CommonClientConfigs.postValidateSaslMechanismConfig(this);
        Map<String, Object> refinedConfigs = CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues);
        postProcessAndValidateIdempotenceConfigs(refinedConfigs);
        maybeOverrideClientId(refinedConfigs);
        return refinedConfigs;
    }

    // Synthesizes a client.id when the user did not set one: "producer-<transactional.id>"
    // if a transactional id exists, otherwise "producer-N" from a process-wide counter.
    private void maybeOverrideClientId(final Map<String, Object> configs) {
        String refinedClientId;
        boolean userConfiguredClientId = this.originals().containsKey(CLIENT_ID_CONFIG);
        if (userConfiguredClientId) {
            refinedClientId = this.getString(CLIENT_ID_CONFIG);
        } else {
            String transactionalId = this.getString(TRANSACTIONAL_ID_CONFIG);
            refinedClientId = "producer-" + (transactionalId != null ?
transactionalId : PRODUCER_CLIENT_ID_SEQUENCE.getAndIncrement());
        }
        configs.put(CLIENT_ID_CONFIG, refinedClientId);
    }

    /**
     * Validates and reconciles the idempotence-related configs.
     * <p>
     * If idempotence was enabled implicitly (not set by the user) and `retries`, `acks` or
     * `max.in.flight.requests.per.connection` conflicts with it, idempotence is silently
     * disabled; if the user explicitly enabled idempotence, the same conflicts throw a
     * {@code ConfigException}. Finally, a configured `transactional.id` requires idempotence.
     *
     * @param configs the post-processed config map, mutated in place (acks normalized,
     *                enable.idempotence possibly flipped to false)
     */
    private void postProcessAndValidateIdempotenceConfigs(final Map<String, Object> configs) {
        final Map<String, Object> originalConfigs = this.originals();
        final String acksStr = parseAcks(this.getString(ACKS_CONFIG));
        configs.put(ACKS_CONFIG, acksStr);

        // Whether the user explicitly set enable.idempotence (affects error vs. silent-disable).
        final boolean userConfiguredIdempotence = this.originals().containsKey(ENABLE_IDEMPOTENCE_CONFIG);
        boolean idempotenceEnabled = this.getBoolean(ENABLE_IDEMPOTENCE_CONFIG);
        boolean shouldDisableIdempotence = false;

        // For idempotence producers, values for `retries` and `acks` and `max.in.flight.requests.per.connection` need validation
        if (idempotenceEnabled) {
            final int retries = this.getInt(RETRIES_CONFIG);
            if (retries == 0) {
                if (userConfiguredIdempotence) {
                    throw new ConfigException("Must set " + RETRIES_CONFIG + " to non-zero when using the idempotent producer.");
                }
                log.info("Idempotence will be disabled because {} is set to 0.", RETRIES_CONFIG);
                shouldDisableIdempotence = true;
            }

            final short acks = Short.parseShort(acksStr);
            if (acks != (short) -1) {
                if (userConfiguredIdempotence) {
                    throw new ConfigException("Must set " + ACKS_CONFIG + " to all in order to use the idempotent " +
                        "producer. Otherwise we cannot guarantee idempotence.");
                }
                log.info("Idempotence will be disabled because {} is set to {}, not set to 'all'.", ACKS_CONFIG, acks);
                shouldDisableIdempotence = true;
            }

            final int inFlightConnection = this.getInt(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION);
            if (MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_FOR_IDEMPOTENCE < inFlightConnection) {
                if (userConfiguredIdempotence) {
                    throw new ConfigException("Must set " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " to at most 5" +
                        " to use the idempotent producer.");
                }
                log.warn("Idempotence will be disabled because {} is set to {}, which is greater than 5. " +
                    "Please note that in v4.0.0 and onward, this will become an error.", MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, inFlightConnection);
                shouldDisableIdempotence = true;
            }
        }

        if (shouldDisableIdempotence) {
            configs.put(ENABLE_IDEMPOTENCE_CONFIG, false);
            idempotenceEnabled = false;
        }

        // validate `transaction.id` after validating idempotence dependant configs because `enable.idempotence` config might be overridden
        boolean userConfiguredTransactions = originalConfigs.containsKey(TRANSACTIONAL_ID_CONFIG);
        if (!idempotenceEnabled && userConfiguredTransactions) {
            throw new ConfigException("Cannot set a " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " without also enabling idempotence.");
        }
    }

    // Normalizes the user-supplied acks string: "all" (case-insensitive) maps to "-1";
    // any other value must parse as a short or a ConfigException is thrown.
    private static String parseAcks(String acksString) {
        try {
            return acksString.trim().equalsIgnoreCase("all") ? "-1" : Short.parseShort(acksString.trim()) + "";
        } catch (NumberFormatException e) {
            throw new ConfigException("Invalid configuration value for 'acks': " + acksString);
        }
    }

    /**
     * Merges explicitly supplied serializer instances into the config map.
     * If an instance is passed, its class wins; otherwise the corresponding config key
     * must already carry a non-null value, else a {@code ConfigException} is thrown.
     */
    static Map<String, Object> appendSerializerToConfig(Map<String, Object> configs,
                                                        Serializer<?> keySerializer,
                                                        Serializer<?> valueSerializer) {
        // validate serializer configuration, if the passed serializer instance is null, the user must explicitly set a valid serializer configuration value
        Map<String, Object> newConfigs = new HashMap<>(configs);
        if (keySerializer != null)
            newConfigs.put(KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getClass());
        else if (newConfigs.get(KEY_SERIALIZER_CLASS_CONFIG) == null)
            throw new ConfigException(KEY_SERIALIZER_CLASS_CONFIG, null, "must be non-null.");
        if (valueSerializer != null)
            newConfigs.put(VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getClass());
        else if (newConfigs.get(VALUE_SERIALIZER_CLASS_CONFIG) == null)
            throw new ConfigException(VALUE_SERIALIZER_CLASS_CONFIG, null, "must be non-null.");
        return newConfigs;
    }

    public ProducerConfig(Properties props) {
        super(CONFIG, props, "producer");
    }

    public ProducerConfig(Map<String, Object> props) {
        super(CONFIG, props, "producer");
} ProducerConfig(Map<?, ?> props, boolean doLog) { super(CONFIG, props, doLog, "producer"); } public static Set<String> configNames() { return CONFIG.names(); } public static ConfigDef configDef() { return new ConfigDef(CONFIG); } public static void main(String[] args) { System.out.println(CONFIG.toHtml(4, config -> "producerconfigs_" + config)); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/ProducerInterceptor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import org.apache.kafka.common.Configurable;

/**
 * A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before
 * they are published to the Kafka cluster.
 * <p>
 * This class will get producer config properties via <code>configure()</code> method, including clientId assigned
 * by KafkaProducer if not specified in the producer config. The interceptor implementation needs to be aware that it will be
 * sharing producer config namespace with other interceptors and serializers, and ensure that there are no conflicts.
 * <p>
 * Exceptions thrown by ProducerInterceptor methods will be caught, logged, but not propagated further. As a result, if
 * the user configures the interceptor with the wrong key and value type parameters, the producer will not throw an exception,
 * just log the errors.
 * <p>
 * ProducerInterceptor callbacks may be called from multiple threads. Interceptor implementation must ensure thread-safety, if needed.
 * <p>
 * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available.
 * Please see the class documentation for ClusterResourceListener for more information.
 */
public interface ProducerInterceptor<K, V> extends Configurable, AutoCloseable {
    /**
     * This is called from {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)} and
     * {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord, Callback)} methods, before key and value
     * get serialized and partition is assigned (if partition is not specified in ProducerRecord).
     * <p>
     * This method is allowed to modify the record, in which case, the new record will be returned. The implication of modifying
     * key/value is that partition assignment (if not specified in ProducerRecord) will be done based on modified key/value,
     * not key/value from the client. Consequently, key and value transformation done in onSend() needs to be consistent:
     * same key and value should mutate to the same (modified) key and value. Otherwise, log compaction would not work
     * as expected.
     * <p>
     * Similarly, it is up to interceptor implementation to ensure that correct topic/partition is returned in ProducerRecord.
     * Most often, it should be the same topic/partition from 'record'.
     * <p>
     * Any exception thrown by this method will be caught by the caller and logged, but not propagated further.
     * <p>
     * Since the producer may run multiple interceptors, a particular interceptor's onSend() callback will be called in the order
     * specified by {@link org.apache.kafka.clients.producer.ProducerConfig#INTERCEPTOR_CLASSES_CONFIG}. The first interceptor
     * in the list gets the record passed from the client, the following interceptor will be passed the record returned by the
     * previous interceptor, and so on. Since interceptors are allowed to modify records, interceptors may potentially get
     * the record already modified by other interceptors. However, building a pipeline of mutable interceptors that depend on the output
     * of the previous interceptor is discouraged, because of potential side-effects caused by interceptors potentially failing to
     * modify the record and throwing an exception. If one of the interceptors in the list throws an exception from onSend(), the exception
     * is caught, logged, and the next interceptor is called with the record returned by the last successful interceptor in the list,
     * or otherwise the client.
     *
     * @param record the record from client or the record returned by the previous interceptor in the chain of interceptors.
     * @return producer record to send to topic/partition
     */
    ProducerRecord<K, V> onSend(ProducerRecord<K, V> record);

    /**
     * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before
     * it gets sent to the server.
     * <p>
     * This method is generally called just before the user callback is called, and in additional cases when <code>KafkaProducer.send()</code>
     * throws an exception.
     * <p>
     * Any exception thrown by this method will be ignored by the caller.
     * <p>
     * This method will generally execute in the background I/O thread, so the implementation should be reasonably fast.
     * Otherwise, sending of messages from other threads could be delayed.
     *
     * @param metadata The metadata for the record that was sent (i.e. the partition and offset).
     *                 If an error occurred, metadata will contain only valid topic and maybe
     *                 partition. If partition is not given in ProducerRecord and an error occurs
     *                 before partition gets assigned, then partition will be set to RecordMetadata.NO_PARTITION.
     *                 The metadata may be null if the client passed null record to
     *                 {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)}.
     * @param exception The exception thrown during processing of this record. Null if no error occurred.
     */
    void onAcknowledgement(RecordMetadata metadata, Exception exception);

    /**
     * This is called when interceptor is closed
     */
    void close();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/ProducerRecord.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; import java.util.Objects; /** * A key/value pair to be sent to Kafka. This consists of a topic name to which the record is being sent, an optional * partition number, and an optional key and value. * <p> * If a valid partition number is specified that partition will be used when sending the record. If no partition is * specified but a key is present a partition will be chosen using a hash of the key. If neither key nor partition is * present a partition will be assigned in a round-robin fashion. Note that partition numbers are 0-indexed. * <p> * The record also has an associated timestamp. If the user did not provide a timestamp, the producer will stamp the * record with its current time. The timestamp eventually used by Kafka depends on the timestamp type configured for * the topic. * <li> * If the topic is configured to use {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME CreateTime}, * the timestamp in the producer record will be used by the broker. 
* </li> * <li> * If the topic is configured to use {@link org.apache.kafka.common.record.TimestampType#LOG_APPEND_TIME LogAppendTime}, * the timestamp in the producer record will be overwritten by the broker with the broker local time when it appends the * message to its log. * </li> * <p> * In either of the cases above, the timestamp that has actually been used will be returned to user in * {@link RecordMetadata} */ public class ProducerRecord<K, V> { private final String topic; private final Integer partition; private final Headers headers; private final K key; private final V value; private final Long timestamp; /** * Creates a record with a specified timestamp to be sent to a specified topic and partition * * @param topic The topic the record will be appended to * @param partition The partition to which the record should be sent * @param timestamp The timestamp of the record, in milliseconds since epoch. If null, the producer will assign * the timestamp using System.currentTimeMillis(). * @param key The key that will be included in the record * @param value The record contents * @param headers the headers that will be included in the record */ public ProducerRecord(String topic, Integer partition, Long timestamp, K key, V value, Iterable<Header> headers) { if (topic == null) throw new IllegalArgumentException("Topic cannot be null."); if (timestamp != null && timestamp < 0) throw new IllegalArgumentException( String.format("Invalid timestamp: %d. Timestamp should always be non-negative or null.", timestamp)); if (partition != null && partition < 0) throw new IllegalArgumentException( String.format("Invalid partition: %d. 
Partition number should always be non-negative or null.", partition)); this.topic = topic; this.partition = partition; this.key = key; this.value = value; this.timestamp = timestamp; this.headers = new RecordHeaders(headers); } /** * Creates a record with a specified timestamp to be sent to a specified topic and partition * * @param topic The topic the record will be appended to * @param partition The partition to which the record should be sent * @param timestamp The timestamp of the record, in milliseconds since epoch. If null, the producer will assign the * timestamp using System.currentTimeMillis(). * @param key The key that will be included in the record * @param value The record contents */ public ProducerRecord(String topic, Integer partition, Long timestamp, K key, V value) { this(topic, partition, timestamp, key, value, null); } /** * Creates a record to be sent to a specified topic and partition * * @param topic The topic the record will be appended to * @param partition The partition to which the record should be sent * @param key The key that will be included in the record * @param value The record contents * @param headers The headers that will be included in the record */ public ProducerRecord(String topic, Integer partition, K key, V value, Iterable<Header> headers) { this(topic, partition, null, key, value, headers); } /** * Creates a record to be sent to a specified topic and partition * * @param topic The topic the record will be appended to * @param partition The partition to which the record should be sent * @param key The key that will be included in the record * @param value The record contents */ public ProducerRecord(String topic, Integer partition, K key, V value) { this(topic, partition, null, key, value, null); } /** * Create a record to be sent to Kafka * * @param topic The topic the record will be appended to * @param key The key that will be included in the record * @param value The record contents */ public ProducerRecord(String 
topic, K key, V value) { this(topic, null, null, key, value, null); } /** * Create a record with no key * * @param topic The topic this record should be sent to * @param value The record contents */ public ProducerRecord(String topic, V value) { this(topic, null, null, null, value, null); } /** * @return The topic this record is being sent to */ public String topic() { return topic; } /** * @return The headers */ public Headers headers() { return headers; } /** * @return The key (or null if no key is specified) */ public K key() { return key; } /** * @return The value */ public V value() { return value; } /** * @return The timestamp, which is in milliseconds since epoch. */ public Long timestamp() { return timestamp; } /** * @return The partition to which the record will be sent (or null if no partition was specified) */ public Integer partition() { return partition; } @Override public String toString() { String headers = this.headers == null ? "null" : this.headers.toString(); String key = this.key == null ? "null" : this.key.toString(); String value = this.value == null ? "null" : this.value.toString(); String timestamp = this.timestamp == null ? "null" : this.timestamp.toString(); return "ProducerRecord(topic=" + topic + ", partition=" + partition + ", headers=" + headers + ", key=" + key + ", value=" + value + ", timestamp=" + timestamp + ")"; } @Override public boolean equals(Object o) { if (this == o) return true; else if (!(o instanceof ProducerRecord)) return false; ProducerRecord<?, ?> that = (ProducerRecord<?, ?>) o; return Objects.equals(key, that.key) && Objects.equals(partition, that.partition) && Objects.equals(topic, that.topic) && Objects.equals(headers, that.headers) && Objects.equals(value, that.value) && Objects.equals(timestamp, that.timestamp); } @Override public int hashCode() { int result = topic != null ? topic.hashCode() : 0; result = 31 * result + (partition != null ? partition.hashCode() : 0); result = 31 * result + (headers != null ? 
headers.hashCode() : 0); result = 31 * result + (key != null ? key.hashCode() : 0); result = 31 * result + (value != null ? value.hashCode() : 0); result = 31 * result + (timestamp != null ? timestamp.hashCode() : 0); return result; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/RecordMetadata.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.ProduceResponse; /** * The metadata for a record that has been acknowledged by the server */ public final class RecordMetadata { /** * Partition value for record without partition assigned */ public static final int UNKNOWN_PARTITION = -1; private final long offset; // The timestamp of the message. // If LogAppendTime is used for the topic, the timestamp will be the timestamp returned by the broker. // If CreateTime is used for the topic, the timestamp is the timestamp in the corresponding ProducerRecord if the // user provided one. Otherwise, it will be the producer local time when the producer record was handed to the // producer. private final long timestamp; private final int serializedKeySize; private final int serializedValueSize; private final TopicPartition topicPartition; /** * Creates a new instance with the provided parameters. 
*/ public RecordMetadata(TopicPartition topicPartition, long baseOffset, int batchIndex, long timestamp, int serializedKeySize, int serializedValueSize) { // ignore the batchIndex if the base offset is -1, since this indicates the offset is unknown this.offset = baseOffset == -1 ? baseOffset : baseOffset + batchIndex; this.timestamp = timestamp; this.serializedKeySize = serializedKeySize; this.serializedValueSize = serializedValueSize; this.topicPartition = topicPartition; } /** * Creates a new instance with the provided parameters. * * @deprecated use constructor without `checksum` parameter. This constructor will be removed in * Apache Kafka 4.0 (deprecated since 3.0). */ @Deprecated public RecordMetadata(TopicPartition topicPartition, long baseOffset, long batchIndex, long timestamp, Long checksum, int serializedKeySize, int serializedValueSize) { this(topicPartition, baseOffset, batchIndexToInt(batchIndex), timestamp, serializedKeySize, serializedValueSize); } private static int batchIndexToInt(long batchIndex) { if (batchIndex > Integer.MAX_VALUE) throw new IllegalArgumentException("batchIndex is larger than Integer.MAX_VALUE: " + batchIndex); return (int) batchIndex; } /** * Indicates whether the record metadata includes the offset. * @return true if the offset is included in the metadata, false otherwise. */ public boolean hasOffset() { return this.offset != ProduceResponse.INVALID_OFFSET; } /** * The offset of the record in the topic/partition. * @return the offset of the record, or -1 if {{@link #hasOffset()}} returns false. */ public long offset() { return this.offset; } /** * Indicates whether the record metadata includes the timestamp. * @return true if a valid timestamp exists, false otherwise. */ public boolean hasTimestamp() { return this.timestamp != RecordBatch.NO_TIMESTAMP; } /** * The timestamp of the record in the topic/partition. * * @return the timestamp of the record, or -1 if the {{@link #hasTimestamp()}} returns false. 
*/ public long timestamp() { return this.timestamp; } /** * The size of the serialized, uncompressed key in bytes. If key is null, the returned size * is -1. */ public int serializedKeySize() { return this.serializedKeySize; } /** * The size of the serialized, uncompressed value in bytes. If value is null, the returned * size is -1. */ public int serializedValueSize() { return this.serializedValueSize; } /** * The topic the record was appended to */ public String topic() { return this.topicPartition.topic(); } /** * The partition the record was sent to */ public int partition() { return this.topicPartition.partition(); } @Override public String toString() { return topicPartition.toString() + "@" + offset; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/RoundRobinPartitioner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.utils.Utils;

/**
 * The "Round-Robin" partitioner
 *
 * This partitioning strategy can be used when user wants
 * to distribute the writes to all partitions equally. This
 * is the behaviour regardless of record key hash.
 */
public class RoundRobinPartitioner implements Partitioner {
    // Per-topic monotonically increasing counter that drives the round-robin rotation.
    // ConcurrentMap + AtomicInteger keep this safe for concurrent sends to the same topic.
    private final ConcurrentMap<String, AtomicInteger> topicCounterMap = new ConcurrentHashMap<>();

    /**
     * No configuration is needed for this partitioner.
     */
    @Override
    public void configure(Map<String, ?> configs) {}

    /**
     * Compute the partition for the given record.
     *
     * @param topic The topic name
     * @param key The key to partition on (or null if no key)
     * @param keyBytes serialized key to partition on (or null if no key)
     * @param value The value to partition on or null
     * @param valueBytes serialized value to partition on or null
     * @param cluster The current cluster metadata
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();
        int nextValue = nextValue(topic);
        List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
        if (!availablePartitions.isEmpty()) {
            // Rotate over partitions that currently have a leader.
            // toPositive guards against the counter wrapping to negative values.
            int part = Utils.toPositive(nextValue) % availablePartitions.size();
            return availablePartitions.get(part).partition();
        } else {
            // no partitions are available, give a non-available partition
            return Utils.toPositive(nextValue) % numPartitions;
        }
    }

    // Returns the next counter value for the topic, creating the counter on first use.
    private int nextValue(String topic) {
        AtomicInteger counter = topicCounterMap.computeIfAbsent(topic, k -> new AtomicInteger(0));
        return counter.getAndIncrement();
    }

    /**
     * No resources to release.
     */
    @Override
    public void close() {}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/UniformStickyPartitioner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import java.util.Map;

import org.apache.kafka.clients.producer.internals.StickyPartitionCache;
import org.apache.kafka.common.Cluster;

/**
 * @deprecated Since 3.3.0, in order to use default partitioning logic
 * remove the {@code partitioner.class} configuration setting and set {@code partitioner.ignore.keys=true}.
 * See <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-794%3A+Strictly+Uniform+Sticky+Partitioner">KIP-794</a> for more info.
 *
 * The partitioning strategy:
 * <ul>
 * <li>If a partition is specified in the record, use it
 * <li>Otherwise choose the sticky partition that changes when the batch is full.
 *
 * NOTE: In contrast to the DefaultPartitioner, the record key is NOT used as part of the partitioning strategy in this
 * partitioner. Records with the same key are not guaranteed to be sent to the same partition.
 *
 * See <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-480%3A+Sticky+Partitioner">KIP-480</a> for details about sticky partitioning.
 */
@Deprecated
public class UniformStickyPartitioner implements Partitioner {

    // All sticky-partition bookkeeping (current partition per topic, switching) is delegated here.
    private final StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();

    /**
     * This partitioner takes no configuration.
     */
    @Override
    public void configure(Map<String, ?> configs) {}

    /**
     * Compute the partition for the given record. The record key is deliberately ignored;
     * the currently "sticky" partition for the topic is returned instead.
     *
     * @param topic The topic name
     * @param key The key to partition on (or null if no key)
     * @param keyBytes serialized key to partition on (or null if no key)
     * @param value The value to partition on or null
     * @param valueBytes serialized value to partition on or null
     * @param cluster The current cluster metadata
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        return stickyPartitionCache.partition(topic, cluster);
    }

    /**
     * No resources to release.
     */
    @Override
    public void close() {}

    /**
     * If a batch completed for the current sticky partition, change the sticky partition.
     * Alternately, if no sticky partition has been determined, set one.
     */
    @SuppressWarnings("deprecation")
    @Override
    public void onNewBatch(String topic, Cluster cluster, int prevPartition) {
        stickyPartitionCache.nextPartition(topic, cluster, prevPartition);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster. */ package org.apache.kafka.clients.producer;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/BufferPool.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.kafka.clients.producer.BufferExhaustedException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Meter;
import org.apache.kafka.common.utils.Time;

/**
 * A pool of ByteBuffers kept under a given memory limit. This class is fairly specific to the needs of the producer. In
 * particular it has the following properties:
 * <ol>
 * <li>There is a special "poolable size" and buffers of this size are kept in a free list and recycled
 * <li>It is fair. That is all memory is given to the longest waiting thread until it has sufficient memory. This
 * prevents starvation or deadlock when a thread asks for a large chunk of memory and needs to block until multiple
 * buffers are deallocated.
 * </ol>
 */
public class BufferPool {

    static final String WAIT_TIME_SENSOR_NAME = "bufferpool-wait-time";

    private final long totalMemory;
    private final int poolableSize;
    // Single lock guarding all pool accounting; per-waiter Conditions derived from it give FIFO fairness.
    private final ReentrantLock lock;
    // Recycled buffers of exactly poolableSize bytes.
    private final Deque<ByteBuffer> free;
    // One Condition per blocked allocator, in arrival order; the head is signalled first (fairness).
    private final Deque<Condition> waiters;
    /** Total available memory is the sum of nonPooledAvailableMemory and the number of byte buffers in free * poolableSize.  */
    private long nonPooledAvailableMemory;
    private final Metrics metrics;
    private final Time time;
    private final Sensor waitTime;
    // Once true, no new allocations are permitted; blocked allocators are woken and abort.
    private boolean closed;

    /**
     * Create a new buffer pool
     *
     * @param memory The maximum amount of memory that this buffer pool can allocate
     * @param poolableSize The buffer size to cache in the free list rather than deallocating
     * @param metrics instance of Metrics
     * @param time time instance
     * @param metricGrpName logical group name for metrics
     */
    public BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName) {
        this.poolableSize = poolableSize;
        this.lock = new ReentrantLock();
        this.free = new ArrayDeque<>();
        this.waiters = new ArrayDeque<>();
        this.totalMemory = memory;
        this.nonPooledAvailableMemory = memory;
        this.metrics = metrics;
        this.time = time;
        this.waitTime = this.metrics.sensor(WAIT_TIME_SENSOR_NAME);
        MetricName rateMetricName = metrics.metricName("bufferpool-wait-ratio",
                                                   metricGrpName,
                                                   "The fraction of time an appender waits for space allocation.");
        MetricName totalMetricName = metrics.metricName("bufferpool-wait-time-total",
                                                   metricGrpName,
                                                   "*Deprecated* The total time an appender waits for space allocation.");
        MetricName totalNsMetricName = metrics.metricName("bufferpool-wait-time-ns-total",
                                                   metricGrpName,
                                                   "The total time in nanoseconds an appender waits for space allocation.");

        Sensor bufferExhaustedRecordSensor = metrics.sensor("buffer-exhausted-records");
        MetricName bufferExhaustedRateMetricName = metrics.metricName("buffer-exhausted-rate", metricGrpName, "The average per-second number of record sends that are dropped due to buffer exhaustion");
        MetricName bufferExhaustedTotalMetricName = metrics.metricName("buffer-exhausted-total", metricGrpName, "The total number of record sends that are dropped due to buffer exhaustion");
        bufferExhaustedRecordSensor.add(new Meter(bufferExhaustedRateMetricName, bufferExhaustedTotalMetricName));

        this.waitTime.add(new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalMetricName));
        this.waitTime.add(new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalNsMetricName));
        this.closed = false;
    }

    /**
     * Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool
     * is configured with blocking mode.
     *
     * @param size The buffer size to allocate in bytes
     * @param maxTimeToBlockMs The maximum time in milliseconds to block for buffer memory to be available
     * @return The buffer
     * @throws InterruptedException If the thread is interrupted while blocked
     * @throws IllegalArgumentException if size is larger than the total memory controlled by the pool (and hence we would block
     *         forever)
     */
    public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException {
        if (size > this.totalMemory)
            throw new IllegalArgumentException("Attempt to allocate " + size
                                               + " bytes, but there is a hard limit of "
                                               + this.totalMemory
                                               + " on memory allocations.");

        ByteBuffer buffer = null;
        this.lock.lock();

        // Checked after acquiring the lock so the closed flag is read consistently with close().
        if (this.closed) {
            this.lock.unlock();
            throw new KafkaException("Producer closed while allocating memory");
        }

        try {
            // check if we have a free buffer of the right size pooled
            if (size == poolableSize && !this.free.isEmpty())
                return this.free.pollFirst();

            // now check if the request is immediately satisfiable with the
            // memory on hand or if we need to block
            int freeListSize = freeSize() * this.poolableSize;
            if (this.nonPooledAvailableMemory + freeListSize >= size) {
                // we have enough unallocated or pooled memory to immediately
                // satisfy the request, but need to allocate the buffer
                freeUp(size);
                this.nonPooledAvailableMemory -= size;
            } else {
                // we are out of memory and will have to block
                int accumulated = 0;
                Condition moreMemory = this.lock.newCondition();
                try {
                    long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs);
                    // addLast preserves FIFO order of waiters, which is what makes the pool fair.
                    this.waiters.addLast(moreMemory);
                    // loop over and over until we have a buffer or have reserved
                    // enough memory to allocate one
                    while (accumulated < size) {
                        long startWaitNs = time.nanoseconds();
                        long timeNs;
                        boolean waitingTimeElapsed;
                        try {
                            waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS);
                        } finally {
                            // Wait time is recorded even when await throws (e.g. interruption).
                            long endWaitNs = time.nanoseconds();
                            timeNs = Math.max(0L, endWaitNs - startWaitNs);
                            recordWaitTime(timeNs);
                        }

                        if (this.closed)
                            throw new KafkaException("Producer closed while allocating memory");

                        if (waitingTimeElapsed) {
                            this.metrics.sensor("buffer-exhausted-records").record();
                            throw new BufferExhaustedException("Failed to allocate " + size + " bytes within the configured max blocking time "
                                + maxTimeToBlockMs + " ms. Total memory: " + totalMemory() + " bytes. Available memory: " + availableMemory()
                                + " bytes. Poolable size: " + poolableSize() + " bytes");
                        }

                        remainingTimeToBlockNs -= timeNs;

                        // check if we can satisfy this request from the free list,
                        // otherwise allocate memory
                        if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) {
                            // just grab a buffer from the free list
                            buffer = this.free.pollFirst();
                            accumulated = size;
                        } else {
                            // we'll need to allocate memory, but we may only get
                            // part of what we need on this iteration
                            freeUp(size - accumulated);
                            int got = (int) Math.min(size - accumulated, this.nonPooledAvailableMemory);
                            this.nonPooledAvailableMemory -= got;
                            accumulated += got;
                        }
                    }
                    // Don't reclaim memory on throwable since nothing was thrown
                    accumulated = 0;
                } finally {
                    // When this loop was not able to successfully terminate don't loose available memory
                    // (accumulated is only non-zero here if an exception escaped the loop above).
                    this.nonPooledAvailableMemory += accumulated;
                    this.waiters.remove(moreMemory);
                }
            }
        } finally {
            // signal any additional waiters if there is more memory left
            // over for them
            try {
                if (!(this.nonPooledAvailableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty())
                    this.waiters.peekFirst().signal();
            } finally {
                // Another finally... otherwise find bugs complains
                lock.unlock();
            }
        }

        // The actual ByteBuffer allocation happens outside the lock to keep the critical section short.
        if (buffer == null)
            return safeAllocateByteBuffer(size);
        else
            return buffer;
    }

    // Protected for testing
    protected void recordWaitTime(long timeNs) {
        this.waitTime.record(timeNs, time.milliseconds());
    }

    /**
     * Allocate a buffer.  If buffer allocation fails (e.g. because of OOM) then return the size count back to
     * available memory and signal the next waiter if it exists.
     */
    private ByteBuffer safeAllocateByteBuffer(int size) {
        boolean error = true;
        try {
            ByteBuffer buffer = allocateByteBuffer(size);
            error = false;
            return buffer;
        } finally {
            if (error) {
                // Undo the reservation made in allocate() so the memory is not leaked on OOM.
                this.lock.lock();
                try {
                    this.nonPooledAvailableMemory += size;
                    if (!this.waiters.isEmpty())
                        this.waiters.peekFirst().signal();
                } finally {
                    this.lock.unlock();
                }
            }
        }
    }

    // Protected for testing.
    protected ByteBuffer allocateByteBuffer(int size) {
        return ByteBuffer.allocate(size);
    }

    /**
     * Attempt to ensure we have at least the requested number of bytes of memory for allocation by deallocating pooled
     * buffers (if needed)
     */
    private void freeUp(int size) {
        while (!this.free.isEmpty() && this.nonPooledAvailableMemory < size)
            this.nonPooledAvailableMemory += this.free.pollLast().capacity();
    }

    /**
     * Return buffers to the pool. If they are of the poolable size add them to the free list, otherwise just mark the
     * memory as free.
     *
     * @param buffer The buffer to return
     * @param size The size of the buffer to mark as deallocated, note that this may be smaller than buffer.capacity
     *             since the buffer may re-allocate itself during in-place compression
     */
    public void deallocate(ByteBuffer buffer, int size) {
        lock.lock();
        try {
            if (size == this.poolableSize && size == buffer.capacity()) {
                buffer.clear();
                this.free.add(buffer);
            } else {
                this.nonPooledAvailableMemory += size;
            }
            // Wake the longest-waiting allocator, if any, now that memory is available.
            Condition moreMem = this.waiters.peekFirst();
            if (moreMem != null)
                moreMem.signal();
        } finally {
            lock.unlock();
        }
    }

    public void deallocate(ByteBuffer buffer) {
        if (buffer != null)
            deallocate(buffer, buffer.capacity());
    }

    /**
     * the total free memory both unallocated and in the free list
     */
    public long availableMemory() {
        lock.lock();
        try {
            return this.nonPooledAvailableMemory + freeSize() * (long) this.poolableSize;
        } finally {
            lock.unlock();
        }
    }

    // Protected for testing.
    protected int freeSize() {
        return this.free.size();
    }

    /**
     * Get the unallocated memory (not in the free list or in use)
     */
    public long unallocatedMemory() {
        lock.lock();
        try {
            return this.nonPooledAvailableMemory;
        } finally {
            lock.unlock();
        }
    }

    /**
     * The number of threads blocked waiting on memory
     */
    public int queued() {
        lock.lock();
        try {
            return this.waiters.size();
        } finally {
            lock.unlock();
        }
    }

    /**
     * The buffer size that will be retained in the free list after use
     */
    public int poolableSize() {
        return this.poolableSize;
    }

    /**
     * The total memory managed by this pool
     */
    public long totalMemory() {
        return this.totalMemory;
    }

    // package-private method used only for testing
    Deque<Condition> waiters() {
        return this.waiters;
    }

    /**
     * Closes the buffer pool. Memory will be prevented from being allocated, but may be deallocated. All allocations
     * awaiting available memory will be notified to abort.
     */
    public void close() {
        this.lock.lock();
        this.closed = true;
        try {
            for (Condition waiter : this.waiters)
                waiter.signal();
        } finally {
            this.lock.unlock();
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer.internals; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.util.Arrays; import java.util.List; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; /** * Built-in default partitioner. Note, that this is just a utility class that is used directly from * RecordAccumulator, it does not implement the Partitioner interface. * * The class keeps track of various bookkeeping information required for adaptive sticky partitioning * (described in detail in KIP-794). There is one partitioner object per topic. */ public class BuiltInPartitioner { private final Logger log; private final String topic; private final int stickyBatchSize; private volatile PartitionLoadStats partitionLoadStats = null; private final AtomicReference<StickyPartitionInfo> stickyPartitionInfo = new AtomicReference<>(); // Visible and used for testing only. 
static volatile public Supplier<Integer> mockRandom = null; /** * BuiltInPartitioner constructor. * * @param topic The topic * @param stickyBatchSize How much to produce to partition before switch */ public BuiltInPartitioner(LogContext logContext, String topic, int stickyBatchSize) { this.log = logContext.logger(BuiltInPartitioner.class); this.topic = topic; if (stickyBatchSize < 1) { throw new IllegalArgumentException("stickyBatchSize must be >= 1 but got " + stickyBatchSize); } this.stickyBatchSize = stickyBatchSize; } /** * Calculate the next partition for the topic based on the partition load stats. */ private int nextPartition(Cluster cluster) { int random = mockRandom != null ? mockRandom.get() : Utils.toPositive(ThreadLocalRandom.current().nextInt()); // Cache volatile variable in local variable. PartitionLoadStats partitionLoadStats = this.partitionLoadStats; int partition; if (partitionLoadStats == null) { // We don't have stats to do adaptive partitioning (or it's disabled), just switch to the next // partition based on uniform distribution. List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { partition = availablePartitions.get(random % availablePartitions.size()).partition(); } else { // We don't have available partitions, just pick one among all partitions. List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); partition = random % partitions.size(); } } else { // Calculate next partition based on load distribution. // Note that partitions without leader are excluded from the partitionLoadStats. assert partitionLoadStats.length > 0; int[] cumulativeFrequencyTable = partitionLoadStats.cumulativeFrequencyTable; int weightedRandom = random % cumulativeFrequencyTable[partitionLoadStats.length - 1]; // By construction, the cumulative frequency table is sorted, so we can use binary // search to find the desired index. 
int searchResult = Arrays.binarySearch(cumulativeFrequencyTable, 0, partitionLoadStats.length, weightedRandom); // binarySearch results the index of the found element, or -(insertion_point) - 1 // (where insertion_point is the index of the first element greater than the key). // We need to get the index of the first value that is strictly greater, which // would be the insertion point, except if we found the element that's equal to // the searched value (in this case we need to get next). For example, if we have // 4 5 8 // and we're looking for 3, then we'd get the insertion_point = 0, and the function // would return -0 - 1 = -1, by adding 1 we'd get 0. If we're looking for 4, we'd // get 0, and we need the next one, so adding 1 works here as well. int partitionIndex = Math.abs(searchResult + 1); assert partitionIndex < partitionLoadStats.length; partition = partitionLoadStats.partitionIds[partitionIndex]; } log.trace("Switching to partition {} in topic {}", partition, topic); return partition; } /** * Test-only function. When partition load stats are defined, return the end of range for the * random number. */ public int loadStatsRangeEnd() { assert partitionLoadStats != null; assert partitionLoadStats.length > 0; return partitionLoadStats.cumulativeFrequencyTable[partitionLoadStats.length - 1]; } /** * Peek currently chosen sticky partition. This method works in conjunction with {@link #isPartitionChanged} * and {@link #updatePartitionInfo}. The workflow is the following: * * 1. peekCurrentPartitionInfo is called to know which partition to lock. * 2. Lock partition's batch queue. * 3. isPartitionChanged under lock to make sure that nobody raced us. * 4. Append data to buffer. * 5. updatePartitionInfo to update produced bytes and maybe switch partition. * * It's important that steps 3-5 are under partition's batch queue lock. 
 *
 * @param cluster The cluster information (needed if there is no current partition)
 * @return sticky partition info object
 */
StickyPartitionInfo peekCurrentPartitionInfo(Cluster cluster) {
    StickyPartitionInfo partitionInfo = stickyPartitionInfo.get();
    if (partitionInfo != null)
        return partitionInfo;

    // We're the first to create it.
    partitionInfo = new StickyPartitionInfo(nextPartition(cluster));
    if (stickyPartitionInfo.compareAndSet(null, partitionInfo))
        return partitionInfo;

    // Someone has raced us.
    return stickyPartitionInfo.get();
}

/**
 * Check if partition is changed by a concurrent thread. NOTE this function needs to be called under
 * the partition's batch queue lock.
 *
 * @param partitionInfo The sticky partition info object returned by peekCurrentPartitionInfo
 * @return true if sticky partition object is changed (race condition)
 */
boolean isPartitionChanged(StickyPartitionInfo partitionInfo) {
    // partitionInfo may be null if the caller didn't use built-in partitioner.
    return partitionInfo != null && stickyPartitionInfo.get() != partitionInfo;
}

/**
 * Update partition info with the number of bytes appended and maybe switch partition.
 * NOTE this function needs to be called under the partition's batch queue lock.
 *
 * @param partitionInfo The sticky partition info object returned by peekCurrentPartitionInfo
 * @param appendedBytes The number of bytes appended to this partition
 * @param cluster The cluster information
 */
void updatePartitionInfo(StickyPartitionInfo partitionInfo, int appendedBytes, Cluster cluster) {
    // Delegates with enableSwitch=true: switching is allowed as soon as enough bytes are produced.
    updatePartitionInfo(partitionInfo, appendedBytes, cluster, true);
}

/**
 * Update partition info with the number of bytes appended and maybe switch partition.
 * NOTE this function needs to be called under the partition's batch queue lock.
 *
 * @param partitionInfo The sticky partition info object returned by peekCurrentPartitionInfo
 * @param appendedBytes The number of bytes appended to this partition
 * @param cluster The cluster information
 * @param enableSwitch If true, switch partition once produced enough bytes
 */
void updatePartitionInfo(StickyPartitionInfo partitionInfo, int appendedBytes, Cluster cluster, boolean enableSwitch) {
    // partitionInfo may be null if the caller didn't use built-in partitioner.
    if (partitionInfo == null)
        return;

    // The caller must hold the batch queue lock, so the current sticky info cannot change under us.
    assert partitionInfo == stickyPartitionInfo.get();
    int producedBytes = partitionInfo.producedBytes.addAndGet(appendedBytes);

    // We're trying to switch partition once we produce stickyBatchSize bytes to a partition
    // but doing so may hinder batching because partition switch may happen while batch isn't
    // ready to send. This situation is especially likely with high linger.ms setting.
    // Consider the following example:
    //   linger.ms=500, producer produces 12KB in 500ms, batch.size=16KB
    //     - first batch collects 12KB in 500ms, gets sent
    //     - second batch collects 4KB, then we switch partition, so 4KB gets eventually sent
    //     - ... and so on - we'd get 12KB and 4KB batches
    // To get more optimal batching and avoid 4KB fractional batches, the caller may disallow
    // partition switch if batch is not ready to send, so with the example above we'd avoid
    // fractional 4KB batches: in that case the scenario would look like this:
    //     - first batch collects 12KB in 500ms, gets sent
    //     - second batch collects 4KB, but partition switch doesn't happen because batch is not ready
    //     - second batch collects 12KB in 500ms, gets sent and now we switch partition.
    //     - ... and so on - we'd just send 12KB batches
    // We cap the produced bytes to not exceed 2x of the batch size to avoid pathological cases
    // (e.g. if we have a mix of keyed and unkeyed messages, key messages may create an
    // unready batch after the batch that disabled partition switch becomes ready).
    // As a result, with high linger.ms setting we end up switching partitions after producing
    // between stickyBatchSize and stickyBatchSize * 2 bytes, to better align with batch boundary.
    if (producedBytes >= stickyBatchSize * 2) {
        log.trace("Produced {} bytes, exceeding twice the batch size of {} bytes, with switching set to {}",
            producedBytes, stickyBatchSize, enableSwitch);
    }

    if (producedBytes >= stickyBatchSize && enableSwitch || producedBytes >= stickyBatchSize * 2) {
        // We've produced enough to this partition, switch to next.
        StickyPartitionInfo newPartitionInfo = new StickyPartitionInfo(nextPartition(cluster));
        stickyPartitionInfo.set(newPartitionInfo);
    }
}

/**
 * Update partition load stats from the queue sizes of each partition
 * NOTE: queueSizes are modified in place to avoid allocations
 *
 * @param queueSizes The queue sizes, partitions without leaders are excluded
 * @param partitionIds The partition ids for the queues, partitions without leaders are excluded
 * @param length The logical length of the arrays (could be less): we may eliminate some partitions
 *               based on latency, but to avoid reallocation of the arrays, we just decrement
 *               logical length
 * Visible for testing
 */
public void updatePartitionLoadStats(int[] queueSizes, int[] partitionIds, int length) {
    if (queueSizes == null) {
        log.trace("No load stats for topic {}, not using adaptive", topic);
        partitionLoadStats = null;
        return;
    }
    assert queueSizes.length == partitionIds.length;
    assert length <= queueSizes.length;

    // The queueSizes.length represents the number of all partitions in the topic and if we have
    // less than 2 partitions, there is no need to do adaptive logic.
    // If partitioner.availability.timeout.ms != 0, then partitions that experience high latencies
    // (greater than partitioner.availability.timeout.ms) may be excluded, the length represents
    // partitions that are not excluded. If some partitions were excluded, we'd still want to
    // go through adaptive logic, even if we have one partition.
    // See also RecordAccumulator#partitionReady where the queueSizes are built.
    if (length < 1 || queueSizes.length < 2) {
        log.trace("The number of partitions is too small: available={}, all={}, not using adaptive for topic {}",
            length, queueSizes.length, topic);
        partitionLoadStats = null;
        return;
    }

    // We build cumulative frequency table from the queue sizes in place. At the beginning
    // each entry contains queue size, then we invert it (so it represents the frequency)
    // and convert to a running sum. Then a uniformly distributed random variable
    // in the range [0..last) would map to a partition with weighted probability.
    // Example: suppose we have 3 partitions with the corresponding queue sizes:
    //   0 3 1
    // Then we can invert them by subtracting the queue size from the max queue size + 1 = 4:
    //   4 1 3
    // Then we can convert it into a running sum (next value adds previous value):
    //   4 5 8
    // Now if we get a random number in the range [0..8) and find the first value that
    // is strictly greater than the number (e.g. for 4 it would be 5), then the index of
    // the value is the index of the partition we're looking for. In this example
    // random numbers 0, 1, 2, 3 would map to partition[0], 4 would map to partition[1]
    // and 5, 6, 7 would map to partition[2].

    // Calculate max queue size + 1 and check if all sizes are the same.
    int maxSizePlus1 = queueSizes[0];
    boolean allEqual = true;
    for (int i = 1; i < length; i++) {
        if (queueSizes[i] != maxSizePlus1)
            allEqual = false;
        if (queueSizes[i] > maxSizePlus1)
            maxSizePlus1 = queueSizes[i];
    }
    ++maxSizePlus1;

    if (allEqual && length == queueSizes.length) {
        // No need to have complex probability logic when all queue sizes are the same,
        // and we didn't exclude partitions that experience high latencies (greater than
        // partitioner.availability.timeout.ms).
        log.trace("All queue lengths are the same, not using adaptive for topic {}", topic);
        partitionLoadStats = null;
        return;
    }

    // Invert and fold the queue size, so that they become separator values in the CFT.
    queueSizes[0] = maxSizePlus1 - queueSizes[0];
    for (int i = 1; i < length; i++) {
        queueSizes[i] = maxSizePlus1 - queueSizes[i] + queueSizes[i - 1];
    }
    log.trace("Partition load stats for topic {}: CFT={}, IDs={}, length={}",
        topic, queueSizes, partitionIds, length);
    partitionLoadStats = new PartitionLoadStats(queueSizes, partitionIds, length);
}

/**
 * Info for the current sticky partition.
 */
public static class StickyPartitionInfo {
    // Partition index this sticky info points at; immutable for the lifetime of the object.
    private final int index;
    // Running count of bytes appended to this partition; drives the switch decision above.
    private final AtomicInteger producedBytes = new AtomicInteger();

    StickyPartitionInfo(int index) {
        this.index = index;
    }

    public int partition() {
        return index;
    }
}

/*
 * Default hashing function to choose a partition from the serialized key bytes
 */
public static int partitionForKey(final byte[] serializedKey, final int numPartitions) {
    return Utils.toPositive(Utils.murmur2(serializedKey)) % numPartitions;
}

/**
 * The partition load stats for each topic that are used for adaptive partition distribution.
 */
private final static class PartitionLoadStats {
    public final int[] cumulativeFrequencyTable;
    public final int[] partitionIds;
    public final int length;
    public PartitionLoadStats(int[] cumulativeFrequencyTable, int[] partitionIds, int length) {
        assert cumulativeFrequencyTable.length == partitionIds.length;
        assert length <= cumulativeFrequencyTable.length;
        this.cumulativeFrequencyTable = cumulativeFrequencyTable;
        this.partitionIds = partitionIds;
        this.length = length;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/DefaultPartitioner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;

/**
 * NOTE this partitioner is deprecated and shouldn't be used. To use default partitioning logic
 * remove partitioner.class configuration setting. See KIP-794 for more info.
 *
 * The default partitioning strategy:
 * <ul>
 * <li>If a partition is specified in the record, use it
 * <li>If no partition is specified but a key is present choose a partition based on a hash of the key
 * <li>If no partition or key is present choose the sticky partition that changes when the batch is full.
 *
 * See KIP-480 for details about sticky partitioning.
 */
@Deprecated
public class DefaultPartitioner implements Partitioner {

    // Tracks the current "sticky" partition per topic for records without a key.
    private final StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();

    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration is needed by this partitioner.
    }

    /**
     * Compute the partition for the given record.
     *
     * @param topic The topic name
     * @param key The key to partition on (or null if no key)
     * @param keyBytes serialized key to partition on (or null if no key)
     * @param value The value to partition on or null
     * @param valueBytes serialized value to partition on or null
     * @param cluster The current cluster metadata
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Delegate to the overload that takes an explicit partition count.
        int numPartitions = cluster.partitionsForTopic(topic).size();
        return partition(topic, key, keyBytes, value, valueBytes, cluster, numPartitions);
    }

    /**
     * Compute the partition for the given record.
     *
     * @param topic The topic name
     * @param numPartitions The number of partitions of the given {@code topic}
     * @param key The key to partition on (or null if no key)
     * @param keyBytes serialized key to partition on (or null if no key)
     * @param value The value to partition on or null
     * @param valueBytes serialized value to partition on or null
     * @param cluster The current cluster metadata
     */
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster,
                         int numPartitions) {
        // Keyed records hash to a fixed partition; keyless records use the sticky partition.
        return keyBytes == null
            ? stickyPartitionCache.partition(topic, cluster)
            : BuiltInPartitioner.partitionForKey(keyBytes, numPartitions);
    }

    @Override
    public void close() {
        // Nothing to release.
    }

    /**
     * If a batch completed for the current sticky partition, change the sticky partition.
     * Alternately, if no sticky partition has been determined, set one.
     */
    @SuppressWarnings("deprecation")
    @Override
    public void onNewBatch(String topic, Cluster cluster, int prevPartition) {
        stickyPartitionCache.nextPartition(topic, cluster, prevPartition);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/ErrorLoggingCallback.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;

/**
 * A {@link Callback} that logs an error (including key/value details) when a send fails,
 * and does nothing on success.
 */
public class ErrorLoggingCallback implements Callback {
    private static final Logger log = LoggerFactory.getLogger(ErrorLoggingCallback.class);

    // All state is assigned exactly once in the constructor, so the fields are final
    // (immutability; the callback may be invoked from the producer I/O thread).
    private final String topic;
    private final byte[] key;
    // The value is retained only when it will be logged as a string; otherwise only its
    // length is kept, so large payloads are not pinned until the send completes.
    private final byte[] value;
    private final int valueLength;
    private final boolean logAsString;

    /**
     * @param topic The topic the record was sent to
     * @param key The record key (may be null)
     * @param value The record value (may be null)
     * @param logAsString If true, key and value are logged as UTF-8 strings; otherwise only their sizes
     */
    public ErrorLoggingCallback(String topic, byte[] key, byte[] value, boolean logAsString) {
        this.topic = topic;
        this.key = key;
        this.value = logAsString ? value : null;
        this.valueLength = value == null ? -1 : value.length;
        this.logAsString = logAsString;
    }

    @Override
    public void onCompletion(RecordMetadata metadata, Exception e) {
        if (e != null) {
            String keyString = toLoggableString(key, key == null ? -1 : key.length);
            String valueString = toLoggableString(value, valueLength);
            log.error("Error when sending message to topic {} with key: {}, value: {} with error:",
                    topic, keyString, valueString, e);
        }
    }

    // Renders a payload for logging: "null" when absent, the UTF-8 text when logAsString,
    // otherwise just its size.
    private String toLoggableString(byte[] data, int length) {
        if (length == -1)
            return "null";
        return logAsString ? new String(data, StandardCharsets.UTF_8) : length + " bytes";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/FutureRecordMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.utils.Time;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * The future result of a record send
 */
public final class FutureRecordMetadata implements Future<RecordMetadata> {

    private final ProduceRequestResult result;
    private final int batchIndex;
    private final long createTimestamp;
    private final int serializedKeySize;
    private final int serializedValueSize;
    private final Time time;
    // Set when the owning batch is split; volatile so a waiter sees the chained future
    // published by another thread. All accessor methods delegate to it when non-null.
    private volatile FutureRecordMetadata nextRecordMetadata = null;

    public FutureRecordMetadata(ProduceRequestResult result, int batchIndex, long createTimestamp,
                                int serializedKeySize, int serializedValueSize, Time time) {
        this.result = result;
        this.batchIndex = batchIndex;
        this.createTimestamp = createTimestamp;
        this.serializedKeySize = serializedKeySize;
        this.serializedValueSize = serializedValueSize;
        this.time = time;
    }

    // Cancellation of an in-flight send is not supported; always reports failure.
    @Override
    public boolean cancel(boolean interrupt) {
        return false;
    }

    @Override
    public boolean isCancelled() {
        return false;
    }

    @Override
    public RecordMetadata get() throws InterruptedException, ExecutionException {
        this.result.await();
        // If the batch was split after this future was handed out, wait on the chained future too.
        if (nextRecordMetadata != null)
            return nextRecordMetadata.get();
        return valueOrError();
    }

    @Override
    public RecordMetadata get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        // Handle overflow: clamp the deadline to Long.MAX_VALUE when now + timeout would wrap.
        long now = time.milliseconds();
        long timeoutMillis = unit.toMillis(timeout);
        long deadline = Long.MAX_VALUE - timeoutMillis < now ? Long.MAX_VALUE : now + timeoutMillis;
        boolean occurred = this.result.await(timeout, unit);
        if (!occurred)
            throw new TimeoutException("Timeout after waiting for " + timeoutMillis + " ms.");
        // Pass only the remaining time budget on to the chained future.
        if (nextRecordMetadata != null)
            return nextRecordMetadata.get(deadline - time.milliseconds(), TimeUnit.MILLISECONDS);
        return valueOrError();
    }

    /**
     * This method is used when we have to split a large batch in smaller ones. A chained metadata will allow the
     * future that has already returned to the users to wait on the newly created split batches even after the
     * old big batch has been deemed as done.
     */
    void chain(FutureRecordMetadata futureRecordMetadata) {
        // Append to the end of the chain so repeated splits all remain observable.
        if (nextRecordMetadata == null)
            nextRecordMetadata = futureRecordMetadata;
        else
            nextRecordMetadata.chain(futureRecordMetadata);
    }

    // Translates a per-record error recorded in the request result into an ExecutionException,
    // matching the Future contract; returns the metadata value when there was no error.
    RecordMetadata valueOrError() throws ExecutionException {
        RuntimeException exception = this.result.error(batchIndex);
        if (exception != null)
            throw new ExecutionException(exception);
        else
            return value();
    }

    RecordMetadata value() {
        if (nextRecordMetadata != null)
            return nextRecordMetadata.value();
        return new RecordMetadata(result.topicPartition(), this.result.baseOffset(), this.batchIndex,
                                  timestamp(), this.serializedKeySize, this.serializedValueSize);
    }

    // Broker-assigned log-append time when the topic uses it, otherwise the client create time.
    private long timestamp() {
        return result.hasLogAppendTime() ? result.logAppendTime() : createTimestamp;
    }

    @Override
    public boolean isDone() {
        if (nextRecordMetadata != null)
            return nextRecordMetadata.isDone();
        return this.result.completed();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/IncompleteBatches.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

/*
 * A thread-safe helper class to hold batches that haven't been acknowledged yet (including those
 * which have and have not been sent).
 */
class IncompleteBatches {
    // All access is guarded by synchronizing on this set itself.
    private final Set<ProducerBatch> incomplete;

    public IncompleteBatches() {
        this.incomplete = new HashSet<>();
    }

    /** Track a batch until it is acknowledged. */
    public void add(ProducerBatch batch) {
        synchronized (incomplete) {
            incomplete.add(batch);
        }
    }

    /** Stop tracking a batch; the batch must have been added previously. */
    public void remove(ProducerBatch batch) {
        synchronized (incomplete) {
            if (!incomplete.remove(batch))
                throw new IllegalStateException("Remove from the incomplete set failed. This should be impossible.");
        }
    }

    /** A snapshot of the currently tracked batches, safe to iterate without the lock. */
    public Iterable<ProducerBatch> copyAll() {
        ArrayList<ProducerBatch> snapshot;
        synchronized (incomplete) {
            snapshot = new ArrayList<>(incomplete);
        }
        return snapshot;
    }

    /** A snapshot of the produce futures of the currently tracked batches. */
    public Iterable<ProduceRequestResult> requestResults() {
        synchronized (incomplete) {
            ArrayList<ProduceRequestResult> results = new ArrayList<>(incomplete.size());
            for (ProducerBatch batch : incomplete)
                results.add(batch.produceFuture);
            return results;
        }
    }

    /** True when no batches are awaiting acknowledgement. */
    public boolean isEmpty() {
        synchronized (incomplete) {
            return incomplete.isEmpty();
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.CumulativeSum;

import java.util.Map;

/**
 * Registers and records the producer's cumulative latency sensors (flush, transaction
 * operations, metadata wait). Each sensor accumulates total time in nanoseconds under a
 * "&lt;name&gt;-time-ns-total" metric. Closing this object removes all registered sensors.
 */
public class KafkaProducerMetrics implements AutoCloseable {

    public static final String GROUP = "producer-metrics";
    private static final String FLUSH = "flush";
    private static final String TXN_INIT = "txn-init";
    private static final String TXN_BEGIN = "txn-begin";
    private static final String TXN_SEND_OFFSETS = "txn-send-offsets";
    private static final String TXN_COMMIT = "txn-commit";
    private static final String TXN_ABORT = "txn-abort";
    private static final String TOTAL_TIME_SUFFIX = "-time-ns-total";
    private static final String METADATA_WAIT = "metadata-wait";

    // Every sensor base name this class registers; used to tear them all down in close().
    private static final String[] ALL_SENSOR_NAMES = {
        FLUSH, TXN_INIT, TXN_BEGIN, TXN_SEND_OFFSETS, TXN_COMMIT, TXN_ABORT, METADATA_WAIT
    };

    private final Map<String, String> tags;
    private final Metrics metrics;
    private final Sensor initTimeSensor;
    private final Sensor beginTxnTimeSensor;
    private final Sensor flushTimeSensor;
    private final Sensor sendOffsetsSensor;
    private final Sensor commitTxnSensor;
    private final Sensor abortTxnSensor;
    private final Sensor metadataWaitSensor;

    public KafkaProducerMetrics(Metrics metrics) {
        this.metrics = metrics;
        this.tags = metrics.config().tags();
        this.flushTimeSensor = newLatencySensor(FLUSH,
            "Total time producer has spent in flush in nanoseconds.");
        this.initTimeSensor = newLatencySensor(TXN_INIT,
            "Total time producer has spent in initTransactions in nanoseconds.");
        this.beginTxnTimeSensor = newLatencySensor(TXN_BEGIN,
            "Total time producer has spent in beginTransaction in nanoseconds.");
        this.sendOffsetsSensor = newLatencySensor(TXN_SEND_OFFSETS,
            "Total time producer has spent in sendOffsetsToTransaction in nanoseconds.");
        this.commitTxnSensor = newLatencySensor(TXN_COMMIT,
            "Total time producer has spent in commitTransaction in nanoseconds.");
        this.abortTxnSensor = newLatencySensor(TXN_ABORT,
            "Total time producer has spent in abortTransaction in nanoseconds.");
        this.metadataWaitSensor = newLatencySensor(METADATA_WAIT,
            "Total time producer has spent waiting on topic metadata in nanoseconds.");
    }

    @Override
    public void close() {
        // Unregister every sensor created in the constructor.
        for (String name : ALL_SENSOR_NAMES) {
            removeMetric(name);
        }
    }

    public void recordFlush(long duration) {
        flushTimeSensor.record(duration);
    }

    public void recordInit(long duration) {
        initTimeSensor.record(duration);
    }

    public void recordBeginTxn(long duration) {
        beginTxnTimeSensor.record(duration);
    }

    public void recordSendOffsets(long duration) {
        sendOffsetsSensor.record(duration);
    }

    public void recordCommitTxn(long duration) {
        commitTxnSensor.record(duration);
    }

    public void recordAbortTxn(long duration) {
        abortTxnSensor.record(duration);
    }

    public void recordMetadataWait(long duration) {
        metadataWaitSensor.record(duration);
    }

    // Creates a sensor that accumulates a running total (CumulativeSum) of recorded durations.
    private Sensor newLatencySensor(String name, String description) {
        Sensor sensor = metrics.sensor(name + TOTAL_TIME_SUFFIX);
        sensor.add(metricName(name, description), new CumulativeSum());
        return sensor;
    }

    private MetricName metricName(final String name, final String description) {
        return metrics.metricName(name + TOTAL_TIME_SUFFIX, GROUP, description, tags);
    }

    private void removeMetric(final String name) {
        metrics.removeSensor(name + TOTAL_TIME_SUFFIX);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/ProduceRequestResult.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.RecordBatch;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

/**
 * A class that models the future completion of a produce request for a single partition. There is one of these per
 * partition in a produce request and it is shared by all the {@link RecordMetadata} instances that are batched together
 * for the same partition in the request.
 */
public class ProduceRequestResult {

    // Completion is signaled by counting this latch down to zero (see done()).
    private final CountDownLatch latch = new CountDownLatch(1);
    private final TopicPartition topicPartition;

    // Written by the sender thread via set(), read by waiters after the latch opens;
    // volatile gives the necessary visibility.
    private volatile Long baseOffset = null;
    private volatile long logAppendTime = RecordBatch.NO_TIMESTAMP;
    private volatile Function<Integer, RuntimeException> errorsByIndex;

    /**
     * Create an instance of this class.
     *
     * @param topicPartition The topic and partition to which this record set was sent
     */
    public ProduceRequestResult(TopicPartition topicPartition) {
        this.topicPartition = topicPartition;
    }

    /**
     * Set the result of the produce request.
     *
     * @param baseOffset The base offset assigned to the record
     * @param logAppendTime The log append time or -1 if CreateTime is being used
     * @param errorsByIndex Function mapping the batch index to the exception, or null if the response was successful
     */
    public void set(long baseOffset, long logAppendTime, Function<Integer, RuntimeException> errorsByIndex) {
        this.baseOffset = baseOffset;
        this.logAppendTime = logAppendTime;
        this.errorsByIndex = errorsByIndex;
    }

    /**
     * Mark this request as complete and unblock any threads waiting on its completion.
     */
    public void done() {
        // Enforces the contract that set(...) is always called before completion is signaled.
        if (baseOffset == null)
            throw new IllegalStateException("The method `set` must be invoked before this method.");
        this.latch.countDown();
    }

    /**
     * Await the completion of this request
     */
    public void await() throws InterruptedException {
        latch.await();
    }

    /**
     * Await the completion of this request (up to the given time interval)
     * @param timeout The maximum time to wait
     * @param unit The unit for the max time
     * @return true if the request completed, false if we timed out
     */
    public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
        return latch.await(timeout, unit);
    }

    /**
     * The base offset for the request (the first offset in the record set)
     */
    public long baseOffset() {
        // NOTE(review): unboxes a nullable Long and would NPE if called before set(...);
        // callers appear to await completion first — confirm before relying on this elsewhere.
        return baseOffset;
    }

    /**
     * Return true if log append time is being used for this topic
     */
    public boolean hasLogAppendTime() {
        return logAppendTime != RecordBatch.NO_TIMESTAMP;
    }

    /**
     * The log append time or -1 if CreateTime is being used
     */
    public long logAppendTime() {
        return logAppendTime;
    }

    /**
     * The error thrown (generally on the server) while processing this request
     */
    public RuntimeException error(int batchIndex) {
        if (errorsByIndex == null) {
            return null;
        } else {
            return errorsByIndex.apply(batchIndex);
        }
    }

    /**
     * The topic and partition to which the record was appended
     */
    public TopicPartition topicPartition() {
        return topicPartition;
    }

    /**
     * Has the request completed?
     */
    public boolean completed() {
        return this.latch.getCount() == 0L;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/ProducerBatch.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer.internals; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.RecordBatchTooLargeException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.record.AbstractRecords; import org.apache.kafka.common.record.CompressionRatioEstimator; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.MemoryRecordsBuilder; import org.apache.kafka.common.record.MutableRecordBatch; import org.apache.kafka.common.record.Record; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.requests.ProduceResponse; import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Deque; import java.util.Iterator; import java.util.List; import java.util.Objects; 
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2;
import static org.apache.kafka.common.record.RecordBatch.NO_TIMESTAMP;

/**
 * A batch of records that is or will be sent.
 *
 * This class is not thread safe and external synchronization must be used when modifying it
 */
public final class ProducerBatch {

    private static final Logger log = LoggerFactory.getLogger(ProducerBatch.class);

    // Terminal state of a batch; set exactly once via CAS on finalState below.
    private enum FinalState { ABORTED, FAILED, SUCCEEDED }

    final long createdMs;
    final TopicPartition topicPartition;
    final ProduceRequestResult produceFuture;

    // One Thunk per appended record, kept so futures/callbacks can be re-wired if the batch is split.
    private final List<Thunk> thunks = new ArrayList<>();
    private final MemoryRecordsBuilder recordsBuilder;
    private final AtomicInteger attempts = new AtomicInteger(0);
    private final boolean isSplitBatch;
    private final AtomicReference<FinalState> finalState = new AtomicReference<>(null);

    int recordCount;
    int maxRecordSize;
    private long lastAttemptMs;
    private long lastAppendTime;
    private long drainedMs;
    private boolean retry;
    private boolean reopened;

    public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long createdMs) {
        this(tp, recordsBuilder, createdMs, false);
    }

    public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long createdMs, boolean isSplitBatch) {
        this.createdMs = createdMs;
        this.lastAttemptMs = createdMs;
        this.recordsBuilder = recordsBuilder;
        this.topicPartition = tp;
        this.lastAppendTime = createdMs;
        this.produceFuture = new ProduceRequestResult(topicPartition);
        this.retry = false;
        this.isSplitBatch = isSplitBatch;
        // Seed the builder with the historical compression ratio observed for this topic/codec.
        float compressionRatioEstimation = CompressionRatioEstimator.estimation(topicPartition.topic(),
                                                                                recordsBuilder.compressionType());
        recordsBuilder.setEstimatedCompressionRatio(compressionRatioEstimation);
    }

    /**
     * Append the record to the current record set and return the relative offset within that record set
     *
     * @return The RecordSend corresponding to this record or null if there isn't sufficient room.
     */
    public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) {
        if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
            // Caller interprets null as "batch full, start a new one".
            return null;
        } else {
            this.recordsBuilder.append(timestamp, key, value, headers);
            this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(),
                    recordsBuilder.compressionType(), key, value, headers));
            this.lastAppendTime = now;
            FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
                                                                   timestamp,
                                                                   key == null ? -1 : key.length,
                                                                   value == null ? -1 : value.length,
                                                                   Time.SYSTEM);
            // we have to keep every future returned to the users in case the batch needs to be
            // split to several new batches and resent.
            thunks.add(new Thunk(callback, future));
            this.recordCount++;
            return future;
        }
    }

    /**
     * This method is only used by {@link #split(int)} when splitting a large batch to smaller ones.
     * @return true if the record has been successfully appended, false otherwise.
     */
    private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers, Thunk thunk) {
        if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
            return false;
        } else {
            // No need to get the CRC.
            this.recordsBuilder.append(timestamp, key, value, headers);
            this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(),
                    recordsBuilder.compressionType(), key, value, headers));
            FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
                                                                   timestamp,
                                                                   key == null ? -1 : key.remaining(),
                                                                   value == null ? -1 : value.remaining(),
                                                                   Time.SYSTEM);
            // Chain the future to the original thunk.
            thunk.future.chain(future);
            this.thunks.add(thunk);
            this.recordCount++;
            return true;
        }
    }

    /**
     * Abort the batch and complete the future and callbacks.
     *
     * @param exception The exception to use to complete the future and awaiting callbacks.
     */
    public void abort(RuntimeException exception) {
        // CAS guards against aborting a batch that already reached a terminal state.
        if (!finalState.compareAndSet(null, FinalState.ABORTED))
            throw new IllegalStateException("Batch has already been completed in final state " + finalState.get());

        log.trace("Aborting batch for partition {}", topicPartition, exception);
        completeFutureAndFireCallbacks(ProduceResponse.INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, index -> exception);
    }

    /**
     * Check if the batch has been completed (either successfully or exceptionally).
     * @return `true` if the batch has been completed, `false` otherwise.
     */
    public boolean isDone() {
        return finalState() != null;
    }

    /**
     * Complete the batch successfully.
     * @param baseOffset The base offset of the messages assigned by the server
     * @param logAppendTime The log append time or -1 if CreateTime is being used
     * @return true if the batch was completed as a result of this call, and false
     *   if it had been completed previously
     */
    public boolean complete(long baseOffset, long logAppendTime) {
        return done(baseOffset, logAppendTime, null, null);
    }

    /**
     * Complete the batch exceptionally. The provided top-level exception will be used
     * for each record future contained in the batch.
     *
     * @param topLevelException top-level partition error
     * @param recordExceptions Record exception function mapping batchIndex to the respective record exception
     * @return true if the batch was completed as a result of this call, and false
     *   if it had been completed previously
     */
    public boolean completeExceptionally(
        RuntimeException topLevelException,
        Function<Integer, RuntimeException> recordExceptions
    ) {
        Objects.requireNonNull(topLevelException);
        Objects.requireNonNull(recordExceptions);
        return done(ProduceResponse.INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, topLevelException, recordExceptions);
    }

    /**
     * Finalize the state of a batch. Final state, once set, is immutable. This function may be called
     * once or twice on a batch.
It may be called twice if * 1. An inflight batch expires before a response from the broker is received. The batch's final * state is set to FAILED. But it could succeed on the broker and second time around batch.done() may * try to set SUCCEEDED final state. * 2. If a transaction abortion happens or if the producer is closed forcefully, the final state is * ABORTED but again it could succeed if broker responds with a success. * * Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged. * Attempted transitions from one failure state to the same or a different failed state are ignored. * Attempted transitions from SUCCEEDED to the same or a failed state throw an exception. * * @param baseOffset The base offset of the messages assigned by the server * @param logAppendTime The log append time or -1 if CreateTime is being used * @param topLevelException The exception that occurred (or null if the request was successful) * @param recordExceptions Record exception function mapping batchIndex to the respective record exception * @return true if the batch was completed successfully and false if the batch was previously aborted */ private boolean done( long baseOffset, long logAppendTime, RuntimeException topLevelException, Function<Integer, RuntimeException> recordExceptions ) { final FinalState tryFinalState = (topLevelException == null) ? FinalState.SUCCEEDED : FinalState.FAILED; if (tryFinalState == FinalState.SUCCEEDED) { log.trace("Successfully produced messages to {} with base offset {}.", topicPartition, baseOffset); } else { log.trace("Failed to produce messages to {} with base offset {}.", topicPartition, baseOffset, topLevelException); } if (this.finalState.compareAndSet(null, tryFinalState)) { completeFutureAndFireCallbacks(baseOffset, logAppendTime, recordExceptions); return true; } if (this.finalState.get() != FinalState.SUCCEEDED) { if (tryFinalState == FinalState.SUCCEEDED) { // Log if a previously unsuccessful batch succeeded later on. 
log.debug("ProduceResponse returned {} for {} after batch with base offset {} had already been {}.", tryFinalState, topicPartition, baseOffset, this.finalState.get()); } else { // FAILED --> FAILED and ABORTED --> FAILED transitions are ignored. log.debug("Ignored state transition {} -> {} for {} batch with base offset {}", this.finalState.get(), tryFinalState, topicPartition, baseOffset); } } else { // A SUCCESSFUL batch must not attempt another state change. throw new IllegalStateException("A " + this.finalState.get() + " batch must not attempt another state change to " + tryFinalState); } return false; } private void completeFutureAndFireCallbacks( long baseOffset, long logAppendTime, Function<Integer, RuntimeException> recordExceptions ) { // Set the future before invoking the callbacks as we rely on its state for the `onCompletion` call produceFuture.set(baseOffset, logAppendTime, recordExceptions); // execute callbacks for (int i = 0; i < thunks.size(); i++) { try { Thunk thunk = thunks.get(i); if (thunk.callback != null) { if (recordExceptions == null) { RecordMetadata metadata = thunk.future.value(); thunk.callback.onCompletion(metadata, null); } else { RuntimeException exception = recordExceptions.apply(i); thunk.callback.onCompletion(null, exception); } } } catch (Exception e) { log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, e); } } produceFuture.done(); } public Deque<ProducerBatch> split(int splitBatchSize) { Deque<ProducerBatch> batches = new ArrayDeque<>(); MemoryRecords memoryRecords = recordsBuilder.build(); Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator(); if (!recordBatchIter.hasNext()) throw new IllegalStateException("Cannot split an empty producer batch."); RecordBatch recordBatch = recordBatchIter.next(); if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed()) throw new IllegalArgumentException("Batch splitting cannot be used with 
non-compressed messages " + "with version v0 and v1"); if (recordBatchIter.hasNext()) throw new IllegalArgumentException("A producer batch should only have one record batch."); Iterator<Thunk> thunkIter = thunks.iterator(); // We always allocate batch size because we are already splitting a big batch. // And we also Retain the create time of the original batch. ProducerBatch batch = null; for (Record record : recordBatch) { assert thunkIter.hasNext(); Thunk thunk = thunkIter.next(); if (batch == null) batch = createBatchOffAccumulatorForRecord(record, splitBatchSize); // A newly created batch can always host the first message. if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) { batches.add(batch); batch.closeForRecordAppends(); batch = createBatchOffAccumulatorForRecord(record, splitBatchSize); batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk); } } // Close the last batch and add it to the batch list after split. 
if (batch != null) { batches.add(batch); batch.closeForRecordAppends(); } produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, index -> new RecordBatchTooLargeException()); produceFuture.done(); if (hasSequence()) { int sequence = baseSequence(); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch()); for (ProducerBatch newBatch : batches) { newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional()); sequence += newBatch.recordCount; } } return batches; } private ProducerBatch createBatchOffAccumulatorForRecord(Record record, int batchSize) { int initialSize = Math.max(AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), record.key(), record.value(), record.headers()), batchSize); ByteBuffer buffer = ByteBuffer.allocate(initialSize); // Note that we intentionally do not set producer state (producerId, epoch, sequence, and isTransactional) // for the newly created batch. This will be set when the batch is dequeued for sending (which is consistent // with how normal batches are handled). MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compressionType(), TimestampType.CREATE_TIME, 0L); return new ProducerBatch(topicPartition, builder, this.createdMs, true); } public boolean isCompressed() { return recordsBuilder.compressionType() != CompressionType.NONE; } /** * A callback and the associated FutureRecordMetadata argument to pass to it. 
*/ final private static class Thunk { final Callback callback; final FutureRecordMetadata future; Thunk(Callback callback, FutureRecordMetadata future) { this.callback = callback; this.future = future; } } @Override public String toString() { return "ProducerBatch(topicPartition=" + topicPartition + ", recordCount=" + recordCount + ")"; } boolean hasReachedDeliveryTimeout(long deliveryTimeoutMs, long now) { return deliveryTimeoutMs <= now - this.createdMs; } public FinalState finalState() { return this.finalState.get(); } int attempts() { return attempts.get(); } void reenqueued(long now) { attempts.getAndIncrement(); lastAttemptMs = Math.max(lastAppendTime, now); lastAppendTime = Math.max(lastAppendTime, now); retry = true; } long queueTimeMs() { return drainedMs - createdMs; } long waitedTimeMs(long nowMs) { return Math.max(0, nowMs - lastAttemptMs); } void drained(long nowMs) { this.drainedMs = Math.max(drainedMs, nowMs); } boolean isSplitBatch() { return isSplitBatch; } /** * Returns if the batch is been retried for sending to kafka */ public boolean inRetry() { return this.retry; } public MemoryRecords records() { return recordsBuilder.build(); } public int estimatedSizeInBytes() { return recordsBuilder.estimatedSizeInBytes(); } public double compressionRatio() { return recordsBuilder.compressionRatio(); } public boolean isFull() { return recordsBuilder.isFull(); } public void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) { recordsBuilder.setProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, baseSequence, isTransactional); } public void resetProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) { log.info("Resetting sequence number of batch with current sequence {} for partition {} to {}", this.baseSequence(), this.topicPartition, baseSequence); reopened = true; recordsBuilder.reopenAndRewriteProducerState(producerIdAndEpoch.producerId, 
producerIdAndEpoch.epoch, baseSequence, isTransactional); } /** * Release resources required for record appends (e.g. compression buffers). Once this method is called, it's only * possible to update the RecordBatch header. */ public void closeForRecordAppends() { recordsBuilder.closeForRecordAppends(); } public void close() { recordsBuilder.close(); if (!recordsBuilder.isControlBatch()) { CompressionRatioEstimator.updateEstimation(topicPartition.topic(), recordsBuilder.compressionType(), (float) recordsBuilder.compressionRatio()); } reopened = false; } /** * Abort the record builder and reset the state of the underlying buffer. This is used prior to aborting * the batch with {@link #abort(RuntimeException)} and ensures that no record previously appended can be * read. This is used in scenarios where we want to ensure a batch ultimately gets aborted, but in which * it is not safe to invoke the completion callbacks (e.g. because we are holding a lock, such as * when aborting batches in {@link RecordAccumulator}). */ public void abortRecordAppends() { recordsBuilder.abort(); } public boolean isClosed() { return recordsBuilder.isClosed(); } public ByteBuffer buffer() { return recordsBuilder.buffer(); } public int initialCapacity() { return recordsBuilder.initialCapacity(); } public boolean isWritable() { return !recordsBuilder.isClosed(); } public byte magic() { return recordsBuilder.magic(); } public long producerId() { return recordsBuilder.producerId(); } public short producerEpoch() { return recordsBuilder.producerEpoch(); } public int baseSequence() { return recordsBuilder.baseSequence(); } public int lastSequence() { return recordsBuilder.baseSequence() + recordsBuilder.numRecords() - 1; } public boolean hasSequence() { return baseSequence() != RecordBatch.NO_SEQUENCE; } public boolean isTransactional() { return recordsBuilder.isTransactional(); } public boolean sequenceHasBeenReset() { return reopened; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.RecordBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.util.List;

/**
 * A container that holds the list {@link org.apache.kafka.clients.producer.ProducerInterceptor}
 * and wraps calls to the chain of custom interceptors.
 */
public class ProducerInterceptors<K, V> implements Closeable {
    private static final Logger log = LoggerFactory.getLogger(ProducerInterceptors.class);
    private final List<ProducerInterceptor<K, V>> interceptors;

    public ProducerInterceptors(List<ProducerInterceptor<K, V>> interceptors) {
        this.interceptors = interceptors;
    }

    /**
     * This is called when client sends the record to KafkaProducer, before key and value gets serialized.
     * The method calls {@link ProducerInterceptor#onSend(ProducerRecord)} method. ProducerRecord
     * returned from the first interceptor's onSend() is passed to the second interceptor onSend(), and so on in the
     * interceptor chain. The record returned from the last interceptor is returned from this method.
     *
     * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored.
     * If an interceptor in the middle of the chain, that normally modifies the record, throws an exception,
     * the next interceptor in the chain will be called with a record returned by the previous interceptor that did not
     * throw an exception.
     *
     * @param record the record from client
     * @return producer record to send to topic/partition
     */
    public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) {
        ProducerRecord<K, V> current = record;
        for (ProducerInterceptor<K, V> interceptor : this.interceptors) {
            try {
                current = interceptor.onSend(current);
            } catch (Exception e) {
                // Interceptor failures must never break the send path: log and keep going with
                // the last successfully-returned record.
                if (record == null)
                    log.warn("Error executing interceptor onSend callback", e);
                else
                    log.warn("Error executing interceptor onSend callback for topic: {}, partition: {}", record.topic(), record.partition(), e);
            }
        }
        return current;
    }

    /**
     * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before
     * it gets sent to the server. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)}
     * method for each interceptor.
     *
     * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored.
     *
     * @param metadata The metadata for the record that was sent (i.e. the partition and offset).
     *                 If an error occurred, metadata will only contain valid topic and maybe partition.
     * @param exception The exception thrown during processing of this record. Null if no error occurred.
     */
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        fireOnAcknowledgement(metadata, exception);
    }

    /**
     * This method is called when sending the record fails in {@link ProducerInterceptor#onSend
     * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)}
     * method for each interceptor
     *
     * @param record The record from client
     * @param interceptTopicPartition  The topic/partition for the record if an error occurred
     *        after partition gets assigned; the topic part of interceptTopicPartition is the same as in record.
     * @param exception The exception thrown during processing of this record.
     */
    public void onSendError(ProducerRecord<K, V> record, TopicPartition interceptTopicPartition, Exception exception) {
        final RecordMetadata errorMetadata;
        if (record == null && interceptTopicPartition == null) {
            // Nothing to synthesize metadata from; interceptors see a null metadata.
            errorMetadata = null;
        } else {
            TopicPartition tp = interceptTopicPartition != null
                    ? interceptTopicPartition
                    : extractTopicPartition(record);
            errorMetadata = new RecordMetadata(tp, -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1);
        }
        fireOnAcknowledgement(errorMetadata, exception);
    }

    public static <K, V> TopicPartition extractTopicPartition(ProducerRecord<K, V> record) {
        Integer partition = record.partition();
        return new TopicPartition(record.topic(), partition == null ? RecordMetadata.UNKNOWN_PARTITION : partition);
    }

    // Invokes onAcknowledgement on every interceptor, never letting one interceptor's
    // exception prevent the others from being notified.
    private void fireOnAcknowledgement(RecordMetadata metadata, Exception exception) {
        for (ProducerInterceptor<K, V> interceptor : this.interceptors) {
            try {
                interceptor.onAcknowledgement(metadata, exception);
            } catch (Exception e) {
                // do not propagate interceptor exceptions, just log
                log.warn("Error executing interceptor onAcknowledgement callback", e);
            }
        }
    }

    /**
     * Closes every interceptor in a container.
     */
    @Override
    public void close() {
        for (ProducerInterceptor<K, V> interceptor : this.interceptors) {
            try {
                interceptor.close();
            } catch (Exception e) {
                log.error("Failed to close producer interceptor ", e);
            }
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/ProducerMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.Metadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

/**
 * Producer-side metadata view. Tracks the set of topics the producer has written to,
 * expiring topics that have gone unused, and lets callers block until a metadata
 * update arrives. All state is guarded by {@code synchronized(this)}; waiters are
 * released via {@code notifyAll()} on update/fatal-error/close.
 */
public class ProducerMetadata extends Metadata {
    // If a topic hasn't been accessed for this many milliseconds, it is removed from the cache.
    private final long metadataIdleMs;

    /* Topics with expiry time */
    private final Map<String, Long> topics = new HashMap<>();
    // Topics added since the last full metadata response; fetched via a targeted request.
    private final Set<String> newTopics = new HashSet<>();
    private final Logger log;
    private final Time time;

    public ProducerMetadata(long refreshBackoffMs,
                            long metadataExpireMs,
                            long metadataIdleMs,
                            LogContext logContext,
                            ClusterResourceListeners clusterResourceListeners,
                            Time time) {
        super(refreshBackoffMs, metadataExpireMs, logContext, clusterResourceListeners);
        this.metadataIdleMs = metadataIdleMs;
        this.log = logContext.logger(ProducerMetadata.class);
        this.time = time;
    }

    // Full request: all currently-tracked topics.
    @Override
    public synchronized MetadataRequest.Builder newMetadataRequestBuilder() {
        return new MetadataRequest.Builder(new ArrayList<>(topics.keySet()), true);
    }

    // Partial request: only the topics added since the last response.
    @Override
    public synchronized MetadataRequest.Builder newMetadataRequestBuilderForNewTopics() {
        return new MetadataRequest.Builder(new ArrayList<>(newTopics), true);
    }

    /**
     * Register (or refresh) a topic, extending its expiry to nowMs + metadataIdleMs.
     * A first-time topic also triggers a new-topics metadata update.
     */
    public synchronized void add(String topic, long nowMs) {
        Objects.requireNonNull(topic, "topic cannot be null");
        if (topics.put(topic, nowMs + metadataIdleMs) == null) {
            newTopics.add(topic);
            requestUpdateForNewTopics();
        }
    }

    /**
     * Request a metadata refresh for the given topic, using the cheaper new-topics
     * path when the topic is still pending its first metadata response.
     */
    public synchronized int requestUpdateForTopic(String topic) {
        if (newTopics.contains(topic)) {
            return requestUpdateForNewTopics();
        } else {
            return requestUpdate();
        }
    }

    // Visible for testing
    synchronized Set<String> topics() {
        return topics.keySet();
    }

    // Visible for testing
    synchronized Set<String> newTopics() {
        return newTopics;
    }

    public synchronized boolean containsTopic(String topic) {
        return topics.containsKey(topic);
    }

    /**
     * Decide whether a topic from a metadata response should be kept: unknown topics
     * are dropped, pending new topics are always kept, and idle topics past their
     * expiry are evicted from the cache.
     */
    @Override
    public synchronized boolean retainTopic(String topic, boolean isInternal, long nowMs) {
        Long expireMs = topics.get(topic);
        if (expireMs == null) {
            return false;
        } else if (newTopics.contains(topic)) {
            return true;
        } else if (expireMs <= nowMs) {
            log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", topic, expireMs, nowMs);
            topics.remove(topic);
            return false;
        } else {
            return true;
        }
    }

    /**
     * Wait for metadata update until the current version is larger than the last version we know of
     */
    public synchronized void awaitUpdate(final int lastVersion, final long timeoutMs) throws InterruptedException {
        long currentTimeMs = time.milliseconds();
        // Guard against overflow: a huge timeout that wraps negative means "wait forever".
        long deadlineMs = currentTimeMs + timeoutMs < 0 ? Long.MAX_VALUE : currentTimeMs + timeoutMs;
        time.waitObject(this, () -> {
            // Throw fatal exceptions, if there are any. Recoverable topic errors will be handled by the caller.
            maybeThrowFatalException();
            return updateVersion() > lastVersion || isClosed();
        }, deadlineMs);

        if (isClosed())
            throw new KafkaException("Requested metadata update after close");
    }

    @Override
    public synchronized void update(int requestVersion, MetadataResponse response, boolean isPartialUpdate, long nowMs) {
        super.update(requestVersion, response, isPartialUpdate, nowMs);

        // Remove all topics in the response that are in the new topic set. Note that if an error was encountered for a
        // new topic's metadata, then any work to resolve the error will include the topic in a full metadata update.
        if (!newTopics.isEmpty()) {
            for (MetadataResponse.TopicMetadata metadata : response.topicMetadata()) {
                newTopics.remove(metadata.topic());
            }
        }

        // Wake any threads blocked in awaitUpdate().
        notifyAll();
    }

    @Override
    public synchronized void fatalError(KafkaException fatalException) {
        super.fatalError(fatalException);
        // Wake waiters so awaitUpdate() can surface the fatal exception.
        notifyAll();
    }

    /**
     * Close this instance and notify any awaiting threads.
     */
    @Override
    public synchronized void close() {
        super.close();
        notifyAll();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/ProducerMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.MetricNameTemplate;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;

/**
 * Registry of all producer metric templates, currently just those of the sender.
 */
public class ProducerMetrics {

    public final SenderMetricsRegistry senderMetrics;
    private final Metrics metrics;

    public ProducerMetrics(Metrics metrics) {
        this.metrics = metrics;
        this.senderMetrics = new SenderMetricsRegistry(this.metrics);
    }

    /** @return a fresh mutable list of every metric template this producer registers */
    private List<MetricNameTemplate> getAllTemplates() {
        // Copy so callers cannot mutate the registry's own collection.
        return new ArrayList<>(this.senderMetrics.allTemplates());
    }

    /**
     * Documentation helper: prints the producer metrics as an HTML table to stdout.
     */
    public static void main(String[] args) {
        Map<String, String> metricTags = Collections.singletonMap("client-id", "client-id");
        MetricConfig metricConfig = new MetricConfig().tags(metricTags);
        // Metrics is Closeable; use try-with-resources so reporters are shut down cleanly.
        try (Metrics metrics = new Metrics(metricConfig)) {
            ProducerMetrics metricsRegistry = new ProducerMetrics(metrics);
            System.out.println(Metrics.toHtmlTable("kafka.producer", metricsRegistry.getAllTemplates()));
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.record.AbstractRecords;
import org.apache.kafka.common.record.CompressionRatioEstimator;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.CopyOnWriteMap;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;

/**
 * This class acts as a queue that accumulates records into {@link MemoryRecords}
 * instances to be sent to the server.
 * <p>
 * The accumulator uses a bounded amount of memory and append calls will block when that memory is exhausted, unless
 * this behavior is explicitly disabled.
 * <p>
 * Shared mutable state is guarded per-partition: each partition's batch deque is its own lock, and the
 * topic/node maps are copy-on-write. Mutations of a deque must hold that deque's monitor.
 */
public class RecordAccumulator {

    private final LogContext logContext;
    private final Logger log;
    // Set once on close(); volatile so appending threads observe shutdown promptly.
    private volatile boolean closed;
    private final AtomicInteger flushesInProgress;
    // Count of threads currently inside append(); used so abortIncompleteBatches() doesn't miss batches.
    private final AtomicInteger appendsInProgress;
    private final int batchSize;
    private CompressionType compression; // ** Changed by superstream - removed final so it can be swapped at runtime
    private final int lingerMs;
    private final long retryBackoffMs;
    private final int deliveryTimeoutMs;
    private final long partitionAvailabilityTimeoutMs;  // latency threshold for marking partition temporarily unavailable
    private final boolean enableAdaptivePartitioning;
    private final BufferPool free;
    private final Time time;
    private final ApiVersions apiVersions;
    private final ConcurrentMap<String /*topic*/, TopicInfo> topicInfoMap = new CopyOnWriteMap<>();
    private final ConcurrentMap<Integer /*nodeId*/, NodeLatencyStats> nodeStats = new CopyOnWriteMap<>();
    private final IncompleteBatches incomplete;
    // The following variables are only accessed by the sender thread, so we don't need to protect them.
    private final Set<TopicPartition> muted;
    private final Map<String, Integer> nodesDrainIndex;
    private final TransactionManager transactionManager;
    private long nextBatchExpiryTimeMs = Long.MAX_VALUE; // the earliest time (absolute) a batch will expire.

    //** added by superstream
    // Compression type pushed by Superstream at runtime; null means "no override requested".
    // Applied lazily the next time a records builder is created (see recordsBuilder()).
    private volatile CompressionType superstreamCompression = null;

    /**
     * Request a runtime change of the compression codec used for newly created batches.
     * No-op if the requested type equals the current override. (Superstream addition.)
     *
     * @param newCompressionType the codec future batches should be built with
     */
    public synchronized void updateCompressionType(CompressionType newCompressionType) {
        if (newCompressionType == this.superstreamCompression) {
            return;
        }
        this.superstreamCompression = newCompressionType;
    }
    // added by superstream **

    /**
     * Create a new record accumulator
     *
     * @param logContext The log context used for logging
     * @param batchSize The size to use when allocating {@link MemoryRecords} instances
     * @param compression The compression codec for the records
     * @param lingerMs An artificial delay time to add before declaring a records instance that isn't full ready for
     *        sending. This allows time for more records to arrive. Setting a non-zero lingerMs will trade off some
     *        latency for potentially better throughput due to more batching (and hence fewer, larger requests).
     * @param retryBackoffMs An artificial delay time to retry the produce request upon receiving an error. This avoids
     *        exhausting all retries in a short period of time.
     * @param deliveryTimeoutMs An upper bound on the time to report success or failure on record delivery
     * @param partitionerConfig Partitioner config
     * @param metrics The metrics
     * @param metricGrpName The metric group name
     * @param time The time instance to use
     * @param apiVersions Request API versions for current connected brokers
     * @param transactionManager The shared transaction state object which tracks producer IDs, epochs, and sequence
     *                           numbers per partition.
     * @param bufferPool The buffer pool
     */
    public RecordAccumulator(LogContext logContext,
                             int batchSize,
                             CompressionType compression,
                             int lingerMs,
                             long retryBackoffMs,
                             int deliveryTimeoutMs,
                             PartitionerConfig partitionerConfig,
                             Metrics metrics,
                             String metricGrpName,
                             Time time,
                             ApiVersions apiVersions,
                             TransactionManager transactionManager,
                             BufferPool bufferPool) {
        this.logContext = logContext;
        this.log = logContext.logger(RecordAccumulator.class);
        this.closed = false;
        this.flushesInProgress = new AtomicInteger(0);
        this.appendsInProgress = new AtomicInteger(0);
        this.batchSize = batchSize;
        this.compression = compression;
        this.lingerMs = lingerMs;
        this.retryBackoffMs = retryBackoffMs;
        this.deliveryTimeoutMs = deliveryTimeoutMs;
        this.enableAdaptivePartitioning = partitionerConfig.enableAdaptivePartitioning;
        this.partitionAvailabilityTimeoutMs = partitionerConfig.partitionAvailabilityTimeoutMs;
        this.free = bufferPool;
        this.incomplete = new IncompleteBatches();
        this.muted = new HashSet<>();
        this.time = time;
        this.apiVersions = apiVersions;
        nodesDrainIndex = new HashMap<>();
        this.transactionManager = transactionManager;
        registerMetrics(metrics, metricGrpName);
    }

    /**
     * Create a new record accumulator with default partitioner config
     *
     * @param logContext The log context used for logging
     * @param batchSize The size to use when allocating {@link MemoryRecords} instances
     * @param compression The compression codec for the records
     * @param lingerMs An artificial delay time to add before declaring a records instance that isn't full ready for
     *        sending. This allows time for more records to arrive. Setting a non-zero lingerMs will trade off some
     *        latency for potentially better throughput due to more batching (and hence fewer, larger requests).
     * @param retryBackoffMs An artificial delay time to retry the produce request upon receiving an error. This avoids
     *        exhausting all retries in a short period of time.
     * @param deliveryTimeoutMs An upper bound on the time to report success or failure on record delivery
     * @param metrics The metrics
     * @param metricGrpName The metric group name
     * @param time The time instance to use
     * @param apiVersions Request API versions for current connected brokers
     * @param transactionManager The shared transaction state object which tracks producer IDs, epochs, and sequence
     *                           numbers per partition.
     * @param bufferPool The buffer pool
     */
    public RecordAccumulator(LogContext logContext,
                             int batchSize,
                             CompressionType compression,
                             int lingerMs,
                             long retryBackoffMs,
                             int deliveryTimeoutMs,
                             Metrics metrics,
                             String metricGrpName,
                             Time time,
                             ApiVersions apiVersions,
                             TransactionManager transactionManager,
                             BufferPool bufferPool) {
        this(logContext,
            batchSize,
            compression,
            lingerMs,
            retryBackoffMs,
            deliveryTimeoutMs,
            new PartitionerConfig(),
            metrics,
            metricGrpName,
            time,
            apiVersions,
            transactionManager,
            bufferPool);
    }

    // Registers gauges describing buffer-pool pressure (blocked threads, total and free memory).
    private void registerMetrics(Metrics metrics, String metricGrpName) {
        metrics.addMetric(
            metrics.metricName("waiting-threads", metricGrpName,
                "The number of user threads blocked waiting for buffer memory to enqueue their records"),
            (config, now) -> free.queued());

        metrics.addMetric(
            metrics.metricName("buffer-total-bytes", metricGrpName,
                "The maximum amount of buffer memory the client can use (whether or not it is currently used)."),
            (config, now) -> free.totalMemory());

        metrics.addMetric(
            metrics.metricName("buffer-available-bytes", metricGrpName,
                "The total amount of buffer memory that is not being used (either unallocated or in the free list)."),
            (config, now) -> free.availableMemory());
    }

    // Notifies the caller's callbacks of the partition chosen for the record, if callbacks were supplied.
    private void setPartition(AppendCallbacks callbacks, int partition) {
        if (callbacks != null)
            callbacks.setPartition(partition);
    }

    /**
     * Check if partition concurrently changed, or we need to complete previously disabled partition change.
* * @param topic The topic * @param topicInfo The topic info * @param partitionInfo The built-in partitioner's partition info * @param deque The partition queue * @param nowMs The current time, in milliseconds * @param cluster THe cluster metadata * @return 'true' if partition changed and we need to get new partition info and retry, * 'false' otherwise */ private boolean partitionChanged(String topic, TopicInfo topicInfo, BuiltInPartitioner.StickyPartitionInfo partitionInfo, Deque<ProducerBatch> deque, long nowMs, Cluster cluster) { if (topicInfo.builtInPartitioner.isPartitionChanged(partitionInfo)) { log.trace("Partition {} for topic {} switched by a concurrent append, retrying", partitionInfo.partition(), topic); return true; } // We might have disabled partition switch if the queue had incomplete batches. // Check if all batches are full now and switch . if (allBatchesFull(deque)) { topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, 0, cluster, true); if (topicInfo.builtInPartitioner.isPartitionChanged(partitionInfo)) { log.trace("Completed previously disabled switch for topic {} partition {}, retrying", topic, partitionInfo.partition()); return true; } } return false; } /** * Add a record to the accumulator, return the append result * <p> * The append result will contain the future metadata, and flag for whether the appended batch is full or a new batch is created * <p> * * @param topic The topic to which this record is being sent * @param partition The partition to which this record is being sent or RecordMetadata.UNKNOWN_PARTITION * if any partition could be used * @param timestamp The timestamp of the record * @param key The key for the record * @param value The value for the record * @param headers the Headers for the record * @param callbacks The callbacks to execute * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available * @param abortOnNewBatch A boolean that indicates returning before a new batch 
     *                        is created and running the partitioner's onNewBatch method before trying to append again
     * @param nowMs The current time, in milliseconds
     * @param cluster The cluster metadata
     */
    public RecordAppendResult append(String topic,
                                     int partition,
                                     long timestamp,
                                     byte[] key,
                                     byte[] value,
                                     Header[] headers,
                                     AppendCallbacks callbacks,
                                     long maxTimeToBlock,
                                     boolean abortOnNewBatch,
                                     long nowMs,
                                     Cluster cluster) throws InterruptedException {
        TopicInfo topicInfo = topicInfoMap.computeIfAbsent(topic, k -> new TopicInfo(logContext, k, batchSize));

        // We keep track of the number of appending thread to make sure we do not miss batches in
        // abortIncompleteBatches().
        appendsInProgress.incrementAndGet();
        ByteBuffer buffer = null;
        if (headers == null) headers = Record.EMPTY_HEADERS;
        try {
            // Loop to retry in case we encounter partitioner's race conditions.
            while (true) {
                // If the message doesn't have any partition affinity, so we pick a partition based on the broker
                // availability and performance.  Note, that here we peek current partition before we hold the
                // deque lock, so we'll need to make sure that it's not changed while we were waiting for the
                // deque lock.
                final BuiltInPartitioner.StickyPartitionInfo partitionInfo;
                final int effectivePartition;
                if (partition == RecordMetadata.UNKNOWN_PARTITION) {
                    partitionInfo = topicInfo.builtInPartitioner.peekCurrentPartitionInfo(cluster);
                    effectivePartition = partitionInfo.partition();
                } else {
                    partitionInfo = null;
                    effectivePartition = partition;
                }

                // Now that we know the effective partition, let the caller know.
                setPartition(callbacks, effectivePartition);

                // check if we have an in-progress batch
                Deque<ProducerBatch> dq = topicInfo.batches.computeIfAbsent(effectivePartition, k -> new ArrayDeque<>());
                synchronized (dq) {
                    // After taking the lock, validate that the partition hasn't changed and retry.
                    if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
                        continue;

                    RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callbacks, dq, nowMs);
                    if (appendResult != null) {
                        // If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
                        boolean enableSwitch = allBatchesFull(dq);
                        topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
                        return appendResult;
                    }
                }

                // we don't have an in-progress record batch try to allocate a new batch
                if (abortOnNewBatch) {
                    // Return a result that will cause another call to append.
                    return new RecordAppendResult(null, false, false, true, 0);
                }

                if (buffer == null) {
                    byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
                    int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers));
                    log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, topic, partition, maxTimeToBlock);
                    // This call may block if we exhausted buffer space.
                    buffer = free.allocate(size, maxTimeToBlock);
                    // Update the current time in case the buffer allocation blocked above.
                    // NOTE: getting time may be expensive, so calling it under a lock
                    // should be avoided.
                    nowMs = time.milliseconds();
                }

                synchronized (dq) {
                    // After taking the lock, validate that the partition hasn't changed and retry.
                    if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
                        continue;

                    RecordAppendResult appendResult = appendNewBatch(topic, effectivePartition, dq, timestamp, key, value, headers, callbacks, buffer, nowMs);
                    // Set buffer to null, so that deallocate doesn't return it back to free pool, since it's used in the batch.
                    if (appendResult.newBatchCreated)
                        buffer = null;
                    // If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
                    boolean enableSwitch = allBatchesFull(dq);
                    topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
                    return appendResult;
                }
            }
        } finally {
            // NOTE(review): buffer may still be null here — presumably BufferPool.deallocate tolerates null; confirm.
            free.deallocate(buffer);
            appendsInProgress.decrementAndGet();
        }
    }

    /**
     * Append a new batch to the queue
     *
     * @param topic The topic
     * @param partition The partition (cannot be RecordMetadata.UNKNOWN_PARTITION)
     * @param dq The queue
     * @param timestamp The timestamp of the record
     * @param key The key for the record
     * @param value The value for the record
     * @param headers the Headers for the record
     * @param callbacks The callbacks to execute
     * @param buffer The buffer for the new batch
     * @param nowMs The current time, in milliseconds
     */
    private RecordAppendResult appendNewBatch(String topic,
                                              int partition,
                                              Deque<ProducerBatch> dq,
                                              long timestamp,
                                              byte[] key,
                                              byte[] value,
                                              Header[] headers,
                                              AppendCallbacks callbacks,
                                              ByteBuffer buffer,
                                              long nowMs) {
        assert partition != RecordMetadata.UNKNOWN_PARTITION;

        RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callbacks, dq, nowMs);
        if (appendResult != null) {
            // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
            return appendResult;
        }

        MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, apiVersions.maxUsableProduceMagic());
        ProducerBatch batch = new ProducerBatch(new TopicPartition(topic, partition), recordsBuilder, nowMs);
        FutureRecordMetadata future = Objects.requireNonNull(batch.tryAppend(timestamp, key, value, headers,
                callbacks, nowMs));

        dq.addLast(batch);
        incomplete.add(batch);
        return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true, false, batch.estimatedSizeInBytes());
    }

    // Creates the MemoryRecordsBuilder for a new batch, applying any pending Superstream
    // compression override before the builder is constructed.
    private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer, byte maxUsableMagic) {
        if (transactionManager != null && maxUsableMagic < RecordBatch.MAGIC_VALUE_V2) {
            throw new UnsupportedVersionException("Attempting to use idempotence with a broker which does not " +
                "support the required message format (v2). The broker must be version 0.11 or later.");
        }
        // ** added by superstream
        // Swap in the runtime-requested codec; only new batches are affected.
        if (superstreamCompression != null && compression != superstreamCompression) {
            log.info("Superstream: updated compression type from {} to {}", compression, superstreamCompression);
            compression = superstreamCompression;
        }
        // ** added by superstream
        return MemoryRecords.builder(buffer, maxUsableMagic, compression, TimestampType.CREATE_TIME, 0L);
    }

    /**
     * Check if all batches in the queue are full.
     */
    private boolean allBatchesFull(Deque<ProducerBatch> deque) {
        // Only the last batch may be incomplete, so we just check that.
        ProducerBatch last = deque.peekLast();
        return last == null || last.isFull();
    }

    /**
     *  Try to append to a ProducerBatch.
     *
     *  If it is full, we return null and a new batch is created. We also close the batch for record appends to free up
     *  resources like compression buffers. The batch will be fully closed (ie. the record batch headers will be written
     *  and memory records built) in one of the following cases (whichever comes first): right before send,
     *  if it is expired, or when the producer is closed.
     */
    private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers,
                                         Callback callback, Deque<ProducerBatch> deque, long nowMs) {
        if (closed)
            throw new KafkaException("Producer closed while send in progress");
        ProducerBatch last = deque.peekLast();
        if (last != null) {
            int initialBytes = last.estimatedSizeInBytes();
            FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, nowMs);
            if (future == null) {
                // The last batch had no room; seal it so its compression buffers can be released.
                last.closeForRecordAppends();
            } else {
                int appendedBytes = last.estimatedSizeInBytes() - initialBytes;
                return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false, false, appendedBytes);
            }
        }
        return null;
    }

    // A muted partition has an in-flight batch; it must not be drained again (preserves ordering).
    private boolean isMuted(TopicPartition tp) {
        return muted.contains(tp);
    }

    public void resetNextBatchExpiryTime() {
        nextBatchExpiryTimeMs = Long.MAX_VALUE;
    }

    public void maybeUpdateNextBatchExpiryTime(ProducerBatch batch) {
        if (batch.createdMs + deliveryTimeoutMs > 0) {
            // the non-negative check is to guard us against potential overflow due to setting
            // a large value for deliveryTimeoutMs
            nextBatchExpiryTimeMs = Math.min(nextBatchExpiryTimeMs, batch.createdMs + deliveryTimeoutMs);
        } else {
            log.warn("Skipping next batch expiry time update due to addition overflow: " +
                "batch.createMs={}, deliveryTimeoutMs={}", batch.createdMs, deliveryTimeoutMs);
        }
    }

    /**
     * Get a list of batches which have been sitting in the accumulator too long and need to be expired.
     */
    public List<ProducerBatch> expiredBatches(long now) {
        List<ProducerBatch> expiredBatches = new ArrayList<>();
        for (TopicInfo topicInfo : topicInfoMap.values()) {
            for (Deque<ProducerBatch> deque : topicInfo.batches.values()) {
                // expire the batches in the order of sending
                synchronized (deque) {
                    while (!deque.isEmpty()) {
                        ProducerBatch batch = deque.getFirst();
                        if (batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now)) {
                            deque.poll();
                            batch.abortRecordAppends();
                            expiredBatches.add(batch);
                        } else {
                            // Batches are in send order, so once one unexpired batch is found the rest are too.
                            maybeUpdateNextBatchExpiryTime(batch);
                            break;
                        }
                    }
                }
            }
        }
        return expiredBatches;
    }

    public long getDeliveryTimeoutMs() {
        return deliveryTimeoutMs;
    }

    /**
     * Re-enqueue the given record batch in the accumulator. In Sender.completeBatch method, we check
     * whether the batch has reached deliveryTimeoutMs or not. Hence we do not do the delivery timeout check here.
     */
    public void reenqueue(ProducerBatch batch, long now) {
        batch.reenqueued(now);
        Deque<ProducerBatch> deque = getOrCreateDeque(batch.topicPartition);
        synchronized (deque) {
            if (transactionManager != null)
                // Idempotent/transactional batches must keep sequence order; a plain addFirst could reorder them.
                insertInSequenceOrder(deque, batch);
            else
                deque.addFirst(batch);
        }
    }

    /**
     * Split the big batch that has been rejected and reenqueue the split batches in to the accumulator.
     * @return the number of split batches.
     */
    public int splitAndReenqueue(ProducerBatch bigBatch) {
        // Reset the estimated compression ratio to the initial value or the big batch compression ratio, whichever
        // is bigger. There are several different ways to do the reset. We chose the most conservative one to ensure
        // the split doesn't happen too often.
        CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression,
                                                Math.max(1.0f, (float) bigBatch.compressionRatio()));
        Deque<ProducerBatch> dq = bigBatch.split(this.batchSize);
        int numSplitBatches = dq.size();
        Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition);
        while (!dq.isEmpty()) {
            ProducerBatch batch = dq.pollLast();
            incomplete.add(batch);
            // We treat the newly split batches as if they are not even tried.
            synchronized (partitionDequeue) {
                if (transactionManager != null) {
                    // We should track the newly created batches since they already have assigned sequences.
                    transactionManager.addInFlightBatch(batch);
                    insertInSequenceOrder(partitionDequeue, batch);
                } else {
                    partitionDequeue.addFirst(batch);
                }
            }
        }
        return numSplitBatches;
    }

    // We will have to do extra work to ensure the queue is in order when requests are being retried and there are
    // multiple requests in flight to that partition. If the first in flight request fails to append, then all the
    // subsequent in flight requests will also fail because the sequence numbers will not be accepted.
    //
    // Further, once batches are being retried, we are reduced to a single in flight request for that partition. So when
    // the subsequent batches come back in sequence order, they will have to be placed further back in the queue.
    //
    // Note that this assumes that all the batches in the queue which have an assigned sequence also have the current
    // producer id. We will not attempt to reorder messages if the producer id has changed, we will throw an
    // IllegalStateException instead.
    private void insertInSequenceOrder(Deque<ProducerBatch> deque, ProducerBatch batch) {
        // When we are re-enqueueing and have enabled idempotence, the re-enqueued batch must always have a sequence.
        if (batch.baseSequence() == RecordBatch.NO_SEQUENCE)
            throw new IllegalStateException("Trying to re-enqueue a batch which doesn't have a sequence even " +
                "though idempotency is enabled.");

        if (!transactionManager.hasInflightBatches(batch.topicPartition))
            throw new IllegalStateException("We are re-enqueueing a batch which is not tracked as part of the in flight " +
                "requests. batch.topicPartition: " + batch.topicPartition + "; batch.baseSequence: " + batch.baseSequence());

        ProducerBatch firstBatchInQueue = deque.peekFirst();
        if (firstBatchInQueue != null && firstBatchInQueue.hasSequence() && firstBatchInQueue.baseSequence() < batch.baseSequence()) {
            // The incoming batch can't be inserted at the front of the queue without violating the sequence ordering.
            // This means that the incoming batch should be placed somewhere further back.
            // We need to find the right place for the incoming batch and insert it there.
            // We will only enter this branch if we have multiple inflights sent to different brokers and we need to retry
            // the inflight batches.
            //
            // Since we reenqueue exactly one batch a time and ensure that the queue is ordered by sequence always, it
            // is a simple linear scan of a subset of the in flight batches to find the right place in the queue each time.
            List<ProducerBatch> orderedBatches = new ArrayList<>();
            while (deque.peekFirst() != null && deque.peekFirst().hasSequence() && deque.peekFirst().baseSequence() < batch.baseSequence())
                orderedBatches.add(deque.pollFirst());

            log.debug("Reordered incoming batch with sequence {} for partition {}. It was placed in the queue at " +
                "position {}", batch.baseSequence(), batch.topicPartition, orderedBatches.size());
            // Either we have reached a point where there are batches without a sequence (ie. never been drained
            // and are hence in order by default), or the batch at the front of the queue has a sequence greater
            // than the incoming batch. This is the right place to add the incoming batch.
            deque.addFirst(batch);

            // Now we have to re insert the previously queued batches in the right order.
            for (int i = orderedBatches.size() - 1; i >= 0; --i) {
                deque.addFirst(orderedBatches.get(i));
            }

            // At this point, the incoming batch has been queued in the correct place according to its sequence.
        } else {
            deque.addFirst(batch);
        }
    }

    /**
     * Add the leader to the ready nodes if the batch is ready
     *
     * @param nowMs The current time
     * @param exhausted 'true' is the buffer pool is exhausted
     * @param part The partition
     * @param leader The leader for the partition
     * @param waitedTimeMs How long batch waited
     * @param backingOff Is backing off
     * @param full Is batch full
     * @param nextReadyCheckDelayMs The delay for next check
     * @param readyNodes The set of ready nodes (to be filled in)
     * @return The delay for next check
     */
    private long batchReady(long nowMs, boolean exhausted, TopicPartition part, Node leader,
                            long waitedTimeMs, boolean backingOff, boolean full,
                            long nextReadyCheckDelayMs, Set<Node> readyNodes) {
        if (!readyNodes.contains(leader) && !isMuted(part)) {
            long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs;
            boolean expired = waitedTimeMs >= timeToWaitMs;
            boolean transactionCompleting = transactionManager != null && transactionManager.isCompleting();
            boolean sendable = full
                || expired
                || exhausted
                || closed
                || flushInProgress()
                || transactionCompleting;
            if (sendable && !backingOff) {
                readyNodes.add(leader);
            } else {
                long timeLeftMs = Math.max(timeToWaitMs - waitedTimeMs, 0);
                // Note that this results in a conservative estimate since an un-sendable partition may have
                // a leader that will later be found to have sendable data. However, this is good enough
                // since we'll just wake up and then sleep again for the remaining time.
                nextReadyCheckDelayMs = Math.min(timeLeftMs, nextReadyCheckDelayMs);
            }
        }
        return nextReadyCheckDelayMs;
    }

    /**
     * Iterate over partitions to see which one have batches ready and collect leaders of those partitions
     * into the set of ready nodes.  If partition has no leader, add the topic to the set of topics with
     * no leader.  This function also calculates stats for adaptive partitioning.
     *
     * @param cluster The cluster metadata
     * @param nowMs The current time
     * @param topic The topic
     * @param topicInfo The topic info
     * @param nextReadyCheckDelayMs The delay for next check
     * @param readyNodes The set of ready nodes (to be filled in)
     * @param unknownLeaderTopics The set of topics with no leader (to be filled in)
     * @return The delay for next check
     */
    private long partitionReady(Cluster cluster, long nowMs, String topic,
                                TopicInfo topicInfo,
                                long nextReadyCheckDelayMs, Set<Node> readyNodes, Set<String> unknownLeaderTopics) {
        ConcurrentMap<Integer, Deque<ProducerBatch>> batches = topicInfo.batches;
        // Collect the queue sizes for available partitions to be used in adaptive partitioning.
        int[] queueSizes = null;
        int[] partitionIds = null;
        if (enableAdaptivePartitioning && batches.size() >= cluster.partitionsForTopic(topic).size()) {
            // We don't do adaptive partitioning until we scheduled at least a batch for all
            // partitions (i.e. we have the corresponding entries in the batches map), we just
            // do uniform.  The reason is that we build queue sizes from the batches map,
            // and if an entry is missing in the batches map, then adaptive partitioning logic
            // won't know about it and won't switch to it.
            queueSizes = new int[batches.size()];
            partitionIds = new int[queueSizes.length];
        }

        int queueSizesIndex = -1;
        boolean exhausted = this.free.queued() > 0;
        for (Map.Entry<Integer, Deque<ProducerBatch>> entry : batches.entrySet()) {
            TopicPartition part = new TopicPartition(topic, entry.getKey());
            // Advance queueSizesIndex so that we properly index available
            // partitions.  Do it here so that it's done for all code paths.
            Node leader = cluster.leaderFor(part);
            if (leader != null && queueSizes != null) {
                ++queueSizesIndex;
                assert queueSizesIndex < queueSizes.length;
                partitionIds[queueSizesIndex] = part.partition();
            }

            Deque<ProducerBatch> deque = entry.getValue();

            final long waitedTimeMs;
            final boolean backingOff;
            final int dequeSize;
            final boolean full;

            // This loop is especially hot with large partition counts.
            // We are careful to only perform the minimum required inside the
            // synchronized block, as this lock is also used to synchronize producer threads
            // attempting to append() to a partition/batch.
            synchronized (deque) {
                // Deques are often empty in this path, esp with large partition counts,
                // so we exit early if we can.
                ProducerBatch batch = deque.peekFirst();
                if (batch == null) {
                    continue;
                }

                waitedTimeMs = batch.waitedTimeMs(nowMs);
                backingOff = batch.attempts() > 0 && waitedTimeMs < retryBackoffMs;
                dequeSize = deque.size();
                full = dequeSize > 1 || batch.isFull();
            }

            if (leader == null) {
                // This is a partition for which leader is not known, but messages are available to send.
                // Note that entries are currently not removed from batches when deque is empty.
                unknownLeaderTopics.add(part.topic());
            } else {
                if (queueSizes != null)
                    queueSizes[queueSizesIndex] = dequeSize;
                if (partitionAvailabilityTimeoutMs > 0) {
                    // Check if we want to exclude the partition from the list of available partitions
                    // if the broker hasn't responded for some time.
                    NodeLatencyStats nodeLatencyStats = nodeStats.get(leader.id());
                    if (nodeLatencyStats != null) {
                        // NOTE: there is no synchronization between reading metrics,
                        // so we read ready time first to avoid accidentally marking partition
                        // unavailable if we read while the metrics are being updated.
                        long readyTimeMs = nodeLatencyStats.readyTimeMs;
                        if (readyTimeMs - nodeLatencyStats.drainTimeMs > partitionAvailabilityTimeoutMs)
                            // Roll back the index so this stale partition drops out of the load stats.
                            --queueSizesIndex;
                    }
                }

                nextReadyCheckDelayMs = batchReady(nowMs, exhausted, part, leader, waitedTimeMs, backingOff,
                    full, nextReadyCheckDelayMs, readyNodes);
            }
        }

        // We've collected the queue sizes for partitions of this topic, now we can calculate
        // load stats.  NOTE: the stats are calculated in place, modifying the
        // queueSizes array.
        topicInfo.builtInPartitioner.updatePartitionLoadStats(queueSizes, partitionIds, queueSizesIndex + 1);
        return nextReadyCheckDelayMs;
    }

    /**
     * Get a list of nodes whose partitions are ready to be sent, and the earliest time at which any non-sendable
     * partition will be ready; Also return the flag for whether there are any unknown leaders for the accumulated
     * partition batches.
     * <p>
     * A destination node is ready to send data if:
     * <ol>
     * <li>There is at least one partition that is not backing off its send
     * <li><b>and</b> those partitions are not muted (to prevent reordering if
     *   {@value org.apache.kafka.clients.producer.ProducerConfig#MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION}
     *   is set to one)</li>
     * <li><b>and <i>any</i></b> of the following are true</li>
     * <ul>
     *     <li>The record set is full</li>
     *     <li>The record set has sat in the accumulator for at least lingerMs milliseconds</li>
     *     <li>The accumulator is out of memory and threads are blocking waiting for data (in this case all partitions
     *     are immediately considered ready).</li>
     *     <li>The accumulator has been closed</li>
     * </ul>
     * </ol>
     */
    public ReadyCheckResult ready(Cluster cluster, long nowMs) {
        Set<Node> readyNodes = new HashSet<>();
        long nextReadyCheckDelayMs = Long.MAX_VALUE;
        Set<String> unknownLeaderTopics = new HashSet<>();
        // Go topic by topic so that we can get queue sizes for partitions in a topic and calculate
        // cumulative frequency table (used in partitioner).
        for (Map.Entry<String, TopicInfo> topicInfoEntry : this.topicInfoMap.entrySet()) {
            final String topic = topicInfoEntry.getKey();
            nextReadyCheckDelayMs = partitionReady(cluster, nowMs, topic, topicInfoEntry.getValue(), nextReadyCheckDelayMs, readyNodes, unknownLeaderTopics);
        }
        return new ReadyCheckResult(readyNodes, nextReadyCheckDelayMs, unknownLeaderTopics);
    }

    /**
     * Check whether there are any batches which haven't been drained
     */
    public boolean hasUndrained() {
        for (TopicInfo topicInfo : topicInfoMap.values()) {
            for (Deque<ProducerBatch> deque : topicInfo.batches.values()) {
                synchronized (deque) {
                    if (!deque.isEmpty())
                        return true;
                }
            }
        }
        return false;
    }

    // Returns true when the given first-in-queue batch must not be drained yet — e.g. the transaction
    // state forbids sending to this partition, the producer id is not yet valid, or sequence ordering
    // would be violated by sending it now.
    private boolean shouldStopDrainBatchesForPartition(ProducerBatch first, TopicPartition tp) {
        ProducerIdAndEpoch producerIdAndEpoch = null;
        if (transactionManager != null) {
            if (!transactionManager.isSendToPartitionAllowed(tp))
                return true;

            producerIdAndEpoch = transactionManager.producerIdAndEpoch();
            if (!producerIdAndEpoch.isValid())
                // we cannot send the batch until we have refreshed the producer id
                return true;

            if (!first.hasSequence()) {
                if (transactionManager.hasInflightBatches(tp) && transactionManager.hasStaleProducerIdAndEpoch(tp)) {
                    // Don't drain any new batches while the partition has in-flight batches with a different epoch
                    // and/or producer ID. Otherwise, a batch with a new epoch and sequence number
                    // 0 could be written before earlier batches complete, which would cause out of sequence errors
                    return true;
                }

                if (transactionManager.hasUnresolvedSequence(first.topicPartition))
                    // Don't drain any new batches while the state of previous sequence numbers
                    // is unknown. The previous batches would be unknown if they were aborted
                    // on the client after being sent to the broker at least once.
                    return true;
            }

            int firstInFlightSequence = transactionManager.firstInFlightSequence(first.topicPartition);
            if (firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence()
                && first.baseSequence() != firstInFlightSequence)
                // If the queued batch already has an assigned sequence, then it is being retried.
                // In this case, we wait until the next immediate batch is ready and drain that.
                // We only move on when the next in line batch is complete (either successfully or due to
                // a fatal broker error). This effectively reduces our in flight request count to 1.
                return true;
        }
        return false;
    }

    // Collects drainable batches for a single node up to maxSize bytes, round-robining over the
    // node's partitions starting from the node's persisted drain index to avoid starvation.
    private List<ProducerBatch> drainBatchesForOneNode(Cluster cluster, Node node, int maxSize, long now) {
        int size = 0;
        List<PartitionInfo> parts = cluster.partitionsForNode(node.id());
        List<ProducerBatch> ready = new ArrayList<>();
        /* to make starvation less likely each node has its own drainIndex */
        int drainIndex = getDrainIndex(node.idString());
        int start = drainIndex = drainIndex % parts.size();
        do {
            PartitionInfo part = parts.get(drainIndex);
            TopicPartition tp = new TopicPartition(part.topic(), part.partition());
            updateDrainIndex(node.idString(), drainIndex);
            drainIndex = (drainIndex + 1) % parts.size();
            // Only proceed if the partition has no in-flight batches.
            if (isMuted(tp))
                continue;
            Deque<ProducerBatch> deque = getDeque(tp);
            if (deque == null)
                continue;

            final ProducerBatch batch;
            synchronized (deque) {
                // invariant: !isMuted(tp,now) && deque != null
                ProducerBatch first = deque.peekFirst();
                if (first == null)
                    continue;

                // first != null
                boolean backoff = first.attempts() > 0 && first.waitedTimeMs(now) < retryBackoffMs;
                // Only drain the batch if it is not during backoff period.
                if (backoff)
                    continue;

                if (size + first.estimatedSizeInBytes() > maxSize && !ready.isEmpty()) {
                    // there is a rare case that a single batch size is larger than the request size due to
                    // compression; in this case we will still eventually send this batch in a single request
                    break;
                } else {
                    if (shouldStopDrainBatchesForPartition(first, tp))
                        break;
                }

                batch = deque.pollFirst();

                boolean isTransactional = transactionManager != null && transactionManager.isTransactional();
                ProducerIdAndEpoch producerIdAndEpoch =
                    transactionManager != null ? transactionManager.producerIdAndEpoch() : null;
                if (producerIdAndEpoch != null && !batch.hasSequence()) {
                    // If the producer id/epoch of the partition do not match the latest one
                    // of the producer, we update it and reset the sequence. This should be
                    // only done when all its in-flight batches have completed. This is guaranteed
                    // in `shouldStopDrainBatchesForPartition`.
                    transactionManager.maybeUpdateProducerIdAndEpoch(batch.topicPartition);

                    // If the batch already has an assigned sequence, then we should not change the producer id and
                    // sequence number, since this may introduce duplicates. In particular, the previous attempt
                    // may actually have been accepted, and if we change the producer id and sequence here, this
                    // attempt will also be accepted, causing a duplicate.
                    //
                    // Additionally, we update the next sequence number bound for the partition, and also have
                    // the transaction manager track the batch so as to ensure that sequence ordering is maintained
                    // even if we receive out of order responses.
                    batch.setProducerState(producerIdAndEpoch, transactionManager.sequenceNumber(batch.topicPartition), isTransactional);
                    transactionManager.incrementSequenceNumber(batch.topicPartition, batch.recordCount);
                    log.debug("Assigned producerId {} and producerEpoch {} to batch with base sequence " +
                            "{} being sent to partition {}", producerIdAndEpoch.producerId,
                        producerIdAndEpoch.epoch, batch.baseSequence(), tp);

                    transactionManager.addInFlightBatch(batch);
                }
            }

            // the rest of the work by processing outside the lock
            // close() is particularly expensive
            batch.close();
            size += batch.records().sizeInBytes();
            ready.add(batch);

            batch.drained(now);
        } while (start != drainIndex);
        return ready;
    }

    private int getDrainIndex(String idString) {
        return nodesDrainIndex.computeIfAbsent(idString, s -> 0);
    }

    private void updateDrainIndex(String idString, int drainIndex) {
        nodesDrainIndex.put(idString, drainIndex);
    }

    /**
     * Drain all the data for the given nodes and collate them into a list of batches that will fit within the specified
     * size on a per-node basis. This method attempts to avoid choosing the same topic-node over and over.
     *
     * @param cluster The current cluster metadata
     * @param nodes The list of node to drain
     * @param maxSize The maximum number of bytes to drain
     * @param now The current unix time in milliseconds
     * @return A list of {@link ProducerBatch} for each node specified with total size less than the requested maxSize.
     */
    public Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now) {
        if (nodes.isEmpty())
            return Collections.emptyMap();

        Map<Integer, List<ProducerBatch>> batches = new HashMap<>();
        for (Node node : nodes) {
            List<ProducerBatch> ready = drainBatchesForOneNode(cluster, node, maxSize, now);
            batches.put(node.id(), ready);
        }
        return batches;
    }

    public void updateNodeLatencyStats(Integer nodeId, long nowMs, boolean canDrain) {
        // Don't bother with updating stats if the feature is turned off.
if (partitionAvailabilityTimeoutMs <= 0) return; // When the sender gets a node (returned by the ready() function) that has data to send // but the node is not ready (and so we cannot drain the data), we only update the // ready time, then the difference would reflect for how long a node wasn't ready // to send the data. Then we can temporarily remove partitions that are handled by the // node from the list of available partitions so that the partitioner wouldn't pick // this partition. // NOTE: there is no synchronization for metric updates, so drainTimeMs is updated // first to avoid accidentally marking a partition unavailable if the reader gets // values between updates. NodeLatencyStats nodeLatencyStats = nodeStats.computeIfAbsent(nodeId, id -> new NodeLatencyStats(nowMs)); if (canDrain) nodeLatencyStats.drainTimeMs = nowMs; nodeLatencyStats.readyTimeMs = nowMs; } /* Visible for testing */ public NodeLatencyStats getNodeLatencyStats(Integer nodeId) { return nodeStats.get(nodeId); } /* Visible for testing */ public BuiltInPartitioner getBuiltInPartitioner(String topic) { return topicInfoMap.get(topic).builtInPartitioner; } /** * The earliest absolute time a batch will expire (in milliseconds) */ public long nextExpiryTimeMs() { return this.nextBatchExpiryTimeMs; } /* Visible for testing */ public Deque<ProducerBatch> getDeque(TopicPartition tp) { TopicInfo topicInfo = topicInfoMap.get(tp.topic()); if (topicInfo == null) return null; return topicInfo.batches.get(tp.partition()); } /** * Get the deque for the given topic-partition, creating it if necessary. 
*/ private Deque<ProducerBatch> getOrCreateDeque(TopicPartition tp) { TopicInfo topicInfo = topicInfoMap.computeIfAbsent(tp.topic(), k -> new TopicInfo(logContext, k, batchSize)); return topicInfo.batches.computeIfAbsent(tp.partition(), k -> new ArrayDeque<>()); } /** * Deallocate the record batch */ public void deallocate(ProducerBatch batch) { incomplete.remove(batch); // Only deallocate the batch if it is not a split batch because split batch are allocated outside the // buffer pool. if (!batch.isSplitBatch()) free.deallocate(batch.buffer(), batch.initialCapacity()); } /** * Package private for unit test. Get the buffer pool remaining size in bytes. */ long bufferPoolAvailableMemory() { return free.availableMemory(); } /** * Are there any threads currently waiting on a flush? * * package private for test */ boolean flushInProgress() { return flushesInProgress.get() > 0; } /** * Initiate the flushing of data from the accumulator...this makes all requests immediately ready */ public void beginFlush() { this.flushesInProgress.getAndIncrement(); } /** * Are there any threads currently appending messages? */ private boolean appendsInProgress() { return appendsInProgress.get() > 0; } /** * Mark all partitions as ready to send and block until the send is complete */ public void awaitFlushCompletion() throws InterruptedException { try { // Obtain a copy of all of the incomplete ProduceRequestResult(s) at the time of the flush. // We must be careful not to hold a reference to the ProduceBatch(s) so that garbage // collection can occur on the contents. // The sender will remove ProducerBatch(s) from the original incomplete collection. for (ProduceRequestResult result : this.incomplete.requestResults()) result.await(); } finally { this.flushesInProgress.decrementAndGet(); } } /** * Check whether there are any pending batches (whether sent or unsent). 
*/ public boolean hasIncomplete() { return !this.incomplete.isEmpty(); } /** * This function is only called when sender is closed forcefully. It will fail all the * incomplete batches and return. */ public void abortIncompleteBatches() { // We need to keep aborting the incomplete batch until no thread is trying to append to // 1. Avoid losing batches. // 2. Free up memory in case appending threads are blocked on buffer full. // This is a tight loop but should be able to get through very quickly. do { abortBatches(); } while (appendsInProgress()); // After this point, no thread will append any messages because they will see the close // flag set. We need to do the last abort after no thread was appending in case there was a new // batch appended by the last appending thread. abortBatches(); this.topicInfoMap.clear(); } /** * Go through incomplete batches and abort them. */ private void abortBatches() { abortBatches(new KafkaException("Producer is closed forcefully.")); } /** * Abort all incomplete batches (whether they have been sent or not) */ void abortBatches(final RuntimeException reason) { for (ProducerBatch batch : incomplete.copyAll()) { Deque<ProducerBatch> dq = getDeque(batch.topicPartition); synchronized (dq) { batch.abortRecordAppends(); dq.remove(batch); } batch.abort(reason); deallocate(batch); } } /** * Abort any batches which have not been drained */ void abortUndrainedBatches(RuntimeException reason) { for (ProducerBatch batch : incomplete.copyAll()) { Deque<ProducerBatch> dq = getDeque(batch.topicPartition); boolean aborted = false; synchronized (dq) { if ((transactionManager != null && !batch.hasSequence()) || (transactionManager == null && !batch.isClosed())) { aborted = true; batch.abortRecordAppends(); dq.remove(batch); } } if (aborted) { batch.abort(reason); deallocate(batch); } } } public void mutePartition(TopicPartition tp) { muted.add(tp); } public void unmutePartition(TopicPartition tp) { muted.remove(tp); } /** * Close this accumulator 
and force all the record buffers to be drained */ public void close() { this.closed = true; this.free.close(); } /** * Partitioner config for built-in partitioner */ public static final class PartitionerConfig { private final boolean enableAdaptivePartitioning; private final long partitionAvailabilityTimeoutMs; /** * Partitioner config * * @param enableAdaptivePartitioning If it's true, partition switching adapts to broker load, otherwise partition * switching is random. * @param partitionAvailabilityTimeoutMs If a broker cannot process produce requests from a partition * for the specified time, the partition is treated by the partitioner as not available. * If the timeout is 0, this logic is disabled. */ public PartitionerConfig(boolean enableAdaptivePartitioning, long partitionAvailabilityTimeoutMs) { this.enableAdaptivePartitioning = enableAdaptivePartitioning; this.partitionAvailabilityTimeoutMs = partitionAvailabilityTimeoutMs; } public PartitionerConfig() { this(false, 0); } } /* * Metadata about a record just appended to the record accumulator */ public final static class RecordAppendResult { public final FutureRecordMetadata future; public final boolean batchIsFull; public final boolean newBatchCreated; public final boolean abortForNewBatch; public final int appendedBytes; public RecordAppendResult(FutureRecordMetadata future, boolean batchIsFull, boolean newBatchCreated, boolean abortForNewBatch, int appendedBytes) { this.future = future; this.batchIsFull = batchIsFull; this.newBatchCreated = newBatchCreated; this.abortForNewBatch = abortForNewBatch; this.appendedBytes = appendedBytes; } } /* * The callbacks passed into append */ public interface AppendCallbacks extends Callback { /** * Called to set partition (when append is called, partition may not be calculated yet). 
* @param partition The partition */ void setPartition(int partition); } /* * The set of nodes that have at least one complete record batch in the accumulator */ public final static class ReadyCheckResult { public final Set<Node> readyNodes; public final long nextReadyCheckDelayMs; public final Set<String> unknownLeaderTopics; public ReadyCheckResult(Set<Node> readyNodes, long nextReadyCheckDelayMs, Set<String> unknownLeaderTopics) { this.readyNodes = readyNodes; this.nextReadyCheckDelayMs = nextReadyCheckDelayMs; this.unknownLeaderTopics = unknownLeaderTopics; } } /** * Per topic info. */ private static class TopicInfo { public final ConcurrentMap<Integer /*partition*/, Deque<ProducerBatch>> batches = new CopyOnWriteMap<>(); public final BuiltInPartitioner builtInPartitioner; public TopicInfo(LogContext logContext, String topic, int stickyBatchSize) { builtInPartitioner = new BuiltInPartitioner(logContext, topic, stickyBatchSize); } } /** * Node latency stats for each node that are used for adaptive partition distribution * Visible for testing */ public final static class NodeLatencyStats { volatile public long readyTimeMs; // last time the node had batches ready to send volatile public long drainTimeMs; // last time the node was able to drain batches NodeLatencyStats(long nowMs) { readyTimeMs = nowMs; drainTimeMs = nowMs; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/Sender.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer.internals; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientRequest; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NetworkClientUtils; import org.apache.kafka.clients.RequestCompletionHandler; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.InvalidRecordException; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.InvalidMetadataException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.TransactionAbortedException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import 
org.apache.kafka.common.message.ProduceRequestData; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Meter; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.ProduceRequest; import org.apache.kafka.common.requests.ProduceResponse; import org.apache.kafka.common.requests.RequestHeader; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; /** * The background thread that handles the sending of produce requests to the Kafka cluster. This thread makes metadata * requests to renew its view of the cluster and then sends produce requests to the appropriate nodes. */ public class Sender implements Runnable { private final Logger log; /* the state of each nodes connection */ private final KafkaClient client; /* the record accumulator that batches records */ private final RecordAccumulator accumulator; /* the metadata for the client */ private final ProducerMetadata metadata; /* the flag indicating whether the producer should guarantee the message order on the broker or not. 
*/ private final boolean guaranteeMessageOrder; /* the maximum request size to attempt to send to the server */ private final int maxRequestSize; /* the number of acknowledgements to request from the server */ private final short acks; /* the number of times to retry a failed request before giving up */ private final int retries; /* the clock instance used for getting the time */ private final Time time; /* true while the sender thread is still running */ private volatile boolean running; /* true when the caller wants to ignore all unsent/inflight messages and force close. */ private volatile boolean forceClose; /* metrics */ private final SenderMetrics sensors; /* the max time to wait for the server to respond to the request*/ private final int requestTimeoutMs; /* The max time to wait before retrying a request which has failed */ private final long retryBackoffMs; /* current request API versions supported by the known brokers */ private final ApiVersions apiVersions; /* all the state related to transactions, in particular the producer id, producer epoch, and sequence numbers */ private final TransactionManager transactionManager; // A per-partition queue of batches ordered by creation time for tracking the in-flight batches private final Map<TopicPartition, List<ProducerBatch>> inFlightBatches; public Sender(LogContext logContext, KafkaClient client, ProducerMetadata metadata, RecordAccumulator accumulator, boolean guaranteeMessageOrder, int maxRequestSize, short acks, int retries, SenderMetricsRegistry metricsRegistry, Time time, int requestTimeoutMs, long retryBackoffMs, TransactionManager transactionManager, ApiVersions apiVersions) { this.log = logContext.logger(Sender.class); this.client = client; this.accumulator = accumulator; this.metadata = metadata; this.guaranteeMessageOrder = guaranteeMessageOrder; this.maxRequestSize = maxRequestSize; this.running = true; this.acks = acks; this.retries = retries; this.time = time; this.sensors = new 
SenderMetrics(metricsRegistry, metadata, client, time); this.requestTimeoutMs = requestTimeoutMs; this.retryBackoffMs = retryBackoffMs; this.apiVersions = apiVersions; this.transactionManager = transactionManager; this.inFlightBatches = new HashMap<>(); } public List<ProducerBatch> inFlightBatches(TopicPartition tp) { return inFlightBatches.containsKey(tp) ? inFlightBatches.get(tp) : new ArrayList<>(); } private void maybeRemoveFromInflightBatches(ProducerBatch batch) { List<ProducerBatch> batches = inFlightBatches.get(batch.topicPartition); if (batches != null) { batches.remove(batch); if (batches.isEmpty()) { inFlightBatches.remove(batch.topicPartition); } } } private void maybeRemoveAndDeallocateBatch(ProducerBatch batch) { maybeRemoveFromInflightBatches(batch); this.accumulator.deallocate(batch); } /** * Get the in-flight batches that has reached delivery timeout. */ private List<ProducerBatch> getExpiredInflightBatches(long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Iterator<Map.Entry<TopicPartition, List<ProducerBatch>>> batchIt = inFlightBatches.entrySet().iterator(); batchIt.hasNext();) { Map.Entry<TopicPartition, List<ProducerBatch>> entry = batchIt.next(); List<ProducerBatch> partitionInFlightBatches = entry.getValue(); if (partitionInFlightBatches != null) { Iterator<ProducerBatch> iter = partitionInFlightBatches.iterator(); while (iter.hasNext()) { ProducerBatch batch = iter.next(); if (batch.hasReachedDeliveryTimeout(accumulator.getDeliveryTimeoutMs(), now)) { iter.remove(); // expireBatches is called in Sender.sendProducerData, before client.poll. // The !batch.isDone() invariant should always hold. An IllegalStateException // exception will be thrown if the invariant is violated. 
if (!batch.isDone()) { expiredBatches.add(batch); } else { throw new IllegalStateException(batch.topicPartition + " batch created at " + batch.createdMs + " gets unexpected final state " + batch.finalState()); } } else { accumulator.maybeUpdateNextBatchExpiryTime(batch); break; } } if (partitionInFlightBatches.isEmpty()) { batchIt.remove(); } } } return expiredBatches; } private void addToInflightBatches(List<ProducerBatch> batches) { for (ProducerBatch batch : batches) { List<ProducerBatch> inflightBatchList = inFlightBatches.get(batch.topicPartition); if (inflightBatchList == null) { inflightBatchList = new ArrayList<>(); inFlightBatches.put(batch.topicPartition, inflightBatchList); } inflightBatchList.add(batch); } } public void addToInflightBatches(Map<Integer, List<ProducerBatch>> batches) { for (List<ProducerBatch> batchList : batches.values()) { addToInflightBatches(batchList); } } private boolean hasPendingTransactionalRequests() { return transactionManager != null && transactionManager.hasPendingRequests() && transactionManager.hasOngoingTransaction(); } /** * The main run loop for the sender thread */ @Override public void run() { log.debug("Starting Kafka producer I/O thread."); // main loop, runs until close is called while (running) { try { runOnce(); } catch (Exception e) { log.error("Uncaught error in kafka producer I/O thread: ", e); } } log.debug("Beginning shutdown of Kafka producer I/O thread, sending remaining records."); // okay we stopped accepting requests but there may still be // requests in the transaction manager, accumulator or waiting for acknowledgment, // wait until these are completed. 
while (!forceClose && ((this.accumulator.hasUndrained() || this.client.inFlightRequestCount() > 0) || hasPendingTransactionalRequests())) { try { runOnce(); } catch (Exception e) { log.error("Uncaught error in kafka producer I/O thread: ", e); } } // Abort the transaction if any commit or abort didn't go through the transaction manager's queue while (!forceClose && transactionManager != null && transactionManager.hasOngoingTransaction()) { if (!transactionManager.isCompleting()) { log.info("Aborting incomplete transaction due to shutdown"); transactionManager.beginAbort(); } try { runOnce(); } catch (Exception e) { log.error("Uncaught error in kafka producer I/O thread: ", e); } } if (forceClose) { // We need to fail all the incomplete transactional requests and batches and wake up the threads waiting on // the futures. if (transactionManager != null) { log.debug("Aborting incomplete transactional requests due to forced shutdown"); transactionManager.close(); } log.debug("Aborting incomplete batches due to forced shutdown"); this.accumulator.abortIncompleteBatches(); } try { this.client.close(); } catch (Exception e) { log.error("Failed to close network client", e); } log.debug("Shutdown of Kafka producer I/O thread has completed."); } /** * Run a single iteration of sending * */ void runOnce() { if (transactionManager != null) { try { transactionManager.maybeResolveSequences(); // do not continue sending if the transaction manager is in a failed state if (transactionManager.hasFatalError()) { RuntimeException lastError = transactionManager.lastError(); if (lastError != null) maybeAbortBatches(lastError); client.poll(retryBackoffMs, time.milliseconds()); return; } // Check whether we need a new producerId. 
If so, we will enqueue an InitProducerId // request which will be sent below transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); if (maybeSendAndPollTransactionalRequest()) { return; } } catch (AuthenticationException e) { // This is already logged as error, but propagated here to perform any clean ups. log.trace("Authentication exception while processing transactional request", e); transactionManager.authenticationFailed(e); } } long currentTimeMs = time.milliseconds(); long pollTimeout = sendProducerData(currentTimeMs); client.poll(pollTimeout, currentTimeMs); } private long sendProducerData(long now) { Cluster cluster = metadata.fetch(); // get the list of partitions with data ready to send RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(cluster, now); // if there are any partitions whose leaders are not known yet, force metadata update if (!result.unknownLeaderTopics.isEmpty()) { // The set of topics with unknown leader contains topics with leader election pending as well as // topics which may have expired. Add the topic again to metadata to ensure it is included // and request metadata update, since there are messages to send to the topic. for (String topic : result.unknownLeaderTopics) this.metadata.add(topic, now); log.debug("Requesting metadata update due to unknown leader topics from the batched records: {}", result.unknownLeaderTopics); this.metadata.requestUpdate(); } // remove any nodes we aren't ready to send to Iterator<Node> iter = result.readyNodes.iterator(); long notReadyTimeout = Long.MAX_VALUE; while (iter.hasNext()) { Node node = iter.next(); if (!this.client.ready(node, now)) { // Update just the readyTimeMs of the latency stats, so that it moves forward // every time the batch is ready (then the difference between readyTimeMs and // drainTimeMs would represent how long data is waiting for the node). 
this.accumulator.updateNodeLatencyStats(node.id(), now, false); iter.remove(); notReadyTimeout = Math.min(notReadyTimeout, this.client.pollDelayMs(node, now)); } else { // Update both readyTimeMs and drainTimeMs, this would "reset" the node // latency. this.accumulator.updateNodeLatencyStats(node.id(), now, true); } } // create produce requests Map<Integer, List<ProducerBatch>> batches = this.accumulator.drain(cluster, result.readyNodes, this.maxRequestSize, now); addToInflightBatches(batches); if (guaranteeMessageOrder) { // Mute all the partitions drained for (List<ProducerBatch> batchList : batches.values()) { for (ProducerBatch batch : batchList) this.accumulator.mutePartition(batch.topicPartition); } } accumulator.resetNextBatchExpiryTime(); List<ProducerBatch> expiredInflightBatches = getExpiredInflightBatches(now); List<ProducerBatch> expiredBatches = this.accumulator.expiredBatches(now); expiredBatches.addAll(expiredInflightBatches); // Reset the producer id if an expired batch has previously been sent to the broker. Also update the metrics // for expired batches. see the documentation of @TransactionState.resetIdempotentProducerId to understand why // we need to reset the producer id here. if (!expiredBatches.isEmpty()) log.trace("Expired {} batches in accumulator", expiredBatches.size()); for (ProducerBatch expiredBatch : expiredBatches) { String errorMessage = "Expiring " + expiredBatch.recordCount + " record(s) for " + expiredBatch.topicPartition + ":" + (now - expiredBatch.createdMs) + " ms has passed since batch creation"; failBatch(expiredBatch, new TimeoutException(errorMessage), false); if (transactionManager != null && expiredBatch.inRetry()) { // This ensures that no new batches are drained until the current in flight batches are fully resolved. 
transactionManager.markSequenceUnresolved(expiredBatch); } } sensors.updateProduceRequestMetrics(batches); // If we have any nodes that are ready to send + have sendable data, poll with 0 timeout so this can immediately // loop and try sending more data. Otherwise, the timeout will be the smaller value between next batch expiry // time, and the delay time for checking data availability. Note that the nodes may have data that isn't yet // sendable due to lingering, backing off, etc. This specifically does not include nodes with sendable data // that aren't ready to send since they would cause busy looping. long pollTimeout = Math.min(result.nextReadyCheckDelayMs, notReadyTimeout); pollTimeout = Math.min(pollTimeout, this.accumulator.nextExpiryTimeMs() - now); pollTimeout = Math.max(pollTimeout, 0); if (!result.readyNodes.isEmpty()) { log.trace("Nodes with data ready to send: {}", result.readyNodes); // if some partitions are already ready to be sent, the select time would be 0; // otherwise if some partition already has some data accumulated but not ready yet, // the select time will be the time difference between now and its linger expiry time; // otherwise the select time will be the time difference between now and the metadata expiry time; pollTimeout = 0; } sendProduceRequests(batches, now); return pollTimeout; } /** * Returns true if a transactional request is sent or polled, or if a FindCoordinator request is enqueued */ private boolean maybeSendAndPollTransactionalRequest() { if (transactionManager.hasInFlightRequest()) { // as long as there are outstanding transactional requests, we simply wait for them to return client.poll(retryBackoffMs, time.milliseconds()); return true; } if (transactionManager.hasAbortableError() || transactionManager.isAborting()) { if (accumulator.hasIncomplete()) { // Attempt to get the last error that caused this abort. 
RuntimeException exception = transactionManager.lastError(); // If there was no error, but we are still aborting, // then this is most likely a case where there was no fatal error. if (exception == null) { exception = new TransactionAbortedException(); } accumulator.abortUndrainedBatches(exception); } } TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequest(accumulator.hasIncomplete()); if (nextRequestHandler == null) return false; AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder(); Node targetNode = null; try { FindCoordinatorRequest.CoordinatorType coordinatorType = nextRequestHandler.coordinatorType(); targetNode = coordinatorType != null ? transactionManager.coordinator(coordinatorType) : client.leastLoadedNode(time.milliseconds()); if (targetNode != null) { if (!awaitNodeReady(targetNode, coordinatorType)) { log.trace("Target node {} not ready within request timeout, will retry when node is ready.", targetNode); maybeFindCoordinatorAndRetry(nextRequestHandler); return true; } } else if (coordinatorType != null) { log.trace("Coordinator not known for {}, will retry {} after finding coordinator.", coordinatorType, requestBuilder.apiKey()); maybeFindCoordinatorAndRetry(nextRequestHandler); return true; } else { log.trace("No nodes available to send requests, will poll and retry when until a node is ready."); transactionManager.retry(nextRequestHandler); client.poll(retryBackoffMs, time.milliseconds()); return true; } if (nextRequestHandler.isRetry()) time.sleep(nextRequestHandler.retryBackoffMs()); long currentTimeMs = time.milliseconds(); ClientRequest clientRequest = client.newClientRequest(targetNode.idString(), requestBuilder, currentTimeMs, true, requestTimeoutMs, nextRequestHandler); log.debug("Sending transactional request {} to node {} with correlation ID {}", requestBuilder, targetNode, clientRequest.correlationId()); client.send(clientRequest, currentTimeMs); 
transactionManager.setInFlightCorrelationId(clientRequest.correlationId()); client.poll(retryBackoffMs, time.milliseconds()); return true; } catch (IOException e) { log.debug("Disconnect from {} while trying to send request {}. Going " + "to back off and retry.", targetNode, requestBuilder, e); // We break here so that we pick up the FindCoordinator request immediately. maybeFindCoordinatorAndRetry(nextRequestHandler); return true; } } private void maybeFindCoordinatorAndRetry(TransactionManager.TxnRequestHandler nextRequestHandler) { if (nextRequestHandler.needsCoordinator()) { transactionManager.lookupCoordinator(nextRequestHandler); } else { // For non-coordinator requests, sleep here to prevent a tight loop when no node is available time.sleep(retryBackoffMs); metadata.requestUpdate(); } transactionManager.retry(nextRequestHandler); } private void maybeAbortBatches(RuntimeException exception) { if (accumulator.hasIncomplete()) { log.error("Aborting producer batches due to fatal error", exception); accumulator.abortBatches(exception); } } /** * Start closing the sender (won't actually complete until all data is sent out) */ public void initiateClose() { // Ensure accumulator is closed first to guarantee that no more appends are accepted after // breaking from the sender loop. Otherwise, we may miss some callbacks when shutting down. this.accumulator.close(); this.running = false; this.wakeup(); } /** * Closes the sender without sending out any pending messages. 
*/ public void forceClose() { this.forceClose = true; initiateClose(); } public boolean isRunning() { return running; } private boolean awaitNodeReady(Node node, FindCoordinatorRequest.CoordinatorType coordinatorType) throws IOException { if (NetworkClientUtils.awaitReady(client, node, time, requestTimeoutMs)) { if (coordinatorType == FindCoordinatorRequest.CoordinatorType.TRANSACTION) { // Indicate to the transaction manager that the coordinator is ready, allowing it to check ApiVersions // This allows us to bump transactional epochs even if the coordinator is temporarily unavailable at // the time when the abortable error is handled transactionManager.handleCoordinatorReady(); } return true; } return false; } /** * Handle a produce response */ private void handleProduceResponse(ClientResponse response, Map<TopicPartition, ProducerBatch> batches, long now) { RequestHeader requestHeader = response.requestHeader(); int correlationId = requestHeader.correlationId(); if (response.wasTimedOut()) { log.trace("Cancelled request with header {} due to the last request to node {} timed out", requestHeader, response.destination()); for (ProducerBatch batch : batches.values()) completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.REQUEST_TIMED_OUT, String.format("Disconnected from node %s due to timeout", response.destination())), correlationId, now); } else if (response.wasDisconnected()) { log.trace("Cancelled request with header {} due to node {} being disconnected", requestHeader, response.destination()); for (ProducerBatch batch : batches.values()) completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NETWORK_EXCEPTION, String.format("Disconnected from node %s", response.destination())), correlationId, now); } else if (response.versionMismatch() != null) { log.warn("Cancelled request {} due to a version mismatch with node {}", response, response.destination(), response.versionMismatch()); for (ProducerBatch batch : batches.values()) 
completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.UNSUPPORTED_VERSION), correlationId, now); } else { log.trace("Received produce response from node {} with correlation id {}", response.destination(), correlationId); // if we have a response, parse it if (response.hasResponse()) { // Sender should exercise PartitionProduceResponse rather than ProduceResponse.PartitionResponse // https://issues.apache.org/jira/browse/KAFKA-10696 ProduceResponse produceResponse = (ProduceResponse) response.responseBody(); produceResponse.data().responses().forEach(r -> r.partitionResponses().forEach(p -> { TopicPartition tp = new TopicPartition(r.name(), p.index()); ProduceResponse.PartitionResponse partResp = new ProduceResponse.PartitionResponse( Errors.forCode(p.errorCode()), p.baseOffset(), p.logAppendTimeMs(), p.logStartOffset(), p.recordErrors() .stream() .map(e -> new ProduceResponse.RecordError(e.batchIndex(), e.batchIndexErrorMessage())) .collect(Collectors.toList()), p.errorMessage()); ProducerBatch batch = batches.get(tp); completeBatch(batch, partResp, correlationId, now); })); this.sensors.recordLatency(response.destination(), response.requestLatencyMs()); } else { // this is the acks = 0 case, just complete all requests for (ProducerBatch batch : batches.values()) { completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NONE), correlationId, now); } } } } /** * Complete or retry the given batch of records. 
* * @param batch The record batch * @param response The produce response * @param correlationId The correlation id for the request * @param now The current POSIX timestamp in milliseconds */ private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, long correlationId, long now) { Errors error = response.error; if (error == Errors.MESSAGE_TOO_LARGE && batch.recordCount > 1 && !batch.isDone() && (batch.magic() >= RecordBatch.MAGIC_VALUE_V2 || batch.isCompressed())) { // If the batch is too large, we split the batch and send the split batches again. We do not decrement // the retry attempts in this case. log.warn( "Got error produce response in correlation id {} on topic-partition {}, splitting and retrying ({} attempts left). Error: {}", correlationId, batch.topicPartition, this.retries - batch.attempts(), formatErrMsg(response)); if (transactionManager != null) transactionManager.removeInFlightBatch(batch); this.accumulator.splitAndReenqueue(batch); maybeRemoveAndDeallocateBatch(batch); this.sensors.recordBatchSplit(); } else if (error != Errors.NONE) { if (canRetry(batch, response, now)) { log.warn( "Got error produce response with correlation id {} on topic-partition {}, retrying ({} attempts left). Error: {}", correlationId, batch.topicPartition, this.retries - batch.attempts() - 1, formatErrMsg(response)); reenqueueBatch(batch, now); } else if (error == Errors.DUPLICATE_SEQUENCE_NUMBER) { // If we have received a duplicate sequence error, it means that the sequence number has advanced beyond // the sequence of the current batch, and we haven't retained batch metadata on the broker to return // the correct offset and timestamp. // // The only thing we can do is to return success to the user and not return a valid offset and timestamp. completeBatch(batch, response); } else { // tell the user the result of their request. 
We only adjust sequence numbers if the batch didn't exhaust // its retries -- if it did, we don't know whether the sequence number was accepted or not, and // thus it is not safe to reassign the sequence. failBatch(batch, response, batch.attempts() < this.retries); } if (error.exception() instanceof InvalidMetadataException) { if (error.exception() instanceof UnknownTopicOrPartitionException) { log.warn("Received unknown topic or partition error in produce request on partition {}. The " + "topic-partition may not exist or the user may not have Describe access to it", batch.topicPartition); } else { log.warn("Received invalid metadata error in produce request on partition {} due to {}. Going " + "to request metadata update now", batch.topicPartition, error.exception(response.errorMessage).toString()); } metadata.requestUpdate(); } } else { completeBatch(batch, response); } // Unmute the completed partition. if (guaranteeMessageOrder) this.accumulator.unmutePartition(batch.topicPartition); } /** * Format the error from a {@link ProduceResponse.PartitionResponse} in a user-friendly string * e.g "NETWORK_EXCEPTION. Error Message: Disconnected from node 0" */ private String formatErrMsg(ProduceResponse.PartitionResponse response) { String errorMessageSuffix = (response.errorMessage == null || response.errorMessage.isEmpty()) ? "" : String.format(". 
Error Message: %s", response.errorMessage); return String.format("%s%s", response.error, errorMessageSuffix); } private void reenqueueBatch(ProducerBatch batch, long currentTimeMs) { this.accumulator.reenqueue(batch, currentTimeMs); maybeRemoveFromInflightBatches(batch); this.sensors.recordRetries(batch.topicPartition.topic(), batch.recordCount); } private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response) { if (transactionManager != null) { transactionManager.handleCompletedBatch(batch, response); } if (batch.complete(response.baseOffset, response.logAppendTime)) { maybeRemoveAndDeallocateBatch(batch); } } private void failBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, boolean adjustSequenceNumbers) { final RuntimeException topLevelException; if (response.error == Errors.TOPIC_AUTHORIZATION_FAILED) topLevelException = new TopicAuthorizationException(Collections.singleton(batch.topicPartition.topic())); else if (response.error == Errors.CLUSTER_AUTHORIZATION_FAILED) topLevelException = new ClusterAuthorizationException("The producer is not authorized to do idempotent sends"); else topLevelException = response.error.exception(response.errorMessage); if (response.recordErrors == null || response.recordErrors.isEmpty()) { failBatch(batch, topLevelException, adjustSequenceNumbers); } else { Map<Integer, RuntimeException> recordErrorMap = new HashMap<>(response.recordErrors.size()); for (ProduceResponse.RecordError recordError : response.recordErrors) { // The API leaves us with some awkwardness interpreting the errors in the response. // We cannot differentiate between different error cases (such as INVALID_TIMESTAMP) // from the single error code at the partition level, so instead we use INVALID_RECORD // for all failed records and rely on the message to distinguish the cases. 
final String errorMessage; if (recordError.message != null) { errorMessage = recordError.message; } else if (response.errorMessage != null) { errorMessage = response.errorMessage; } else { errorMessage = response.error.message(); } // If the batch contained only a single record error, then we can unambiguously // use the exception type corresponding to the partition-level error code. if (response.recordErrors.size() == 1) { recordErrorMap.put(recordError.batchIndex, response.error.exception(errorMessage)); } else { recordErrorMap.put(recordError.batchIndex, new InvalidRecordException(errorMessage)); } } Function<Integer, RuntimeException> recordExceptions = batchIndex -> { RuntimeException exception = recordErrorMap.get(batchIndex); if (exception != null) { return exception; } else { // If the response contains record errors, then the records which failed validation // will be present in the response. To avoid confusion for the remaining records, we // return a generic exception. return new KafkaException("Failed to append record because it was part of a batch " + "which had one more more invalid records"); } }; failBatch(batch, topLevelException, recordExceptions, adjustSequenceNumbers); } } private void failBatch( ProducerBatch batch, RuntimeException topLevelException, boolean adjustSequenceNumbers ) { failBatch(batch, topLevelException, batchIndex -> topLevelException, adjustSequenceNumbers); } private void failBatch( ProducerBatch batch, RuntimeException topLevelException, Function<Integer, RuntimeException> recordExceptions, boolean adjustSequenceNumbers ) { if (transactionManager != null) { transactionManager.handleFailedBatch(batch, topLevelException, adjustSequenceNumbers); } this.sensors.recordErrors(batch.topicPartition.topic(), batch.recordCount); if (batch.completeExceptionally(topLevelException, recordExceptions)) { maybeRemoveAndDeallocateBatch(batch); } } /** * We can retry a send if the error is transient and the number of attempts taken is fewer 
than the maximum allowed. * We can also retry OutOfOrderSequence exceptions for future batches, since if the first batch has failed, the * future batches are certain to fail with an OutOfOrderSequence exception. */ private boolean canRetry(ProducerBatch batch, ProduceResponse.PartitionResponse response, long now) { return !batch.hasReachedDeliveryTimeout(accumulator.getDeliveryTimeoutMs(), now) && batch.attempts() < this.retries && !batch.isDone() && (transactionManager == null ? response.error.exception() instanceof RetriableException : transactionManager.canRetry(response, batch)); } /** * Transfer the record batches into a list of produce requests on a per-node basis */ private void sendProduceRequests(Map<Integer, List<ProducerBatch>> collated, long now) { for (Map.Entry<Integer, List<ProducerBatch>> entry : collated.entrySet()) sendProduceRequest(now, entry.getKey(), acks, requestTimeoutMs, entry.getValue()); } /** * Create a produce request from the given record batches */ private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) { if (batches.isEmpty()) return; final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size()); // find the minimum magic version used when creating the record sets byte minUsedMagic = apiVersions.maxUsableProduceMagic(); for (ProducerBatch batch : batches) { if (batch.magic() < minUsedMagic) minUsedMagic = batch.magic(); } ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection(); for (ProducerBatch batch : batches) { TopicPartition tp = batch.topicPartition; MemoryRecords records = batch.records(); // down convert if necessary to the minimum magic used. In general, there can be a delay between the time // that the producer starts building the batch and the time that we send the request, and we may have // chosen the message format based on out-dated metadata. 
In the worst case, we optimistically chose to use // the new message format, but found that the broker didn't support it, so we need to down-convert on the // client before sending. This is intended to handle edge cases around cluster upgrades where brokers may // not all support the same message format version. For example, if a partition migrates from a broker // which is supporting the new magic version to one which doesn't, then we will need to convert. if (!records.hasMatchingMagic(minUsedMagic)) records = batch.records().downConvert(minUsedMagic, 0, time).records(); ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic()); if (tpData == null) { tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic()); tpd.add(tpData); } tpData.partitionData().add(new ProduceRequestData.PartitionProduceData() .setIndex(tp.partition()) .setRecords(records)); recordsByPartition.put(tp, batch); } String transactionalId = null; if (transactionManager != null && transactionManager.isTransactional()) { transactionalId = transactionManager.transactionalId(); } ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic, new ProduceRequestData() .setAcks(acks) .setTimeoutMs(timeout) .setTransactionalId(transactionalId) .setTopicData(tpd)); RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds()); String nodeId = Integer.toString(destination); ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, requestTimeoutMs, callback); client.send(clientRequest, now); log.trace("Sent produce request to {}: {}", nodeId, requestBuilder); } /** * Wake up the selector associated with this send thread */ public void wakeup() { this.client.wakeup(); } public static Sensor throttleTimeSensor(SenderMetricsRegistry metrics) { Sensor produceThrottleTimeSensor = metrics.sensor("produce-throttle-time"); produceThrottleTimeSensor.add(metrics.produceThrottleTimeAvg, 
new Avg()); produceThrottleTimeSensor.add(metrics.produceThrottleTimeMax, new Max()); return produceThrottleTimeSensor; } /** * A collection of sensors for the sender */ private static class SenderMetrics { public final Sensor retrySensor; public final Sensor errorSensor; public final Sensor queueTimeSensor; public final Sensor requestTimeSensor; public final Sensor recordsPerRequestSensor; public final Sensor batchSizeSensor; public final Sensor compressionRateSensor; public final Sensor maxRecordSizeSensor; public final Sensor batchSplitSensor; private final SenderMetricsRegistry metrics; private final Time time; public SenderMetrics(SenderMetricsRegistry metrics, Metadata metadata, KafkaClient client, Time time) { this.metrics = metrics; this.time = time; this.batchSizeSensor = metrics.sensor("batch-size"); this.batchSizeSensor.add(metrics.batchSizeAvg, new Avg()); this.batchSizeSensor.add(metrics.batchSizeMax, new Max()); this.compressionRateSensor = metrics.sensor("compression-rate"); this.compressionRateSensor.add(metrics.compressionRateAvg, new Avg()); this.queueTimeSensor = metrics.sensor("queue-time"); this.queueTimeSensor.add(metrics.recordQueueTimeAvg, new Avg()); this.queueTimeSensor.add(metrics.recordQueueTimeMax, new Max()); this.requestTimeSensor = metrics.sensor("request-time"); this.requestTimeSensor.add(metrics.requestLatencyAvg, new Avg()); this.requestTimeSensor.add(metrics.requestLatencyMax, new Max()); this.recordsPerRequestSensor = metrics.sensor("records-per-request"); this.recordsPerRequestSensor.add(new Meter(metrics.recordSendRate, metrics.recordSendTotal)); this.recordsPerRequestSensor.add(metrics.recordsPerRequestAvg, new Avg()); this.retrySensor = metrics.sensor("record-retries"); this.retrySensor.add(new Meter(metrics.recordRetryRate, metrics.recordRetryTotal)); this.errorSensor = metrics.sensor("errors"); this.errorSensor.add(new Meter(metrics.recordErrorRate, metrics.recordErrorTotal)); this.maxRecordSizeSensor = 
metrics.sensor("record-size"); this.maxRecordSizeSensor.add(metrics.recordSizeMax, new Max()); this.maxRecordSizeSensor.add(metrics.recordSizeAvg, new Avg()); this.metrics.addMetric(metrics.requestsInFlight, (config, now) -> client.inFlightRequestCount()); this.metrics.addMetric(metrics.metadataAge, (config, now) -> (now - metadata.lastSuccessfulUpdate()) / 1000.0); this.batchSplitSensor = metrics.sensor("batch-split-rate"); this.batchSplitSensor.add(new Meter(metrics.batchSplitRate, metrics.batchSplitTotal)); } private void maybeRegisterTopicMetrics(String topic) { // if one sensor of the metrics has been registered for the topic, // then all other sensors should have been registered; and vice versa String topicRecordsCountName = "topic." + topic + ".records-per-batch"; Sensor topicRecordCount = this.metrics.getSensor(topicRecordsCountName); if (topicRecordCount == null) { Map<String, String> metricTags = Collections.singletonMap("topic", topic); topicRecordCount = this.metrics.sensor(topicRecordsCountName); MetricName rateMetricName = this.metrics.topicRecordSendRate(metricTags); MetricName totalMetricName = this.metrics.topicRecordSendTotal(metricTags); topicRecordCount.add(new Meter(rateMetricName, totalMetricName)); String topicByteRateName = "topic." + topic + ".bytes"; Sensor topicByteRate = this.metrics.sensor(topicByteRateName); rateMetricName = this.metrics.topicByteRate(metricTags); totalMetricName = this.metrics.topicByteTotal(metricTags); topicByteRate.add(new Meter(rateMetricName, totalMetricName)); String topicCompressionRateName = "topic." + topic + ".compression-rate"; Sensor topicCompressionRate = this.metrics.sensor(topicCompressionRateName); MetricName m = this.metrics.topicCompressionRate(metricTags); topicCompressionRate.add(m, new Avg()); String topicRetryName = "topic." 
+ topic + ".record-retries"; Sensor topicRetrySensor = this.metrics.sensor(topicRetryName); rateMetricName = this.metrics.topicRecordRetryRate(metricTags); totalMetricName = this.metrics.topicRecordRetryTotal(metricTags); topicRetrySensor.add(new Meter(rateMetricName, totalMetricName)); String topicErrorName = "topic." + topic + ".record-errors"; Sensor topicErrorSensor = this.metrics.sensor(topicErrorName); rateMetricName = this.metrics.topicRecordErrorRate(metricTags); totalMetricName = this.metrics.topicRecordErrorTotal(metricTags); topicErrorSensor.add(new Meter(rateMetricName, totalMetricName)); } } public void updateProduceRequestMetrics(Map<Integer, List<ProducerBatch>> batches) { long now = time.milliseconds(); for (List<ProducerBatch> nodeBatch : batches.values()) { int records = 0; for (ProducerBatch batch : nodeBatch) { // register all per-topic metrics at once String topic = batch.topicPartition.topic(); maybeRegisterTopicMetrics(topic); // per-topic record send rate String topicRecordsCountName = "topic." + topic + ".records-per-batch"; Sensor topicRecordCount = Objects.requireNonNull(this.metrics.getSensor(topicRecordsCountName)); topicRecordCount.record(batch.recordCount); // per-topic bytes send rate String topicByteRateName = "topic." + topic + ".bytes"; Sensor topicByteRate = Objects.requireNonNull(this.metrics.getSensor(topicByteRateName)); topicByteRate.record(batch.estimatedSizeInBytes()); // per-topic compression rate String topicCompressionRateName = "topic." 
+ topic + ".compression-rate"; Sensor topicCompressionRate = Objects.requireNonNull(this.metrics.getSensor(topicCompressionRateName)); topicCompressionRate.record(batch.compressionRatio()); // global metrics this.batchSizeSensor.record(batch.estimatedSizeInBytes(), now); this.queueTimeSensor.record(batch.queueTimeMs(), now); this.compressionRateSensor.record(batch.compressionRatio()); this.maxRecordSizeSensor.record(batch.maxRecordSize, now); records += batch.recordCount; } this.recordsPerRequestSensor.record(records, now); } } public void recordRetries(String topic, int count) { long now = time.milliseconds(); this.retrySensor.record(count, now); String topicRetryName = "topic." + topic + ".record-retries"; Sensor topicRetrySensor = this.metrics.getSensor(topicRetryName); if (topicRetrySensor != null) topicRetrySensor.record(count, now); } public void recordErrors(String topic, int count) { long now = time.milliseconds(); this.errorSensor.record(count, now); String topicErrorName = "topic." + topic + ".record-errors"; Sensor topicErrorSensor = this.metrics.getSensor(topicErrorName); if (topicErrorSensor != null) topicErrorSensor.record(count, now); } public void recordLatency(String node, long latency) { long now = time.milliseconds(); this.requestTimeSensor.record(latency, now); if (!node.isEmpty()) { String nodeTimeName = "node-" + node + ".latency"; Sensor nodeRequestTime = this.metrics.getSensor(nodeTimeName); if (nodeRequestTime != null) nodeRequestTime.record(latency, now); } } void recordBatchSplit() { this.batchSplitSensor.record(); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/SenderMetricsRegistry.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer.internals; import java.util.ArrayList; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; public class SenderMetricsRegistry { final static String TOPIC_METRIC_GROUP_NAME = "producer-topic-metrics"; private final List<MetricNameTemplate> allTemplates; public final MetricName batchSizeAvg; public final MetricName batchSizeMax; public final MetricName compressionRateAvg; public final MetricName recordQueueTimeAvg; public final MetricName recordQueueTimeMax; public final MetricName requestLatencyAvg; public final MetricName requestLatencyMax; public final MetricName produceThrottleTimeAvg; public final MetricName produceThrottleTimeMax; public final MetricName recordSendRate; public final MetricName recordSendTotal; public final MetricName recordsPerRequestAvg; public final MetricName recordRetryRate; public final MetricName recordRetryTotal; public final MetricName 
recordErrorRate; public final MetricName recordErrorTotal; public final MetricName recordSizeMax; public final MetricName recordSizeAvg; public final MetricName requestsInFlight; public final MetricName metadataAge; public final MetricName batchSplitRate; public final MetricName batchSplitTotal; private final MetricNameTemplate topicRecordSendRate; private final MetricNameTemplate topicRecordSendTotal; private final MetricNameTemplate topicByteRate; private final MetricNameTemplate topicByteTotal; private final MetricNameTemplate topicCompressionRate; private final MetricNameTemplate topicRecordRetryRate; private final MetricNameTemplate topicRecordRetryTotal; private final MetricNameTemplate topicRecordErrorRate; private final MetricNameTemplate topicRecordErrorTotal; private final Metrics metrics; private final Set<String> tags; private final LinkedHashSet<String> topicTags; public SenderMetricsRegistry(Metrics metrics) { this.metrics = metrics; this.tags = this.metrics.config().tags().keySet(); this.allTemplates = new ArrayList<>(); /***** Client level *****/ this.batchSizeAvg = createMetricName("batch-size-avg", "The average number of bytes sent per partition per-request."); this.batchSizeMax = createMetricName("batch-size-max", "The max number of bytes sent per partition per-request."); this.compressionRateAvg = createMetricName("compression-rate-avg", "The average compression rate of record batches, defined as the average ratio of the " + "compressed batch size over the uncompressed size."); this.recordQueueTimeAvg = createMetricName("record-queue-time-avg", "The average time in ms record batches spent in the send buffer."); this.recordQueueTimeMax = createMetricName("record-queue-time-max", "The maximum time in ms record batches spent in the send buffer."); this.requestLatencyAvg = createMetricName("request-latency-avg", "The average request latency in ms"); this.requestLatencyMax = createMetricName("request-latency-max", "The maximum request latency in 
ms"); this.recordSendRate = createMetricName("record-send-rate", "The average number of records sent per second."); this.recordSendTotal = createMetricName("record-send-total", "The total number of records sent."); this.recordsPerRequestAvg = createMetricName("records-per-request-avg", "The average number of records per request."); this.recordRetryRate = createMetricName("record-retry-rate", "The average per-second number of retried record sends"); this.recordRetryTotal = createMetricName("record-retry-total", "The total number of retried record sends"); this.recordErrorRate = createMetricName("record-error-rate", "The average per-second number of record sends that resulted in errors"); this.recordErrorTotal = createMetricName("record-error-total", "The total number of record sends that resulted in errors"); this.recordSizeMax = createMetricName("record-size-max", "The maximum record size"); this.recordSizeAvg = createMetricName("record-size-avg", "The average record size"); this.requestsInFlight = createMetricName("requests-in-flight", "The current number of in-flight requests awaiting a response."); this.metadataAge = createMetricName("metadata-age", "The age in seconds of the current producer metadata being used."); this.batchSplitRate = createMetricName("batch-split-rate", "The average number of batch splits per second"); this.batchSplitTotal = createMetricName("batch-split-total", "The total number of batch splits"); this.produceThrottleTimeAvg = createMetricName("produce-throttle-time-avg", "The average time in ms a request was throttled by a broker"); this.produceThrottleTimeMax = createMetricName("produce-throttle-time-max", "The maximum time in ms a request was throttled by a broker"); /***** Topic level *****/ this.topicTags = new LinkedHashSet<>(tags); this.topicTags.add("topic"); // We can't create the MetricName up front for these, because we don't know the topic name yet. 
this.topicRecordSendRate = createTopicTemplate("record-send-rate", "The average number of records sent per second for a topic."); this.topicRecordSendTotal = createTopicTemplate("record-send-total", "The total number of records sent for a topic."); this.topicByteRate = createTopicTemplate("byte-rate", "The average number of bytes sent per second for a topic."); this.topicByteTotal = createTopicTemplate("byte-total", "The total number of bytes sent for a topic."); this.topicCompressionRate = createTopicTemplate("compression-rate", "The average compression rate of record batches for a topic, defined as the average ratio " + "of the compressed batch size over the uncompressed size."); this.topicRecordRetryRate = createTopicTemplate("record-retry-rate", "The average per-second number of retried record sends for a topic"); this.topicRecordRetryTotal = createTopicTemplate("record-retry-total", "The total number of retried record sends for a topic"); this.topicRecordErrorRate = createTopicTemplate("record-error-rate", "The average per-second number of record sends that resulted in errors for a topic"); this.topicRecordErrorTotal = createTopicTemplate("record-error-total", "The total number of record sends that resulted in errors for a topic"); } private MetricName createMetricName(String name, String description) { return this.metrics.metricInstance(createTemplate(name, KafkaProducerMetrics.GROUP, description, this.tags)); } private MetricNameTemplate createTopicTemplate(String name, String description) { return createTemplate(name, TOPIC_METRIC_GROUP_NAME, description, this.topicTags); } /** topic level metrics **/ public MetricName topicRecordSendRate(Map<String, String> tags) { return this.metrics.metricInstance(this.topicRecordSendRate, tags); } public MetricName topicRecordSendTotal(Map<String, String> tags) { return this.metrics.metricInstance(this.topicRecordSendTotal, tags); } public MetricName topicByteRate(Map<String, String> tags) { return 
this.metrics.metricInstance(this.topicByteRate, tags); } public MetricName topicByteTotal(Map<String, String> tags) { return this.metrics.metricInstance(this.topicByteTotal, tags); } public MetricName topicCompressionRate(Map<String, String> tags) { return this.metrics.metricInstance(this.topicCompressionRate, tags); } public MetricName topicRecordRetryRate(Map<String, String> tags) { return this.metrics.metricInstance(this.topicRecordRetryRate, tags); } public MetricName topicRecordRetryTotal(Map<String, String> tags) { return this.metrics.metricInstance(this.topicRecordRetryTotal, tags); } public MetricName topicRecordErrorRate(Map<String, String> tags) { return this.metrics.metricInstance(this.topicRecordErrorRate, tags); } public MetricName topicRecordErrorTotal(Map<String, String> tags) { return this.metrics.metricInstance(this.topicRecordErrorTotal, tags); } public List<MetricNameTemplate> allTemplates() { return allTemplates; } public Sensor sensor(String name) { return this.metrics.sensor(name); } public void addMetric(MetricName m, Measurable measurable) { this.metrics.addMetric(m, measurable); } public Sensor getSensor(String name) { return this.metrics.getSensor(name); } private MetricNameTemplate createTemplate(String name, String group, String description, Set<String> tags) { MetricNameTemplate template = new MetricNameTemplate(name, group, description, tags); this.allTemplates.add(template); return template; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer.internals; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.PartitionInfo; import java.util.List; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentHashMap; import org.apache.kafka.common.utils.Utils; /** * An internal class that implements a cache used for sticky partitioning behavior. The cache tracks the current sticky * partition for any given topic. This class should not be used externally. 
*/ public class StickyPartitionCache { private final ConcurrentMap<String, Integer> indexCache; public StickyPartitionCache() { this.indexCache = new ConcurrentHashMap<>(); } public int partition(String topic, Cluster cluster) { Integer part = indexCache.get(topic); if (part == null) { return nextPartition(topic, cluster, -1); } return part; } public int nextPartition(String topic, Cluster cluster, int prevPartition) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); Integer oldPart = indexCache.get(topic); Integer newPart = oldPart; // Check that the current sticky partition for the topic is either not set or that the partition that // triggered the new batch matches the sticky partition that needs to be changed. if (oldPart == null || oldPart == prevPartition) { List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() < 1) { Integer random = Utils.toPositive(ThreadLocalRandom.current().nextInt()); newPart = random % partitions.size(); } else if (availablePartitions.size() == 1) { newPart = availablePartitions.get(0).partition(); } else { while (newPart == null || newPart.equals(oldPart)) { int random = Utils.toPositive(ThreadLocalRandom.current().nextInt()); newPart = availablePartitions.get(random % availablePartitions.size()).partition(); } } // Only change the sticky partition if it is null or prevPartition matches the current sticky partition. if (oldPart == null) { indexCache.putIfAbsent(topic, newPart); } else { indexCache.replace(topic, prevPartition, newPart); } return indexCache.get(topic); } return indexCache.get(topic); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/TransactionManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.producer.internals; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.RequestCompletionHandler; import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidProducerEpochException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.UnknownProducerIdException; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.AuthenticationException; import 
org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.OutOfOrderSequenceException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.AddOffsetsToTxnRequestData; import org.apache.kafka.common.message.EndTxnRequestData; import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.message.InitProducerIdRequestData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.DefaultRecordBatch; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.AddOffsetsToTxnRequest; import org.apache.kafka.common.requests.AddOffsetsToTxnResponse; import org.apache.kafka.common.requests.AddPartitionsToTxnRequest; import org.apache.kafka.common.requests.AddPartitionsToTxnResponse; import org.apache.kafka.common.requests.EndTxnRequest; import org.apache.kafka.common.requests.EndTxnResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.InitProducerIdRequest; import org.apache.kafka.common.requests.InitProducerIdResponse; import org.apache.kafka.common.requests.ProduceResponse; import org.apache.kafka.common.requests.TransactionResult; import org.apache.kafka.common.requests.TxnOffsetCommitRequest; import org.apache.kafka.common.requests.TxnOffsetCommitRequest.CommittedOffset; import 
org.apache.kafka.common.requests.TxnOffsetCommitResponse; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.OptionalInt; import java.util.OptionalLong; import java.util.PriorityQueue; import java.util.Set; import java.util.SortedSet; import java.util.function.Supplier; /** * A class which maintains state for transactions. Also keeps the state necessary to ensure idempotent production. */ public class TransactionManager { private static final int NO_INFLIGHT_REQUEST_CORRELATION_ID = -1; static final int NO_LAST_ACKED_SEQUENCE_NUMBER = -1; private final Logger log; private final String transactionalId; private final int transactionTimeoutMs; private final ApiVersions apiVersions; private final TxnPartitionMap txnPartitionMap; private final Map<TopicPartition, CommittedOffset> pendingTxnOffsetCommits; // If a batch bound for a partition expired locally after being sent at least once, the partition is considered // to have an unresolved state. We keep track of such partitions here, and cannot assign any more sequence numbers // for this partition until the unresolved state gets cleared. This may happen if other inflight batches returned // successfully (indicating that the expired batch actually made it to the broker). If we don't get any successful // responses for the partition once the inflight request count falls to zero, we reset the producer id and // consequently clear this data structure as well. // The value of the map is the sequence number of the batch following the expired one, computed by adding its // record count to its sequence number. This is used to tell if a subsequent batch is the one immediately following // the expired one. 
private final Map<TopicPartition, Integer> partitionsWithUnresolvedSequences;
// The partitions that have received an error that triggers an epoch bump. When the epoch is bumped, these
// partitions will have the sequences of their in-flight batches rewritten
private final Set<TopicPartition> partitionsToRewriteSequences;
// Pending transactional requests, ordered by Priority (see the enum below).
private final PriorityQueue<TxnRequestHandler> pendingRequests;
// Partitions added by the application but not yet sent in an AddPartitionsToTxn request.
private final Set<TopicPartition> newPartitionsInTransaction;
// Partitions with an AddPartitionsToTxn request in flight.
private final Set<TopicPartition> pendingPartitionsInTransaction;
// Partitions the transaction coordinator has acknowledged as part of the transaction.
private final Set<TopicPartition> partitionsInTransaction;
private PendingStateTransition pendingTransition;
// This is used by the TxnRequestHandlers to control how long to back off before a given request is retried.
// For instance, this value is lowered by the AddPartitionsToTxnHandler when it receives a CONCURRENT_TRANSACTIONS
// error for the first AddPartitionsRequest in a transaction.
private final long retryBackoffMs;
// The retryBackoff is overridden to the following value if the first AddPartitions receives a
// CONCURRENT_TRANSACTIONS error.
private static final long ADD_PARTITIONS_RETRY_BACKOFF_MS = 20L;
// Correlation id of the single in-flight transactional request, or
// NO_INFLIGHT_REQUEST_CORRELATION_ID when nothing is in flight.
private int inFlightRequestCorrelationId = NO_INFLIGHT_REQUEST_CORRELATION_ID;
private Node transactionCoordinator;
private Node consumerGroupCoordinator;
private boolean coordinatorSupportsBumpingEpoch;

// volatile: read by application threads while mutated by the sender thread.
private volatile State currentState = State.UNINITIALIZED;
private volatile RuntimeException lastError = null;
private volatile ProducerIdAndEpoch producerIdAndEpoch;
private volatile boolean transactionStarted = false;
private volatile boolean epochBumpRequired = false;

// State machine of the transaction manager; isTransitionValid encodes the only
// legal source states for each target state.
private enum State {
    UNINITIALIZED,
    INITIALIZING,
    READY,
    IN_TRANSACTION,
    COMMITTING_TRANSACTION,
    ABORTING_TRANSACTION,
    ABORTABLE_ERROR,
    FATAL_ERROR;

    // Returns whether moving from `source` to `target` is a legal transition.
    private boolean isTransitionValid(State source, State target) {
        switch (target) {
            case UNINITIALIZED:
                return source == READY;
            case INITIALIZING:
                return source == UNINITIALIZED || source == ABORTING_TRANSACTION;
            case READY:
                return source == INITIALIZING || source == COMMITTING_TRANSACTION || source == ABORTING_TRANSACTION;
            case IN_TRANSACTION:
                return source == READY;
            case COMMITTING_TRANSACTION:
                return source == IN_TRANSACTION;
            case ABORTING_TRANSACTION:
                return source == IN_TRANSACTION || source == ABORTABLE_ERROR;
            case ABORTABLE_ERROR:
                return source == IN_TRANSACTION || source == COMMITTING_TRANSACTION || source == ABORTABLE_ERROR;
            case FATAL_ERROR:
            default:
                // We can transition to FATAL_ERROR unconditionally.
                // FATAL_ERROR is never a valid starting state for any transition. So the only option is to close the
                // producer or do purely non transactional requests.
                return true;
        }
    }
}

// We use the priority to determine the order in which requests need to be sent out. For instance, if we have
// a pending FindCoordinator request, that must always go first. Next, If we need a producer id, that must go second.
// The endTxn request must always go last, unless we are bumping the epoch (a special case of InitProducerId) as
// part of ending the transaction.
private enum Priority { FIND_COORDINATOR(0), INIT_PRODUCER_ID(1), ADD_PARTITIONS_OR_OFFSETS(2), END_TXN(3), EPOCH_BUMP(4); final int priority; Priority(int priority) { this.priority = priority; } } public TransactionManager(final LogContext logContext, final String transactionalId, final int transactionTimeoutMs, final long retryBackoffMs, final ApiVersions apiVersions) { this.producerIdAndEpoch = ProducerIdAndEpoch.NONE; this.transactionalId = transactionalId; this.log = logContext.logger(TransactionManager.class); this.transactionTimeoutMs = transactionTimeoutMs; this.transactionCoordinator = null; this.consumerGroupCoordinator = null; this.newPartitionsInTransaction = new HashSet<>(); this.pendingPartitionsInTransaction = new HashSet<>(); this.partitionsInTransaction = new HashSet<>(); this.pendingRequests = new PriorityQueue<>(10, Comparator.comparingInt(o -> o.priority().priority)); this.pendingTxnOffsetCommits = new HashMap<>(); this.partitionsWithUnresolvedSequences = new HashMap<>(); this.partitionsToRewriteSequences = new HashSet<>(); this.retryBackoffMs = retryBackoffMs; this.txnPartitionMap = new TxnPartitionMap(); this.apiVersions = apiVersions; } public synchronized TransactionalRequestResult initializeTransactions() { return initializeTransactions(ProducerIdAndEpoch.NONE); } synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoch producerIdAndEpoch) { maybeFailWithError(); boolean isEpochBump = producerIdAndEpoch != ProducerIdAndEpoch.NONE; return handleCachedTransactionRequestResult(() -> { // If this is an epoch bump, we will transition the state as part of handling the EndTxnRequest if (!isEpochBump) { transitionTo(State.INITIALIZING); log.info("Invoking InitProducerId for the first time in order to acquire a producer ID"); } else { log.info("Invoking InitProducerId with current producer ID and epoch {} in order to bump the epoch", producerIdAndEpoch); } InitProducerIdRequestData requestData = new 
InitProducerIdRequestData() .setTransactionalId(transactionalId) .setTransactionTimeoutMs(transactionTimeoutMs) .setProducerId(producerIdAndEpoch.producerId) .setProducerEpoch(producerIdAndEpoch.epoch); InitProducerIdHandler handler = new InitProducerIdHandler(new InitProducerIdRequest.Builder(requestData), isEpochBump); enqueueRequest(handler); return handler.result; }, State.INITIALIZING, "initTransactions"); } public synchronized void beginTransaction() { ensureTransactional(); throwIfPendingState("beginTransaction"); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } public synchronized TransactionalRequestResult beginCommit() { return handleCachedTransactionRequestResult(() -> { maybeFailWithError(); transitionTo(State.COMMITTING_TRANSACTION); return beginCompletingTransaction(TransactionResult.COMMIT); }, State.COMMITTING_TRANSACTION, "commitTransaction"); } public synchronized TransactionalRequestResult beginAbort() { return handleCachedTransactionRequestResult(() -> { if (currentState != State.ABORTABLE_ERROR) maybeFailWithError(); transitionTo(State.ABORTING_TRANSACTION); // We're aborting the transaction, so there should be no need to add new partitions newPartitionsInTransaction.clear(); return beginCompletingTransaction(TransactionResult.ABORT); }, State.ABORTING_TRANSACTION, "abortTransaction"); } private TransactionalRequestResult beginCompletingTransaction(TransactionResult transactionResult) { if (!newPartitionsInTransaction.isEmpty()) enqueueRequest(addPartitionsToTransactionHandler()); // If the error is an INVALID_PRODUCER_ID_MAPPING error, the server will not accept an EndTxnRequest, so skip // directly to InitProducerId. Otherwise, we must first abort the transaction, because the producer will be // fenced if we directly call InitProducerId. 
if (!(lastError instanceof InvalidPidMappingException)) { EndTxnRequest.Builder builder = new EndTxnRequest.Builder( new EndTxnRequestData() .setTransactionalId(transactionalId) .setProducerId(producerIdAndEpoch.producerId) .setProducerEpoch(producerIdAndEpoch.epoch) .setCommitted(transactionResult.id)); EndTxnHandler handler = new EndTxnHandler(builder); enqueueRequest(handler); if (!epochBumpRequired) { return handler.result; } } return initializeTransactions(this.producerIdAndEpoch); } public synchronized TransactionalRequestResult sendOffsetsToTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets, final ConsumerGroupMetadata groupMetadata) { ensureTransactional(); throwIfPendingState("sendOffsetsToTransaction"); maybeFailWithError(); if (currentState != State.IN_TRANSACTION) { throw new IllegalStateException("Cannot send offsets if a transaction is not in progress " + "(currentState= " + currentState + ")"); } log.debug("Begin adding offsets {} for consumer group {} to transaction", offsets, groupMetadata); AddOffsetsToTxnRequest.Builder builder = new AddOffsetsToTxnRequest.Builder( new AddOffsetsToTxnRequestData() .setTransactionalId(transactionalId) .setProducerId(producerIdAndEpoch.producerId) .setProducerEpoch(producerIdAndEpoch.epoch) .setGroupId(groupMetadata.groupId()) ); AddOffsetsToTxnHandler handler = new AddOffsetsToTxnHandler(builder, offsets, groupMetadata); enqueueRequest(handler); return handler.result; } public synchronized void maybeAddPartition(TopicPartition topicPartition) { maybeFailWithError(); throwIfPendingState("send"); if (isTransactional()) { if (!hasProducerId()) { throw new IllegalStateException("Cannot add partition " + topicPartition + " to transaction before completing a call to initTransactions"); } else if (currentState != State.IN_TRANSACTION) { throw new IllegalStateException("Cannot add partition " + topicPartition + " to transaction while in state " + currentState); } else if (isPartitionAdded(topicPartition) || 
isPartitionPendingAdd(topicPartition)) { return; } else { log.debug("Begin adding new partition {} to transaction", topicPartition); txnPartitionMap.getOrCreate(topicPartition); newPartitionsInTransaction.add(topicPartition); } } } RuntimeException lastError() { return lastError; } synchronized boolean isSendToPartitionAllowed(TopicPartition tp) { if (hasFatalError()) return false; return !isTransactional() || partitionsInTransaction.contains(tp); } public String transactionalId() { return transactionalId; } public boolean hasProducerId() { return producerIdAndEpoch.isValid(); } public boolean isTransactional() { return transactionalId != null; } synchronized boolean hasPartitionsToAdd() { return !newPartitionsInTransaction.isEmpty() || !pendingPartitionsInTransaction.isEmpty(); } synchronized boolean isCompleting() { return currentState == State.COMMITTING_TRANSACTION || currentState == State.ABORTING_TRANSACTION; } synchronized boolean hasError() { return currentState == State.ABORTABLE_ERROR || currentState == State.FATAL_ERROR; } synchronized boolean isAborting() { return currentState == State.ABORTING_TRANSACTION; } synchronized void transitionToAbortableError(RuntimeException exception) { if (currentState == State.ABORTING_TRANSACTION) { log.debug("Skipping transition to abortable error state since the transaction is already being " + "aborted. 
Underlying exception: ", exception); return; } log.info("Transiting to abortable error state due to {}", exception.toString()); transitionTo(State.ABORTABLE_ERROR, exception); } synchronized void transitionToFatalError(RuntimeException exception) { log.info("Transiting to fatal error state due to {}", exception.toString()); transitionTo(State.FATAL_ERROR, exception); if (pendingTransition != null) { pendingTransition.result.fail(exception); } } // visible for testing synchronized boolean isPartitionAdded(TopicPartition partition) { return partitionsInTransaction.contains(partition); } // visible for testing synchronized boolean isPartitionPendingAdd(TopicPartition partition) { return newPartitionsInTransaction.contains(partition) || pendingPartitionsInTransaction.contains(partition); } /** * Get the current producer id and epoch without blocking. Callers must use {@link ProducerIdAndEpoch#isValid()} to * verify that the result is valid. * * @return the current ProducerIdAndEpoch. */ ProducerIdAndEpoch producerIdAndEpoch() { return producerIdAndEpoch; } synchronized public void maybeUpdateProducerIdAndEpoch(TopicPartition topicPartition) { if (hasStaleProducerIdAndEpoch(topicPartition) && !hasInflightBatches(topicPartition)) { // If the batch was on a different ID and/or epoch (due to an epoch bump) and all its in-flight batches // have completed, reset the partition sequence so that the next batch (with the new epoch) starts from 0 txnPartitionMap.startSequencesAtBeginning(topicPartition, this.producerIdAndEpoch); log.debug("ProducerId of partition {} set to {} with epoch {}. Reinitialize sequence at beginning.", topicPartition, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch); } } /** * Set the producer id and epoch atomically. 
*/ private void setProducerIdAndEpoch(ProducerIdAndEpoch producerIdAndEpoch) { log.info("ProducerId set to {} with epoch {}", producerIdAndEpoch.producerId, producerIdAndEpoch.epoch); this.producerIdAndEpoch = producerIdAndEpoch; } /** * This method resets the producer ID and epoch and sets the state to UNINITIALIZED, which will trigger a new * InitProducerId request. This method is only called when the producer epoch is exhausted; we will bump the epoch * instead. */ private void resetIdempotentProducerId() { if (isTransactional()) throw new IllegalStateException("Cannot reset producer state for a transactional producer. " + "You must either abort the ongoing transaction or reinitialize the transactional producer instead"); log.debug("Resetting idempotent producer ID. ID and epoch before reset are {}", this.producerIdAndEpoch); setProducerIdAndEpoch(ProducerIdAndEpoch.NONE); transitionTo(State.UNINITIALIZED); } private void resetSequenceForPartition(TopicPartition topicPartition) { txnPartitionMap.topicPartitions.remove(topicPartition); this.partitionsWithUnresolvedSequences.remove(topicPartition); } private void resetSequenceNumbers() { txnPartitionMap.reset(); this.partitionsWithUnresolvedSequences.clear(); } synchronized void requestEpochBumpForPartition(TopicPartition tp) { epochBumpRequired = true; this.partitionsToRewriteSequences.add(tp); } private void bumpIdempotentProducerEpoch() { if (this.producerIdAndEpoch.epoch == Short.MAX_VALUE) { resetIdempotentProducerId(); } else { setProducerIdAndEpoch(new ProducerIdAndEpoch(this.producerIdAndEpoch.producerId, (short) (this.producerIdAndEpoch.epoch + 1))); log.debug("Incremented producer epoch, current producer ID and epoch are now {}", this.producerIdAndEpoch); } // When the epoch is bumped, rewrite all in-flight sequences for the partition(s) that triggered the epoch bump for (TopicPartition topicPartition : this.partitionsToRewriteSequences) { this.txnPartitionMap.startSequencesAtBeginning(topicPartition, 
this.producerIdAndEpoch); this.partitionsWithUnresolvedSequences.remove(topicPartition); } this.partitionsToRewriteSequences.clear(); epochBumpRequired = false; } synchronized void bumpIdempotentEpochAndResetIdIfNeeded() { if (!isTransactional()) { if (epochBumpRequired) { bumpIdempotentProducerEpoch(); } if (currentState != State.INITIALIZING && !hasProducerId()) { transitionTo(State.INITIALIZING); InitProducerIdRequestData requestData = new InitProducerIdRequestData() .setTransactionalId(null) .setTransactionTimeoutMs(Integer.MAX_VALUE); InitProducerIdHandler handler = new InitProducerIdHandler(new InitProducerIdRequest.Builder(requestData), false); enqueueRequest(handler); } } } /** * Returns the next sequence number to be written to the given TopicPartition. */ synchronized Integer sequenceNumber(TopicPartition topicPartition) { return txnPartitionMap.getOrCreate(topicPartition).nextSequence; } /** * Returns the current producer id/epoch of the given TopicPartition. */ synchronized ProducerIdAndEpoch producerIdAndEpoch(TopicPartition topicPartition) { return txnPartitionMap.getOrCreate(topicPartition).producerIdAndEpoch; } synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequence = sequenceNumber(topicPartition); currentSequence = DefaultRecordBatch.incrementSequence(currentSequence, increment); txnPartitionMap.get(topicPartition).nextSequence = currentSequence; } synchronized void addInFlightBatch(ProducerBatch batch) { if (!batch.hasSequence()) throw new IllegalStateException("Can't track batch for partition " + batch.topicPartition + " when sequence is not set."); txnPartitionMap.get(batch.topicPartition).inflightBatchesBySequence.add(batch); } /** * Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with * the lowest sequence number. * @return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition. 
* If there are no inflight requests being tracked for this partition, this method will return * RecordBatch.NO_SEQUENCE. */ synchronized int firstInFlightSequence(TopicPartition topicPartition) { if (!hasInflightBatches(topicPartition)) return RecordBatch.NO_SEQUENCE; SortedSet<ProducerBatch> inflightBatches = txnPartitionMap.get(topicPartition).inflightBatchesBySequence; if (inflightBatches.isEmpty()) return RecordBatch.NO_SEQUENCE; else return inflightBatches.first().baseSequence(); } synchronized ProducerBatch nextBatchBySequence(TopicPartition topicPartition) { SortedSet<ProducerBatch> queue = txnPartitionMap.get(topicPartition).inflightBatchesBySequence; return queue.isEmpty() ? null : queue.first(); } synchronized void removeInFlightBatch(ProducerBatch batch) { if (hasInflightBatches(batch.topicPartition)) { txnPartitionMap.get(batch.topicPartition).inflightBatchesBySequence.remove(batch); } } private int maybeUpdateLastAckedSequence(TopicPartition topicPartition, int sequence) { int lastAckedSequence = lastAckedSequence(topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER); if (sequence > lastAckedSequence) { txnPartitionMap.get(topicPartition).lastAckedSequence = sequence; return sequence; } return lastAckedSequence; } synchronized OptionalInt lastAckedSequence(TopicPartition topicPartition) { return txnPartitionMap.lastAckedSequence(topicPartition); } synchronized OptionalLong lastAckedOffset(TopicPartition topicPartition) { return txnPartitionMap.lastAckedOffset(topicPartition); } private void updateLastAckedOffset(ProduceResponse.PartitionResponse response, ProducerBatch batch) { if (response.baseOffset == ProduceResponse.INVALID_OFFSET) return; long lastOffset = response.baseOffset + batch.recordCount - 1; OptionalLong lastAckedOffset = lastAckedOffset(batch.topicPartition); // It might happen that the TransactionManager has been reset while a request was reenqueued and got a valid // response for this. 
This can happen only if the producer is only idempotent (not transactional) and in // this case there will be no tracked bookkeeper entry about it, so we have to insert one. if (!lastAckedOffset.isPresent() && !isTransactional()) { txnPartitionMap.getOrCreate(batch.topicPartition); } if (lastOffset > lastAckedOffset.orElse(ProduceResponse.INVALID_OFFSET)) { txnPartitionMap.get(batch.topicPartition).lastAckedOffset = lastOffset; } else { log.trace("Partition {} keeps lastOffset at {}", batch.topicPartition, lastOffset); } } public synchronized void handleCompletedBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response) { int lastAckedSequence = maybeUpdateLastAckedSequence(batch.topicPartition, batch.lastSequence()); log.debug("ProducerId: {}; Set last ack'd sequence number for topic-partition {} to {}", batch.producerId(), batch.topicPartition, lastAckedSequence); updateLastAckedOffset(response, batch); removeInFlightBatch(batch); } public synchronized void maybeTransitionToErrorState(RuntimeException exception) { if (exception instanceof ClusterAuthorizationException || exception instanceof TransactionalIdAuthorizationException || exception instanceof ProducerFencedException || exception instanceof UnsupportedVersionException) { transitionToFatalError(exception); } else if (isTransactional()) { if (canBumpEpoch() && !isCompleting()) { epochBumpRequired = true; } transitionToAbortableError(exception); } } synchronized void handleFailedBatch(ProducerBatch batch, RuntimeException exception, boolean adjustSequenceNumbers) { maybeTransitionToErrorState(exception); removeInFlightBatch(batch); if (hasFatalError()) { log.debug("Ignoring batch {} with producer id {}, epoch {}, and sequence number {} " + "since the producer is already in fatal error state", batch, batch.producerId(), batch.producerEpoch(), batch.baseSequence(), exception); return; } if (exception instanceof OutOfOrderSequenceException && !isTransactional()) { log.error("The broker returned {} 
for topic-partition {} with producerId {}, epoch {}, and sequence number {}", exception, batch.topicPartition, batch.producerId(), batch.producerEpoch(), batch.baseSequence()); // If we fail with an OutOfOrderSequenceException, we have a gap in the log. Bump the epoch for this // partition, which will reset the sequence number to 0 and allow us to continue requestEpochBumpForPartition(batch.topicPartition); } else if (exception instanceof UnknownProducerIdException) { // If we get an UnknownProducerId for a partition, then the broker has no state for that producer. It will // therefore accept a write with sequence number 0. We reset the sequence number for the partition here so // that the producer can continue after aborting the transaction. All inflight-requests to this partition // will also fail with an UnknownProducerId error, so the sequence will remain at 0. Note that if the // broker supports bumping the epoch, we will later reset all sequence numbers after calling InitProducerId resetSequenceForPartition(batch.topicPartition); } else { if (adjustSequenceNumbers) { if (!isTransactional()) { requestEpochBumpForPartition(batch.topicPartition); } else { adjustSequencesDueToFailedBatch(batch); } } } } // If a batch is failed fatally, the sequence numbers for future batches bound for the partition must be adjusted // so that they don't fail with the OutOfOrderSequenceException. // // This method must only be called when we know that the batch is question has been unequivocally failed by the broker, // ie. it has received a confirmed fatal status code like 'Message Too Large' or something similar. private void adjustSequencesDueToFailedBatch(ProducerBatch batch) { if (!txnPartitionMap.contains(batch.topicPartition)) // Sequence numbers are not being tracked for this partition. This could happen if the producer id was just // reset due to a previous OutOfOrderSequenceException. return; log.debug("producerId: {}, send to partition {} failed fatally. 
Reducing future sequence numbers by {}", batch.producerId(), batch.topicPartition, batch.recordCount);
// NOTE(review): the statement above is the tail of a log call whose opening lies in the previous
// chunk. All code tokens in this block are unchanged; only comments and line formatting were added.

// Rewind the partition's next sequence by the failed batch's record count, then renumber every
// in-flight batch that was sequenced at or after the failed batch.
int currentSequence = sequenceNumber(batch.topicPartition);
currentSequence -= batch.recordCount;
if (currentSequence < 0)
    throw new IllegalStateException("Sequence number for partition " + batch.topicPartition + " is going to become negative: " + currentSequence);

setNextSequence(batch.topicPartition, currentSequence);

txnPartitionMap.get(batch.topicPartition).resetSequenceNumbers(inFlightBatch -> {
    // Batches sequenced before the failed batch keep their numbers.
    if (inFlightBatch.baseSequence() < batch.baseSequence())
        return;

    int newSequence = inFlightBatch.baseSequence() - batch.recordCount;
    if (newSequence < 0)
        throw new IllegalStateException("Sequence number for batch with sequence " + inFlightBatch.baseSequence() +
                " for partition " + batch.topicPartition + " is going to become negative: " + newSequence);

    inFlightBatch.resetProducerState(new ProducerIdAndEpoch(inFlightBatch.producerId(), inFlightBatch.producerEpoch()), newSequence, inFlightBatch.isTransactional());
});
}

// True while any batch for the partition is still awaiting a produce response.
synchronized boolean hasInflightBatches(TopicPartition topicPartition) {
    return !txnPartitionMap.getOrCreate(topicPartition).inflightBatchesBySequence.isEmpty();
}

// True if the partition's cached producer id/epoch differs from the manager's current one.
synchronized boolean hasStaleProducerIdAndEpoch(TopicPartition topicPartition) {
    return !producerIdAndEpoch.equals(txnPartitionMap.getOrCreate(topicPartition).producerIdAndEpoch);
}

synchronized boolean hasUnresolvedSequences() {
    return !partitionsWithUnresolvedSequences.isEmpty();
}

synchronized boolean hasUnresolvedSequence(TopicPartition topicPartition) {
    return partitionsWithUnresolvedSequences.containsKey(topicPartition);
}

// Record the sequence immediately after this batch as "unresolved" for its partition,
// keeping the maximum value across calls.
synchronized void markSequenceUnresolved(ProducerBatch batch) {
    int nextSequence = batch.lastSequence() + 1;
    partitionsWithUnresolvedSequences.compute(batch.topicPartition, (k, v) -> v == null ? nextSequence : Math.max(v, nextSequence));
    log.debug("Marking partition {} unresolved with next sequence number {}", batch.topicPartition,
            partitionsWithUnresolvedSequences.get(batch.topicPartition));
}

// Attempts to resolve unresolved sequences. If all in-flight requests are complete and some partitions are still
// unresolved, either bump the epoch if possible, or transition to a fatal error
synchronized void maybeResolveSequences() {
    for (Iterator<TopicPartition> iter = partitionsWithUnresolvedSequences.keySet().iterator(); iter.hasNext(); ) {
        TopicPartition topicPartition = iter.next();
        if (!hasInflightBatches(topicPartition)) {
            // The partition has been fully drained. At this point, the last ack'd sequence should be one less than
            // next sequence destined for the partition. If so, the partition is fully resolved. If not, we should
            // reset the sequence number if necessary.
            if (isNextSequence(topicPartition, sequenceNumber(topicPartition))) {
                // This would happen when a batch was expired, but subsequent batches succeeded.
                iter.remove();
            } else {
                // We would enter this branch if all in flight batches were ultimately expired in the producer.
                if (isTransactional()) {
                    // For the transactional producer, we bump the epoch if possible, otherwise we transition to a fatal error
                    String unackedMessagesErr = "The client hasn't received acknowledgment for some previously " +
                            "sent messages and can no longer retry them. ";
                    if (canBumpEpoch()) {
                        epochBumpRequired = true;
                        KafkaException exception = new KafkaException(unackedMessagesErr + "It is safe to abort " +
                                "the transaction and continue.");
                        transitionToAbortableError(exception);
                    } else {
                        KafkaException exception = new KafkaException(unackedMessagesErr + "It isn't safe to continue.");
                        transitionToFatalError(exception);
                    }
                } else {
                    // For the idempotent producer, bump the epoch
                    log.info("No inflight batches remaining for {}, last ack'd sequence for partition is {}, next sequence is {}. " +
                            "Going to bump epoch and reset sequence numbers.", topicPartition,
                            lastAckedSequence(topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER), sequenceNumber(topicPartition));
                    requestEpochBumpForPartition(topicPartition);
                }
                iter.remove();
            }
        }
    }
}

// True when `sequence` is exactly one past the last acknowledged sequence for the partition.
private boolean isNextSequence(TopicPartition topicPartition, int sequence) {
    return sequence - lastAckedSequence(topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER) == 1;
}

private void setNextSequence(TopicPartition topicPartition, int sequence) {
    txnPartitionMap.get(topicPartition).nextSequence = sequence;
}

// True when `sequence` is the recorded unresolved "next sequence" for the partition.
private boolean isNextSequenceForUnresolvedPartition(TopicPartition topicPartition, int sequence) {
    return this.hasUnresolvedSequence(topicPartition) &&
            sequence == this.partitionsWithUnresolvedSequences.get(topicPartition);
}

// Dequeue the next transactional request to send, first enqueuing an AddPartitionsToTxn request
// if new partitions are pending. Returns null when nothing should be sent yet (empty queue,
// EndTxn blocked on unflushed batches, or the request was terminated with an error).
synchronized TxnRequestHandler nextRequest(boolean hasIncompleteBatches) {
    if (!newPartitionsInTransaction.isEmpty())
        enqueueRequest(addPartitionsToTransactionHandler());

    TxnRequestHandler nextRequestHandler = pendingRequests.peek();
    if (nextRequestHandler == null)
        return null;

    // Do not send the EndTxn until all batches have been flushed
    if (nextRequestHandler.isEndTxn() && hasIncompleteBatches)
        return null;

    pendingRequests.poll();
    if (maybeTerminateRequestWithError(nextRequestHandler)) {
        log.trace("Not sending transactional request {} because we are in an error state",
                nextRequestHandler.requestBuilder());
        return null;
    }

    if (nextRequestHandler.isEndTxn() && !transactionStarted) {
        // Nothing was added to the transaction, so complete it locally instead of sending EndTxn.
        nextRequestHandler.result.done();
        if (currentState != State.FATAL_ERROR) {
            log.debug("Not sending EndTxn for completed transaction since no partitions " +
                    "or offsets were successfully added");
            completeTransaction();
        }
        nextRequestHandler = pendingRequests.poll();
    }

    if (nextRequestHandler != null)
        log.trace("Request {} dequeued for sending", nextRequestHandler.requestBuilder());

    return nextRequestHandler;
}

// Mark a request as a retry and put it back on the queue.
synchronized void retry(TxnRequestHandler request) {
    request.setRetry();
    enqueueRequest(request);
}
// Fail every queued transactional request with the given authentication exception.
synchronized void authenticationFailed(AuthenticationException e) {
    for (TxnRequestHandler request : pendingRequests)
        request.fatalError(e);
}

// Fail all pending requests (and any pending state transition) when the producer is force-closed.
synchronized void close() {
    KafkaException shutdownException = new KafkaException("The producer closed forcefully");
    pendingRequests.forEach(handler -> handler.fatalError(shutdownException));
    if (pendingTransition != null) {
        pendingTransition.result.fail(shutdownException);
    }
}

// Return the cached coordinator node for the given coordinator type.
Node coordinator(FindCoordinatorRequest.CoordinatorType type) {
    switch (type) {
        case GROUP:
            return consumerGroupCoordinator;
        case TRANSACTION:
            return transactionCoordinator;
        default:
            throw new IllegalStateException("Received an invalid coordinator type: " + type);
    }
}

// Trigger a coordinator lookup using the request handler's own coordinator type and key.
void lookupCoordinator(TxnRequestHandler request) {
    lookupCoordinator(request.coordinatorType(), request.coordinatorKey());
}

// Record the correlation id of the transactional request currently in flight.
void setInFlightCorrelationId(int correlationId) {
    inFlightRequestCorrelationId = correlationId;
}

private void clearInFlightCorrelationId() {
    inFlightRequestCorrelationId = NO_INFLIGHT_REQUEST_CORRELATION_ID;
}

boolean hasInFlightRequest() {
    return inFlightRequestCorrelationId != NO_INFLIGHT_REQUEST_CORRELATION_ID;
}

// visible for testing.
boolean hasFatalError() {
    return currentState == State.FATAL_ERROR;
}

// visible for testing.
boolean hasAbortableError() {
    return currentState == State.ABORTABLE_ERROR;
}

// visible for testing
synchronized boolean transactionContainsPartition(TopicPartition topicPartition) {
    return partitionsInTransaction.contains(topicPartition);
}

// visible for testing
synchronized boolean hasPendingOffsetCommits() {
    return !pendingTxnOffsetCommits.isEmpty();
}

synchronized boolean hasPendingRequests() {
    return !pendingRequests.isEmpty();
}

// visible for testing
synchronized boolean hasOngoingTransaction() {
    // transactions are considered ongoing once started until completion or a fatal error
    return currentState == State.IN_TRANSACTION || isCompleting() || hasAbortableError();
}

// Decide whether a failed produce batch may be retried, based on the partition-level error and
// the sequence bookkeeping for the batch's partition. May request an epoch bump (idempotent
// producer) or reset in-flight sequences (transactional producer) as a side effect.
synchronized boolean canRetry(ProduceResponse.PartitionResponse response, ProducerBatch batch) {
    Errors error = response.error;

    // An UNKNOWN_PRODUCER_ID means that we have lost the producer state on the broker. Depending on the log start
    // offset, we may want to retry these, as described for each case below. If none of those apply, then for the
    // idempotent producer, we will locally bump the epoch and reset the sequence numbers of in-flight batches from
    // sequence 0, then retry the failed batch, which should now succeed. For the transactional producer, allow the
    // batch to fail. When processing the failed batch, we will transition to an abortable error and set a flag
    // indicating that we need to bump the epoch (if supported by the broker).
    if (error == Errors.UNKNOWN_PRODUCER_ID) {
        if (response.logStartOffset == -1) {
            // We don't know the log start offset with this response. We should just retry the request until we get it.
            // The UNKNOWN_PRODUCER_ID error code was added along with the new ProduceResponse which includes the
            // logStartOffset. So the '-1' sentinel is not for backward compatibility. Instead, it is possible for
            // a broker to not know the logStartOffset when it is returning the response because the partition
            // may have moved away from the broker from the time the error was initially raised to the time the
            // response was being constructed. In these cases, we should just retry the request: we are guaranteed
            // to eventually get a logStartOffset once things settle down.
            return true;
        }

        if (batch.sequenceHasBeenReset()) {
            // When the first inflight batch fails due to the truncation case, then the sequences of all the other
            // in flight batches would have been restarted from the beginning. However, when those responses
            // come back from the broker, they would also come with an UNKNOWN_PRODUCER_ID error. In this case, we should not
            // reset the sequence numbers to the beginning.
            return true;
        } else if (lastAckedOffset(batch.topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER) < response.logStartOffset) {
            // The head of the log has been removed, probably due to the retention time elapsing. In this case,
            // we expect to lose the producer state. For the transactional producer, reset the sequences of all
            // inflight batches to be from the beginning and retry them, so that the transaction does not need to
            // be aborted. For the idempotent producer, bump the epoch to avoid reusing (sequence, epoch) pairs
            if (isTransactional()) {
                txnPartitionMap.startSequencesAtBeginning(batch.topicPartition, this.producerIdAndEpoch);
            } else {
                requestEpochBumpForPartition(batch.topicPartition);
            }
            return true;
        }

        if (!isTransactional()) {
            // For the idempotent producer, always retry UNKNOWN_PRODUCER_ID errors. If the batch has the current
            // producer ID and epoch, request a bump of the epoch. Otherwise just retry the produce.
            requestEpochBumpForPartition(batch.topicPartition);
            return true;
        }
    } else if (error == Errors.OUT_OF_ORDER_SEQUENCE_NUMBER) {
        if (!hasUnresolvedSequence(batch.topicPartition) &&
                (batch.sequenceHasBeenReset() || !isNextSequence(batch.topicPartition, batch.baseSequence()))) {
            // We should retry the OutOfOrderSequenceException if the batch is _not_ the next batch, ie. its base
            // sequence isn't the lastAckedSequence + 1.
            return true;
        } else if (!isTransactional()) {
            // For the idempotent producer, retry all OUT_OF_ORDER_SEQUENCE_NUMBER errors. If there are no
            // unresolved sequences, or this batch is the one immediately following an unresolved sequence, we know
            // there is actually a gap in the sequences, and we bump the epoch. Otherwise, retry without bumping
            // and wait to see if the sequence resolves
            if (!hasUnresolvedSequence(batch.topicPartition) ||
                    isNextSequenceForUnresolvedPartition(batch.topicPartition, batch.baseSequence())) {
                requestEpochBumpForPartition(batch.topicPartition);
            }
            return true;
        }
    }

    // If neither of the above cases are true, retry if the exception is retriable
    return error.exception() instanceof RetriableException;
}

// visible for testing
synchronized boolean isReady() {
    return isTransactional() && currentState == State.READY;
}

// Cache whether the discovered transaction coordinator supports epoch bumping
// (InitProducerId v3+); consulted later via canBumpEpoch().
void handleCoordinatorReady() {
    NodeApiVersions nodeApiVersions = transactionCoordinator != null ?
            apiVersions.get(transactionCoordinator.idString()) :
            null;
    ApiVersion initProducerIdVersion = nodeApiVersions != null ?
            nodeApiVersions.apiVersion(ApiKeys.INIT_PRODUCER_ID) :
            null;
    this.coordinatorSupportsBumpingEpoch = initProducerIdVersion != null &&
            initProducerIdVersion.maxVersion() >= 3;
}

private void transitionTo(State target) {
    transitionTo(target, null);
}

// Validate and perform a state transition; error states must carry a non-null exception.
private void transitionTo(State target, RuntimeException error) {
    if (!currentState.isTransitionValid(currentState, target)) {
        String idString = transactionalId == null ?
"" : "TransactionalId " + transactionalId + ": ";
        throw new IllegalStateException(idString + "Invalid transition attempted from state "
                + currentState.name() + " to state " + target.name());
    }

    if (target == State.FATAL_ERROR || target == State.ABORTABLE_ERROR) {
        if (error == null)
            throw new IllegalArgumentException("Cannot transition to " + target + " with a null exception");
        lastError = error;
    } else {
        lastError = null;
    }

    if (lastError != null)
        log.debug("Transition from state {} to error state {}", currentState, target, lastError);
    else
        log.debug("Transition from state {} to {}", currentState, target);

    currentState = target;
}

// Guard: throw if this manager is not configured with a transactional id.
private void ensureTransactional() {
    if (!isTransactional())
        throw new IllegalStateException("Transactional method invoked on a non-transactional producer.");
}

// Rethrow the last recorded error; fenced/epoch errors are recreated without the stale call trace.
private void maybeFailWithError() {
    if (hasError()) {
        // for ProducerFencedException, do not wrap it as a KafkaException
        // but create a new instance without the call trace since it was not thrown because of the current call
        if (lastError instanceof ProducerFencedException) {
            throw new ProducerFencedException("Producer with transactionalId '" + transactionalId
                    + "' and " + producerIdAndEpoch + " has been fenced by another producer " +
                    "with the same transactionalId");
        } else if (lastError instanceof InvalidProducerEpochException) {
            throw new InvalidProducerEpochException("Producer with transactionalId '" + transactionalId
                    + "' and " + producerIdAndEpoch + " attempted to produce with an old epoch");
        } else {
            throw new KafkaException("Cannot execute transactional method because we are in an error state", lastError);
        }
    }
}

// Fail the request immediately if we are already in an error state; FindCoordinator is still
// allowed through when we expect to abort.
private boolean maybeTerminateRequestWithError(TxnRequestHandler requestHandler) {
    if (hasError()) {
        if (hasAbortableError() && requestHandler instanceof FindCoordinatorHandler)
            // No harm letting the FindCoordinator request go through if we're expecting to abort
            return false;

        requestHandler.fail(lastError);
        return true;
    }
    return false;
}

private void enqueueRequest(TxnRequestHandler requestHandler) {
    log.debug("Enqueuing transactional request {}", requestHandler.requestBuilder());
    pendingRequests.add(requestHandler);
}

// Clear the cached coordinator of the given type and queue a FindCoordinator lookup for it.
private void lookupCoordinator(FindCoordinatorRequest.CoordinatorType type, String coordinatorKey) {
    switch (type) {
        case GROUP:
            consumerGroupCoordinator = null;
            break;
        case TRANSACTION:
            transactionCoordinator = null;
            break;
        default:
            throw new IllegalStateException("Invalid coordinator type: " + type);
    }

    FindCoordinatorRequestData data = new FindCoordinatorRequestData()
            .setKeyType(type.id())
            .setKey(coordinatorKey);
    FindCoordinatorRequest.Builder builder = new FindCoordinatorRequest.Builder(data);
    enqueueRequest(new FindCoordinatorHandler(builder));
}

// Move newly-added partitions into the pending set and build the AddPartitionsToTxn request.
private TxnRequestHandler addPartitionsToTransactionHandler() {
    pendingPartitionsInTransaction.addAll(newPartitionsInTransaction);
    newPartitionsInTransaction.clear();
    AddPartitionsToTxnRequest.Builder builder =
        AddPartitionsToTxnRequest.Builder.forClient(transactionalId,
            producerIdAndEpoch.producerId,
            producerIdAndEpoch.epoch,
            new ArrayList<>(pendingPartitionsInTransaction));
    return new AddPartitionsToTxnHandler(builder);
}

// Record the offsets as pending transactional commits and build the TxnOffsetCommit handler
// that will complete `result` when the commit round-trips.
private TxnOffsetCommitHandler txnOffsetCommitHandler(TransactionalRequestResult result,
                                                      Map<TopicPartition, OffsetAndMetadata> offsets,
                                                      ConsumerGroupMetadata groupMetadata) {
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        OffsetAndMetadata offsetAndMetadata = entry.getValue();
        CommittedOffset committedOffset = new CommittedOffset(offsetAndMetadata.offset(),
                offsetAndMetadata.metadata(), offsetAndMetadata.leaderEpoch());
        pendingTxnOffsetCommits.put(entry.getKey(), committedOffset);
    }

    final TxnOffsetCommitRequest.Builder builder =
        new TxnOffsetCommitRequest.Builder(transactionalId,
            groupMetadata.groupId(),
            producerIdAndEpoch.producerId,
            producerIdAndEpoch.epoch,
            pendingTxnOffsetCommits,
            groupMetadata.memberId(),
            groupMetadata.generationId(),
            groupMetadata.groupInstanceId()
        );
    return new
TxnOffsetCommitHandler(result, builder); } private void throwIfPendingState(String operation) { if (pendingTransition != null) { if (pendingTransition.result.isAcked()) { pendingTransition = null; } else { throw new IllegalStateException("Cannot attempt operation `" + operation + "` " + "because the previous call to `" + pendingTransition.operation + "` " + "timed out and must be retried"); } } } private TransactionalRequestResult handleCachedTransactionRequestResult( Supplier<TransactionalRequestResult> transactionalRequestResultSupplier, State nextState, String operation ) { ensureTransactional(); if (pendingTransition != null) { if (pendingTransition.result.isAcked()) { pendingTransition = null; } else if (nextState != pendingTransition.state) { throw new IllegalStateException("Cannot attempt operation `" + operation + "` " + "because the previous call to `" + pendingTransition.operation + "` " + "timed out and must be retried"); } else { return pendingTransition.result; } } TransactionalRequestResult result = transactionalRequestResultSupplier.get(); pendingTransition = new PendingStateTransition(result, nextState, operation); return result; } // package-private for testing boolean canBumpEpoch() { if (!isTransactional()) { return true; } return coordinatorSupportsBumpingEpoch; } private void completeTransaction() { if (epochBumpRequired) { transitionTo(State.INITIALIZING); } else { transitionTo(State.READY); } lastError = null; epochBumpRequired = false; transactionStarted = false; newPartitionsInTransaction.clear(); pendingPartitionsInTransaction.clear(); partitionsInTransaction.clear(); } abstract class TxnRequestHandler implements RequestCompletionHandler { protected final TransactionalRequestResult result; private boolean isRetry = false; TxnRequestHandler(TransactionalRequestResult result) { this.result = result; } TxnRequestHandler(String operation) { this(new TransactionalRequestResult(operation)); } void fatalError(RuntimeException e) { result.fail(e); 
transitionToFatalError(e); } void abortableError(RuntimeException e) { result.fail(e); transitionToAbortableError(e); } void abortableErrorIfPossible(RuntimeException e) { if (canBumpEpoch()) { epochBumpRequired = true; abortableError(e); } else { fatalError(e); } } void fail(RuntimeException e) { result.fail(e); } void reenqueue() { synchronized (TransactionManager.this) { this.isRetry = true; enqueueRequest(this); } } long retryBackoffMs() { return retryBackoffMs; } @Override public void onComplete(ClientResponse response) { if (response.requestHeader().correlationId() != inFlightRequestCorrelationId) { fatalError(new RuntimeException("Detected more than one in-flight transactional request.")); } else { clearInFlightCorrelationId(); if (response.wasDisconnected()) { log.debug("Disconnected from {}. Will retry.", response.destination()); if (this.needsCoordinator()) lookupCoordinator(this.coordinatorType(), this.coordinatorKey()); reenqueue(); } else if (response.versionMismatch() != null) { fatalError(response.versionMismatch()); } else if (response.hasResponse()) { log.trace("Received transactional response {} for request {}", response.responseBody(), requestBuilder()); synchronized (TransactionManager.this) { handleResponse(response.responseBody()); } } else { fatalError(new KafkaException("Could not execute transactional request for unknown reasons")); } } } boolean needsCoordinator() { return coordinatorType() != null; } FindCoordinatorRequest.CoordinatorType coordinatorType() { return FindCoordinatorRequest.CoordinatorType.TRANSACTION; } String coordinatorKey() { return transactionalId; } void setRetry() { this.isRetry = true; } boolean isRetry() { return isRetry; } boolean isEndTxn() { return false; } abstract AbstractRequest.Builder<?> requestBuilder(); abstract void handleResponse(AbstractResponse responseBody); abstract Priority priority(); } private class InitProducerIdHandler extends TxnRequestHandler { private final InitProducerIdRequest.Builder 
builder; private final boolean isEpochBump; private InitProducerIdHandler(InitProducerIdRequest.Builder builder, boolean isEpochBump) { super("InitProducerId"); this.builder = builder; this.isEpochBump = isEpochBump; } @Override InitProducerIdRequest.Builder requestBuilder() { return builder; } @Override Priority priority() { return this.isEpochBump ? Priority.EPOCH_BUMP : Priority.INIT_PRODUCER_ID; } @Override FindCoordinatorRequest.CoordinatorType coordinatorType() { if (isTransactional()) { return FindCoordinatorRequest.CoordinatorType.TRANSACTION; } else { return null; } } @Override public void handleResponse(AbstractResponse response) { InitProducerIdResponse initProducerIdResponse = (InitProducerIdResponse) response; Errors error = initProducerIdResponse.error(); if (error == Errors.NONE) { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(initProducerIdResponse.data().producerId(), initProducerIdResponse.data().producerEpoch()); setProducerIdAndEpoch(producerIdAndEpoch); transitionTo(State.READY); lastError = null; if (this.isEpochBump) { resetSequenceNumbers(); } result.done(); } else if (error == Errors.NOT_COORDINATOR || error == Errors.COORDINATOR_NOT_AVAILABLE) { lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId); reenqueue(); } else if (error.exception() instanceof RetriableException) { reenqueue(); } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED || error == Errors.CLUSTER_AUTHORIZATION_FAILED) { fatalError(error.exception()); } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) { // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator, // just treat it the same as PRODUCE_FENCED. 
fatalError(Errors.PRODUCER_FENCED.exception()); } else { fatalError(new KafkaException("Unexpected error in InitProducerIdResponse; " + error.message())); } } } private class AddPartitionsToTxnHandler extends TxnRequestHandler { private final AddPartitionsToTxnRequest.Builder builder; private long retryBackoffMs; private AddPartitionsToTxnHandler(AddPartitionsToTxnRequest.Builder builder) { super("AddPartitionsToTxn"); this.builder = builder; this.retryBackoffMs = TransactionManager.this.retryBackoffMs; } @Override AddPartitionsToTxnRequest.Builder requestBuilder() { return builder; } @Override Priority priority() { return Priority.ADD_PARTITIONS_OR_OFFSETS; } @Override public void handleResponse(AbstractResponse response) { AddPartitionsToTxnResponse addPartitionsToTxnResponse = (AddPartitionsToTxnResponse) response; Map<TopicPartition, Errors> errors = addPartitionsToTxnResponse.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID); boolean hasPartitionErrors = false; Set<String> unauthorizedTopics = new HashSet<>(); retryBackoffMs = TransactionManager.this.retryBackoffMs; for (Map.Entry<TopicPartition, Errors> topicPartitionErrorEntry : errors.entrySet()) { TopicPartition topicPartition = topicPartitionErrorEntry.getKey(); Errors error = topicPartitionErrorEntry.getValue(); if (error == Errors.NONE) { continue; } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) { lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId); reenqueue(); return; } else if (error == Errors.CONCURRENT_TRANSACTIONS) { maybeOverrideRetryBackoffMs(); reenqueue(); return; } else if (error.exception() instanceof RetriableException) { reenqueue(); return; } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) { // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator, // just treat it the same as PRODUCE_FENCED. 
fatalError(Errors.PRODUCER_FENCED.exception()); return; } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) { fatalError(error.exception()); return; } else if (error == Errors.INVALID_TXN_STATE) { fatalError(new KafkaException(error.exception())); return; } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { unauthorizedTopics.add(topicPartition.topic()); } else if (error == Errors.OPERATION_NOT_ATTEMPTED) { log.debug("Did not attempt to add partition {} to transaction because other partitions in the " + "batch had errors.", topicPartition); hasPartitionErrors = true; } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { abortableErrorIfPossible(error.exception()); return; } else { log.error("Could not add partition {} due to unexpected error {}", topicPartition, error); hasPartitionErrors = true; } } Set<TopicPartition> partitions = errors.keySet(); // Remove the partitions from the pending set regardless of the result. We use the presence // of partitions in the pending set to know when it is not safe to send batches. However, if // the partitions failed to be added and we enter an error state, we expect the batches to be // aborted anyway. In this case, we must be able to continue sending the batches which are in // retry for partitions that were successfully added. 
pendingPartitionsInTransaction.removeAll(partitions); if (!unauthorizedTopics.isEmpty()) { abortableError(new TopicAuthorizationException(unauthorizedTopics)); } else if (hasPartitionErrors) { abortableError(new KafkaException("Could not add partitions to transaction due to errors: " + errors)); } else { log.debug("Successfully added partitions {} to transaction", partitions); partitionsInTransaction.addAll(partitions); transactionStarted = true; result.done(); } } @Override public long retryBackoffMs() { return Math.min(TransactionManager.this.retryBackoffMs, this.retryBackoffMs); } private void maybeOverrideRetryBackoffMs() { // We only want to reduce the backoff when retrying the first AddPartition which errored out due to a // CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and // we don't want to wait too long before trying to start the new one. // // This is only a temporary fix, the long term solution is being tracked in // https://issues.apache.org/jira/browse/KAFKA-5482 if (partitionsInTransaction.isEmpty()) this.retryBackoffMs = ADD_PARTITIONS_RETRY_BACKOFF_MS; } } private class FindCoordinatorHandler extends TxnRequestHandler { private final FindCoordinatorRequest.Builder builder; private FindCoordinatorHandler(FindCoordinatorRequest.Builder builder) { super("FindCoordinator"); this.builder = builder; } @Override FindCoordinatorRequest.Builder requestBuilder() { return builder; } @Override Priority priority() { return Priority.FIND_COORDINATOR; } @Override FindCoordinatorRequest.CoordinatorType coordinatorType() { return null; } @Override String coordinatorKey() { return null; } @Override public void handleResponse(AbstractResponse response) { CoordinatorType coordinatorType = CoordinatorType.forId(builder.data().keyType()); List<Coordinator> coordinators = ((FindCoordinatorResponse) response).coordinators(); if (coordinators.size() != 1) { log.error("Group coordinator lookup failed: Invalid response 
containing more than a single coordinator"); fatalError(new IllegalStateException("Group coordinator lookup failed: Invalid response containing more than a single coordinator")); } Coordinator coordinatorData = coordinators.get(0); // For older versions without batching, obtain key from request data since it is not included in response String key = coordinatorData.key() == null ? builder.data().key() : coordinatorData.key(); Errors error = Errors.forCode(coordinatorData.errorCode()); if (error == Errors.NONE) { Node node = new Node(coordinatorData.nodeId(), coordinatorData.host(), coordinatorData.port()); switch (coordinatorType) { case GROUP: consumerGroupCoordinator = node; break; case TRANSACTION: transactionCoordinator = node; } result.done(); log.info("Discovered {} coordinator {}", coordinatorType.toString().toLowerCase(Locale.ROOT), node); } else if (error.exception() instanceof RetriableException) { reenqueue(); } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) { fatalError(error.exception()); } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) { abortableError(GroupAuthorizationException.forGroupId(key)); } else { fatalError(new KafkaException(String.format("Could not find a coordinator with type %s with key %s due to " + "unexpected error: %s", coordinatorType, key, coordinatorData.errorMessage()))); } } } private class EndTxnHandler extends TxnRequestHandler { private final EndTxnRequest.Builder builder; private EndTxnHandler(EndTxnRequest.Builder builder) { super("EndTxn(" + builder.data.committed() + ")"); this.builder = builder; } @Override EndTxnRequest.Builder requestBuilder() { return builder; } @Override Priority priority() { return Priority.END_TXN; } @Override boolean isEndTxn() { return true; } @Override public void handleResponse(AbstractResponse response) { EndTxnResponse endTxnResponse = (EndTxnResponse) response; Errors error = endTxnResponse.error(); if (error == Errors.NONE) { completeTransaction(); result.done(); } 
else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) { lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId); reenqueue(); } else if (error.exception() instanceof RetriableException) { reenqueue(); } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) { // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator, // just treat it the same as PRODUCE_FENCED. fatalError(Errors.PRODUCER_FENCED.exception()); } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) { fatalError(error.exception()); } else if (error == Errors.INVALID_TXN_STATE) { fatalError(error.exception()); } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { abortableErrorIfPossible(error.exception()); } else { fatalError(new KafkaException("Unhandled error in EndTxnResponse: " + error.message())); } } } private class AddOffsetsToTxnHandler extends TxnRequestHandler { private final AddOffsetsToTxnRequest.Builder builder; private final Map<TopicPartition, OffsetAndMetadata> offsets; private final ConsumerGroupMetadata groupMetadata; private AddOffsetsToTxnHandler(AddOffsetsToTxnRequest.Builder builder, Map<TopicPartition, OffsetAndMetadata> offsets, ConsumerGroupMetadata groupMetadata) { super("AddOffsetsToTxn"); this.builder = builder; this.offsets = offsets; this.groupMetadata = groupMetadata; } @Override AddOffsetsToTxnRequest.Builder requestBuilder() { return builder; } @Override Priority priority() { return Priority.ADD_PARTITIONS_OR_OFFSETS; } @Override public void handleResponse(AbstractResponse response) { AddOffsetsToTxnResponse addOffsetsToTxnResponse = (AddOffsetsToTxnResponse) response; Errors error = Errors.forCode(addOffsetsToTxnResponse.data().errorCode()); if (error == Errors.NONE) { log.debug("Successfully added partition for consumer group {} to transaction", builder.data.groupId()); // note the result 
is not completed until the TxnOffsetCommit returns pendingRequests.add(txnOffsetCommitHandler(result, offsets, groupMetadata)); transactionStarted = true; } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) { lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId); reenqueue(); } else if (error.exception() instanceof RetriableException) { reenqueue(); } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { abortableErrorIfPossible(error.exception()); } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) { // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator, // just treat it the same as PRODUCE_FENCED. fatalError(Errors.PRODUCER_FENCED.exception()); } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) { fatalError(error.exception()); } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) { abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId())); } else { fatalError(new KafkaException("Unexpected error in AddOffsetsToTxnResponse: " + error.message())); } } } private class TxnOffsetCommitHandler extends TxnRequestHandler { private final TxnOffsetCommitRequest.Builder builder; private TxnOffsetCommitHandler(TransactionalRequestResult result, TxnOffsetCommitRequest.Builder builder) { super(result); this.builder = builder; } @Override TxnOffsetCommitRequest.Builder requestBuilder() { return builder; } @Override Priority priority() { return Priority.ADD_PARTITIONS_OR_OFFSETS; } @Override FindCoordinatorRequest.CoordinatorType coordinatorType() { return FindCoordinatorRequest.CoordinatorType.GROUP; } @Override String coordinatorKey() { return builder.data.groupId(); } @Override public void handleResponse(AbstractResponse response) { TxnOffsetCommitResponse txnOffsetCommitResponse = (TxnOffsetCommitResponse) response; boolean coordinatorReloaded = false; 
Map<TopicPartition, Errors> errors = txnOffsetCommitResponse.errors(); log.debug("Received TxnOffsetCommit response for consumer group {}: {}", builder.data.groupId(), errors); for (Map.Entry<TopicPartition, Errors> entry : errors.entrySet()) { TopicPartition topicPartition = entry.getKey(); Errors error = entry.getValue(); if (error == Errors.NONE) { pendingTxnOffsetCommits.remove(topicPartition); } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR || error == Errors.REQUEST_TIMED_OUT) { if (!coordinatorReloaded) { coordinatorReloaded = true; lookupCoordinator(FindCoordinatorRequest.CoordinatorType.GROUP, builder.data.groupId()); } } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION || error == Errors.COORDINATOR_LOAD_IN_PROGRESS) { // If the topic is unknown or the coordinator is loading, retry with the current coordinator continue; } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) { abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId())); break; } else if (error == Errors.FENCED_INSTANCE_ID) { abortableError(error.exception()); break; } else if (error == Errors.UNKNOWN_MEMBER_ID || error == Errors.ILLEGAL_GENERATION) { abortableError(new CommitFailedException("Transaction offset Commit failed " + "due to consumer group metadata mismatch: " + error.exception().getMessage())); break; } else if (isFatalException(error)) { fatalError(error.exception()); break; } else { fatalError(new KafkaException("Unexpected error in TxnOffsetCommitResponse: " + error.message())); break; } } if (result.isCompleted()) { pendingTxnOffsetCommits.clear(); } else if (pendingTxnOffsetCommits.isEmpty()) { result.done(); } else { // Retry the commits which failed with a retriable error reenqueue(); } } } private boolean isFatalException(Errors error) { return error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED || error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED || error == 
Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT; } private static final class PendingStateTransition { private final TransactionalRequestResult result; private final State state; private final String operation; private PendingStateTransition( TransactionalRequestResult result, State state, String operation ) { this.result = result; this.state = state; this.operation = operation; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/TransactionalRequestResult.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.errors.TimeoutException;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

/**
 * Tracks the outcome of a single transactional request. The request completes either via
 * {@link #done()} or {@link #fail(RuntimeException)}, and callers block on {@link #await()}
 * (or the timed variant) until one of those happens.
 */
public final class TransactionalRequestResult {

    private final String operation;
    private final CountDownLatch latch;

    // Failure cause; written before the latch is released so awaiting threads observe it.
    private volatile RuntimeException error = null;
    // True once some caller has returned from await(), i.e. the outcome has been observed.
    private volatile boolean isAcked = false;

    public TransactionalRequestResult(String operation) {
        this(new CountDownLatch(1), operation);
    }

    private TransactionalRequestResult(CountDownLatch latch, String operation) {
        this.latch = latch;
        this.operation = operation;
    }

    /** Complete this result exceptionally; the cause is rethrown to threads blocked in await(). */
    public void fail(RuntimeException error) {
        this.error = error;
        this.latch.countDown();
    }

    /** Complete this result successfully, releasing any thread blocked in await(). */
    public void done() {
        this.latch.countDown();
    }

    /** Block until the request completes, rethrowing any failure cause. */
    public void await() {
        this.await(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    }

    /**
     * Block for up to the given timeout. Throws TimeoutException on expiry, rethrows the
     * failure cause on failed completion, and wraps interrupts in InterruptException.
     */
    public void await(long timeout, TimeUnit unit) {
        try {
            if (!latch.await(timeout, unit)) {
                throw new TimeoutException("Timeout expired after " + unit.toMillis(timeout)
                    + "ms while awaiting " + operation);
            }
            // Mark the outcome as observed before surfacing any error, mirroring the
            // original ordering (a failed result still counts as acknowledged).
            isAcked = true;
            if (error != null) {
                throw error;
            }
        } catch (InterruptedException e) {
            throw new InterruptException("Received interrupt while awaiting " + operation, e);
        }
    }

    public RuntimeException error() {
        return error;
    }

    public boolean isSuccessful() {
        return isCompleted() && error == null;
    }

    public boolean isCompleted() {
        return latch.getCount() == 0L;
    }

    public boolean isAcked() {
        return isAcked;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/TxnPartitionEntry.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.function.Consumer;

import org.apache.kafka.common.requests.ProduceResponse;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;

/**
 * Per-partition idempotent/transactional producer state: the producer id/epoch in use,
 * sequence-number bookkeeping, and the set of in-flight batches for one topic partition.
 */
class TxnPartitionEntry {

    // Orders in-flight batches primarily by producer id and epoch so that a batch from an
    // older producer session can never be confused with (and removed in place of) a batch
    // from the current session that happens to share a base sequence.
    // See https://github.com/apache/kafka/pull/12096#pullrequestreview-955554191 for details.
    private static final Comparator<ProducerBatch> PRODUCER_BATCH_COMPARATOR =
        Comparator.comparingLong(ProducerBatch::producerId)
            .thenComparingInt(ProducerBatch::producerEpoch)
            .thenComparingInt(ProducerBatch::baseSequence);

    // The producer id/epoch being used for this partition.
    ProducerIdAndEpoch producerIdAndEpoch;

    // The base sequence of the next batch bound for this partition.
    int nextSequence;

    // The sequence number of the last record of the last ack'd batch from this partition.
    // When there are no in-flight requests for the partition, lastAckedSequence == nextSequence - 1.
    int lastAckedSequence;

    // In-flight batches bound for this partition, ordered by sequence so that batch order is
    // preserved even when responses come back out of order during leader failover. A batch is
    // added when drained and removed when it completes (successfully or via fatal failure).
    SortedSet<ProducerBatch> inflightBatchesBySequence;

    // Last acknowledged offset for this partition, used to disambiguate UnknownProducer
    // responses caused by retention expiry from those caused by actual lost data.
    long lastAckedOffset;

    TxnPartitionEntry() {
        producerIdAndEpoch = ProducerIdAndEpoch.NONE;
        nextSequence = 0;
        lastAckedSequence = TransactionManager.NO_LAST_ACKED_SEQUENCE_NUMBER;
        lastAckedOffset = ProduceResponse.INVALID_OFFSET;
        inflightBatchesBySequence = new TreeSet<>(PRODUCER_BATCH_COMPARATOR);
    }

    /**
     * Apply {@code resetSequence} to every in-flight batch and rebuild the sorted set,
     * since mutating a batch's sequence in place would corrupt the existing ordering.
     */
    void resetSequenceNumbers(Consumer<ProducerBatch> resetSequence) {
        TreeSet<ProducerBatch> rebuilt = new TreeSet<>(PRODUCER_BATCH_COMPARATOR);
        for (ProducerBatch batch : inflightBatchesBySequence) {
            resetSequence.accept(batch);
            rebuilt.add(batch);
        }
        inflightBatchesBySequence = rebuilt;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import java.util.HashMap;
import java.util.Map;
import java.util.OptionalInt;
import java.util.OptionalLong;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.requests.ProduceResponse;
import org.apache.kafka.common.utils.PrimitiveRef;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;

/**
 * Map of per-partition producer state ({@link TxnPartitionEntry}) keyed by topic partition.
 */
class TxnPartitionMap {

    final Map<TopicPartition, TxnPartitionEntry> topicPartitions = new HashMap<>();

    /**
     * Look up the entry for the given partition, failing fast if sequence state was
     * never initialized for it (which would indicate a bookkeeping bug).
     */
    TxnPartitionEntry get(TopicPartition topicPartition) {
        TxnPartitionEntry entry = topicPartitions.get(topicPartition);
        if (entry != null)
            return entry;
        throw new IllegalStateException("Trying to get the sequence number for " + topicPartition +
            ", but the sequence number was never set for this partition.");
    }

    /** Return the entry for the partition, creating a fresh one on first use. */
    TxnPartitionEntry getOrCreate(TopicPartition topicPartition) {
        return topicPartitions.computeIfAbsent(topicPartition, tp -> new TxnPartitionEntry());
    }

    boolean contains(TopicPartition topicPartition) {
        return topicPartitions.containsKey(topicPartition);
    }

    void reset() {
        topicPartitions.clear();
    }

    /** Last acknowledged offset for the partition, or empty if none has been recorded. */
    OptionalLong lastAckedOffset(TopicPartition topicPartition) {
        TxnPartitionEntry entry = topicPartitions.get(topicPartition);
        if (entry == null || entry.lastAckedOffset == ProduceResponse.INVALID_OFFSET)
            return OptionalLong.empty();
        return OptionalLong.of(entry.lastAckedOffset);
    }

    /** Last acknowledged sequence for the partition, or empty if none has been recorded. */
    OptionalInt lastAckedSequence(TopicPartition topicPartition) {
        TxnPartitionEntry entry = topicPartitions.get(topicPartition);
        if (entry == null || entry.lastAckedSequence == TransactionManager.NO_LAST_ACKED_SEQUENCE_NUMBER)
            return OptionalInt.empty();
        return OptionalInt.of(entry.lastAckedSequence);
    }

    /**
     * Restart the sequence numbers for the given partition from zero under the new producer
     * id/epoch, renumbering every in-flight batch in its drained order.
     */
    void startSequencesAtBeginning(TopicPartition topicPartition, ProducerIdAndEpoch newProducerIdAndEpoch) {
        final PrimitiveRef.IntRef nextSequence = PrimitiveRef.ofInt(0);
        TxnPartitionEntry entry = get(topicPartition);
        entry.resetSequenceNumbers(inFlightBatch -> {
            inFlightBatch.resetProducerState(newProducerIdAndEpoch, nextSequence.value,
                inFlightBatch.isTransactional());
            nextSequence.value += inFlightBatch.recordCount;
        });
        entry.producerIdAndEpoch = newProducerIdAndEpoch;
        entry.nextSequence = nextSequence.value;
        entry.lastAckedSequence = TransactionManager.NO_LAST_ACKED_SEQUENCE_NUMBER;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Cluster.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;

/**
 * An immutable representation of a subset of the nodes, topics, and partitions in the Kafka cluster.
 * All collections exposed by this class are unmodifiable views built once in the constructor.
 */
public final class Cluster {

    private final boolean isBootstrapConfigured;
    // Nodes in randomized order (shuffled on construction).
    private final List<Node> nodes;
    private final Set<String> unauthorizedTopics;
    private final Set<String> invalidTopics;
    private final Set<String> internalTopics;
    private final Node controller;
    // Indexes over the same partition metadata, precomputed for the common lookups below.
    private final Map<TopicPartition, PartitionInfo> partitionsByTopicPartition;
    private final Map<String, List<PartitionInfo>> partitionsByTopic;
    private final Map<String, List<PartitionInfo>> availablePartitionsByTopic;
    private final Map<Integer, List<PartitionInfo>> partitionsByNode;
    private final Map<Integer, Node> nodesById;
    private final ClusterResource clusterResource;
    // Bidirectional topic name <-> id mapping.
    private final Map<String, Uuid> topicIds;
    private final Map<Uuid, String> topicNames;

    /**
     * Create a new cluster with the given id, nodes and partitions
     * @param nodes The nodes in the cluster
     * @param partitions Information about a subset of the topic-partitions this cluster hosts
     */
    public Cluster(String clusterId,
                   Collection<Node> nodes,
                   Collection<PartitionInfo> partitions,
                   Set<String> unauthorizedTopics,
                   Set<String> internalTopics) {
        this(clusterId, false, nodes, partitions, unauthorizedTopics, Collections.emptySet(), internalTopics, null, Collections.emptyMap());
    }

    /**
     * Create a new cluster with the given id, nodes and partitions
     * @param nodes The nodes in the cluster
     * @param partitions Information about a subset of the topic-partitions this cluster hosts
     */
    public Cluster(String clusterId,
                   Collection<Node> nodes,
                   Collection<PartitionInfo> partitions,
                   Set<String> unauthorizedTopics,
                   Set<String> internalTopics,
                   Node controller) {
        this(clusterId, false, nodes, partitions, unauthorizedTopics, Collections.emptySet(), internalTopics, controller, Collections.emptyMap());
    }

    /**
     * Create a new cluster with the given id, nodes and partitions
     * @param nodes The nodes in the cluster
     * @param partitions Information about a subset of the topic-partitions this cluster hosts
     */
    public Cluster(String clusterId,
                   Collection<Node> nodes,
                   Collection<PartitionInfo> partitions,
                   Set<String> unauthorizedTopics,
                   Set<String> invalidTopics,
                   Set<String> internalTopics,
                   Node controller) {
        this(clusterId, false, nodes, partitions, unauthorizedTopics, invalidTopics, internalTopics, controller, Collections.emptyMap());
    }

    /**
     * Create a new cluster with the given id, nodes, partitions and topicIds
     * @param nodes The nodes in the cluster
     * @param partitions Information about a subset of the topic-partitions this cluster hosts
     */
    public Cluster(String clusterId,
                   Collection<Node> nodes,
                   Collection<PartitionInfo> partitions,
                   Set<String> unauthorizedTopics,
                   Set<String> invalidTopics,
                   Set<String> internalTopics,
                   Node controller,
                   Map<String, Uuid> topicIds) {
        this(clusterId, false, nodes, partitions, unauthorizedTopics, invalidTopics, internalTopics, controller, topicIds);
    }

    private Cluster(String clusterId,
                    boolean isBootstrapConfigured,
                    Collection<Node> nodes,
                    Collection<PartitionInfo> partitions,
                    Set<String> unauthorizedTopics,
                    Set<String> invalidTopics,
                    Set<String> internalTopics,
                    Node controller,
                    Map<String, Uuid> topicIds) {
        this.isBootstrapConfigured = isBootstrapConfigured;
        this.clusterResource = new ClusterResource(clusterId);
        // make a randomized, unmodifiable copy of the nodes
        List<Node> copy = new ArrayList<>(nodes);
        Collections.shuffle(copy);
        this.nodes = Collections.unmodifiableList(copy);

        // Index the nodes for quick lookup
        Map<Integer, Node> tmpNodesById = new HashMap<>();
        Map<Integer, List<PartitionInfo>> tmpPartitionsByNode = new HashMap<>(nodes.size());
        for (Node node : nodes) {
            tmpNodesById.put(node.id(), node);
            // Populate the map here to make it easy to add the partitions per node efficiently when iterating over
            // the partitions
            tmpPartitionsByNode.put(node.id(), new ArrayList<>());
        }
        this.nodesById = Collections.unmodifiableMap(tmpNodesById);

        // index the partition infos by topic, topic+partition, and node
        // note that this code is performance sensitive if there are a large number of partitions so we are careful
        // to avoid unnecessary work
        Map<TopicPartition, PartitionInfo> tmpPartitionsByTopicPartition = new HashMap<>(partitions.size());
        Map<String, List<PartitionInfo>> tmpPartitionsByTopic = new HashMap<>();
        for (PartitionInfo p : partitions) {
            tmpPartitionsByTopicPartition.put(new TopicPartition(p.topic(), p.partition()), p);
            tmpPartitionsByTopic.computeIfAbsent(p.topic(), topic -> new ArrayList<>()).add(p);

            // The leader may not be known
            if (p.leader() == null || p.leader().isEmpty())
                continue;

            // If it is known, its node information should be available
            List<PartitionInfo> partitionsForNode = Objects.requireNonNull(tmpPartitionsByNode.get(p.leader().id()));
            partitionsForNode.add(p);
        }

        // Update the values of `tmpPartitionsByNode` to contain unmodifiable lists
        for (Map.Entry<Integer, List<PartitionInfo>> entry : tmpPartitionsByNode.entrySet()) {
            tmpPartitionsByNode.put(entry.getKey(), Collections.unmodifiableList(entry.getValue()));
        }

        // Populate `tmpAvailablePartitionsByTopic` and update the values of `tmpPartitionsByTopic` to contain
        // unmodifiable lists
        Map<String, List<PartitionInfo>> tmpAvailablePartitionsByTopic = new HashMap<>(tmpPartitionsByTopic.size());
        for (Map.Entry<String, List<PartitionInfo>> entry : tmpPartitionsByTopic.entrySet()) {
            String topic = entry.getKey();
            List<PartitionInfo> partitionsForTopic = Collections.unmodifiableList(entry.getValue());
            tmpPartitionsByTopic.put(topic, partitionsForTopic);
            // Optimise for the common case where all partitions are available
            boolean foundUnavailablePartition = partitionsForTopic.stream().anyMatch(p -> p.leader() == null);
            List<PartitionInfo> availablePartitionsForTopic;
            if (foundUnavailablePartition) {
                availablePartitionsForTopic = new ArrayList<>(partitionsForTopic.size());
                for (PartitionInfo p : partitionsForTopic) {
                    if (p.leader() != null)
                        availablePartitionsForTopic.add(p);
                }
                availablePartitionsForTopic = Collections.unmodifiableList(availablePartitionsForTopic);
            } else {
                availablePartitionsForTopic = partitionsForTopic;
            }
            tmpAvailablePartitionsByTopic.put(topic, availablePartitionsForTopic);
        }
        this.partitionsByTopicPartition = Collections.unmodifiableMap(tmpPartitionsByTopicPartition);
        this.partitionsByTopic = Collections.unmodifiableMap(tmpPartitionsByTopic);
        this.availablePartitionsByTopic = Collections.unmodifiableMap(tmpAvailablePartitionsByTopic);
        this.partitionsByNode = Collections.unmodifiableMap(tmpPartitionsByNode);
        this.topicIds = Collections.unmodifiableMap(topicIds);
        // Build the reverse (id -> name) index from the provided (name -> id) map.
        Map<Uuid, String> tmpTopicNames = new HashMap<>();
        topicIds.forEach((key, value) -> tmpTopicNames.put(value, key));
        this.topicNames = Collections.unmodifiableMap(tmpTopicNames);

        this.unauthorizedTopics = Collections.unmodifiableSet(unauthorizedTopics);
        this.invalidTopics = Collections.unmodifiableSet(invalidTopics);
        this.internalTopics = Collections.unmodifiableSet(internalTopics);
        this.controller = controller;
    }

    /**
     * Create an empty cluster instance with no nodes and no topic-partitions.
     */
    public static Cluster empty() {
        return new Cluster(null, new ArrayList<>(0), new ArrayList<>(0), Collections.emptySet(),
                Collections.emptySet(), null);
    }

    /**
     * Create a "bootstrap" cluster using the given list of host/ports
     * @param addresses The addresses
     * @return A cluster for these hosts/ports
     */
    public static Cluster bootstrap(List<InetSocketAddress> addresses) {
        List<Node> nodes = new ArrayList<>();
        // Bootstrap nodes get synthetic negative ids (-1, -2, ...) until real metadata arrives.
        int nodeId = -1;
        for (InetSocketAddress address : addresses)
            nodes.add(new Node(nodeId--, address.getHostString(), address.getPort()));
        return new Cluster(null, true, nodes, new ArrayList<>(0), Collections.emptySet(),
                Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap());
    }

    /**
     * Return a copy of this cluster combined with `partitions`.
     * NOTE(review): the copy is built via the constructor overload that uses an empty topic-id
     * map, so topic IDs of this cluster are not carried over to the result — confirm this is
     * intentional at the call sites.
     */
    public Cluster withPartitions(Map<TopicPartition, PartitionInfo> partitions) {
        Map<TopicPartition, PartitionInfo> combinedPartitions = new HashMap<>(this.partitionsByTopicPartition);
        combinedPartitions.putAll(partitions);
        return new Cluster(clusterResource.clusterId(), this.nodes, combinedPartitions.values(),
                new HashSet<>(this.unauthorizedTopics), new HashSet<>(this.invalidTopics),
                new HashSet<>(this.internalTopics), this.controller);
    }

    /**
     * @return The known set of nodes
     */
    public List<Node> nodes() {
        return this.nodes;
    }

    /**
     * Get the node by the node id (or null if the node is not online or does not exist)
     * @param id The id of the node
     * @return The node, or null if the node is not online or does not exist
     */
    public Node nodeById(int id) {
        return this.nodesById.get(id);
    }

    /**
     * Get the node by node id if the replica for the given partition is online
     * @param partition the topic-partition whose replica set is checked
     * @param id the id of the node hosting the replica
     * @return the node, if it is a known replica of the partition and not listed offline;
     *         empty otherwise
     */
    public Optional<Node> nodeIfOnline(TopicPartition partition, int id) {
        Node node = nodeById(id);
        PartitionInfo partitionInfo = partition(partition);
        if (node != null && partitionInfo != null &&
            !Arrays.asList(partitionInfo.offlineReplicas()).contains(node) &&
            Arrays.asList(partitionInfo.replicas()).contains(node)) {
            return Optional.of(node);
        } else {
            return Optional.empty();
        }
    }

    /**
     * Get the current leader for the given topic-partition
     * @param topicPartition The topic and partition we want to know the leader for
     * @return The node that is the leader for this topic-partition, or null if there is currently no leader
     */
    public Node leaderFor(TopicPartition topicPartition) {
        PartitionInfo info = partitionsByTopicPartition.get(topicPartition);
        if (info == null)
            return null;
        else
            return info.leader();
    }

    /**
     * Get the metadata for the specified partition
     * @param topicPartition The topic and partition to fetch info for
     * @return The metadata about the given topic and partition, or null if none is found
     */
    public PartitionInfo partition(TopicPartition topicPartition) {
        return partitionsByTopicPartition.get(topicPartition);
    }

    /**
     * Get the list of partitions for this topic
     * @param topic The topic name
     * @return A list of partitions
     */
    public List<PartitionInfo> partitionsForTopic(String topic) {
        return partitionsByTopic.getOrDefault(topic, Collections.emptyList());
    }

    /**
     * Get the number of partitions for the given topic.
     * @param topic The topic to get the number of partitions for
     * @return The number of partitions or null if there is no corresponding metadata
     */
    public Integer partitionCountForTopic(String topic) {
        List<PartitionInfo> partitions = this.partitionsByTopic.get(topic);
        return partitions == null ? null : partitions.size();
    }

    /**
     * Get the list of available partitions for this topic
     * @param topic The topic name
     * @return A list of partitions
     */
    public List<PartitionInfo> availablePartitionsForTopic(String topic) {
        return availablePartitionsByTopic.getOrDefault(topic, Collections.emptyList());
    }

    /**
     * Get the list of partitions whose leader is this node
     * @param nodeId The node id
     * @return A list of partitions
     */
    public List<PartitionInfo> partitionsForNode(int nodeId) {
        return partitionsByNode.getOrDefault(nodeId, Collections.emptyList());
    }

    /**
     * Get all topics.
     * @return a set of all topics
     */
    public Set<String> topics() {
        return partitionsByTopic.keySet();
    }

    public Set<String> unauthorizedTopics() {
        return unauthorizedTopics;
    }

    public Set<String> invalidTopics() {
        return invalidTopics;
    }

    public Set<String> internalTopics() {
        return internalTopics;
    }

    public boolean isBootstrapConfigured() {
        return isBootstrapConfigured;
    }

    public ClusterResource clusterResource() {
        return clusterResource;
    }

    public Node controller() {
        return controller;
    }

    public Collection<Uuid> topicIds() {
        return topicIds.values();
    }

    // Returns Uuid.ZERO_UUID (not null) when the topic has no known id.
    public Uuid topicId(String topic) {
        return topicIds.getOrDefault(topic, Uuid.ZERO_UUID);
    }

    public String topicName(Uuid topicId) {
        return topicNames.get(topicId);
    }

    @Override
    public String toString() {
        return "Cluster(id = " + clusterResource.clusterId() + ", nodes = " + this.nodes +
            ", partitions = " + this.partitionsByTopicPartition.values() + ", controller = " + controller + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Cluster cluster = (Cluster) o;
        return isBootstrapConfigured == cluster.isBootstrapConfigured &&
            Objects.equals(nodes, cluster.nodes) &&
            Objects.equals(unauthorizedTopics, cluster.unauthorizedTopics) &&
            Objects.equals(invalidTopics, cluster.invalidTopics) &&
            Objects.equals(internalTopics, cluster.internalTopics) &&
            Objects.equals(controller, cluster.controller) &&
            Objects.equals(partitionsByTopicPartition, cluster.partitionsByTopicPartition) &&
            Objects.equals(clusterResource, cluster.clusterResource);
    }

    @Override
    public int hashCode() {
        return Objects.hash(isBootstrapConfigured, nodes, unauthorizedTopics, invalidTopics, internalTopics,
            controller, partitionsByTopicPartition, clusterResource);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/ClusterResource.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.util.Objects;

/**
 * The <code>ClusterResource</code> class encapsulates metadata for a Kafka cluster.
 */
public class ClusterResource {

    private final String clusterId;

    /**
     * Create {@link ClusterResource} with a cluster id. Note that cluster id may be {@code null} if the
     * metadata request was sent to a broker without support for cluster ids. The first version of Kafka
     * to support cluster id is 0.10.1.0.
     * @param clusterId the cluster id, possibly {@code null}
     */
    public ClusterResource(String clusterId) {
        this.clusterId = clusterId;
    }

    /**
     * Return the cluster id. Note that it may be {@code null} if the metadata request was sent to a broker without
     * support for cluster ids. The first version of Kafka to support cluster id is 0.10.1.0.
     */
    public String clusterId() {
        return clusterId;
    }

    @Override
    public String toString() {
        return "ClusterResource(clusterId=" + clusterId + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        // Exact-class comparison (not instanceof), matching the original contract.
        return o != null && getClass() == o.getClass()
            && Objects.equals(clusterId, ((ClusterResource) o).clusterId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(clusterId);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/ClusterResourceListener.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

/**
 * A callback interface that users can implement when they wish to get notified about changes in the Cluster metadata.
 * <p>
 * Users who need access to cluster metadata in interceptors, metric reporters, serializers and deserializers
 * can implement this interface. The order of method calls for each of these types is described below.
 * <p>
 * <h4>Clients</h4>
 * There will be one invocation of {@link ClusterResourceListener#onUpdate(ClusterResource)} after each metadata response.
 * Note that the cluster id may be null when the Kafka broker version is below 0.10.1.0. If you receive a null cluster id,
 * you can expect it to always be null unless you have a cluster with multiple broker versions, which can happen if the
 * cluster is being upgraded while the client is running.
 * <p>
 * {@link org.apache.kafka.clients.producer.ProducerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after {@link org.apache.kafka.clients.producer.ProducerInterceptor#onSend(org.apache.kafka.clients.producer.ProducerRecord)}
 * but before {@link org.apache.kafka.clients.producer.ProducerInterceptor#onAcknowledgement(org.apache.kafka.clients.producer.RecordMetadata, Exception)}.
 * <p>
 * {@link org.apache.kafka.clients.consumer.ConsumerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.clients.consumer.ConsumerInterceptor#onConsume(org.apache.kafka.clients.consumer.ConsumerRecords)}
 * <p>
 * {@link org.apache.kafka.common.serialization.Serializer} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.common.serialization.Serializer#serialize(String, Object)}
 * <p>
 * {@link org.apache.kafka.common.serialization.Deserializer} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.common.serialization.Deserializer#deserialize(String, byte[])}
 * <p>
 * {@link org.apache.kafka.common.metrics.MetricsReporter} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after the first {@link org.apache.kafka.clients.producer.KafkaProducer#send(org.apache.kafka.clients.producer.ProducerRecord)} invocation for Producer metrics reporters
 * and after the first {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)} invocation for Consumer metrics
 * reporters. The reporter may receive metric events from the network layer before this method is invoked.
 * <h4>Broker</h4>
 * There is a single invocation of {@link ClusterResourceListener#onUpdate(ClusterResource)} on broker start-up and the cluster metadata will never change.
 * <p>
 * KafkaMetricsReporter : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked during the bootup of the Kafka broker. The reporter may receive metric events from the network layer before this method is invoked.
 * <p>
 * {@link org.apache.kafka.common.metrics.MetricsReporter} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked during the bootup of the Kafka broker. The reporter may receive metric events from the network layer before this method is invoked.
 */
public interface ClusterResourceListener {
    /**
     * A callback method that a user can implement to get updates for {@link ClusterResource}.
     * @param clusterResource cluster metadata
     */
    void onUpdate(ClusterResource clusterResource);
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Configurable.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; import java.util.Map; /** * A Mix-in style interface for classes that are instantiated by reflection and need to take configuration parameters */ public interface Configurable { /** * Configure this class with the given key-value pairs */ void configure(Map<String, ?> configs); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/ConsumerGroupState.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.util.Arrays;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * The consumer group state.
 */
public enum ConsumerGroupState {
    UNKNOWN("Unknown"),
    PREPARING_REBALANCE("PreparingRebalance"),
    COMPLETING_REBALANCE("CompletingRebalance"),
    STABLE("Stable"),
    DEAD("Dead"),
    EMPTY("Empty");

    // Reverse index from display name to constant, built once at class-initialization time.
    private final static Map<String, ConsumerGroupState> NAME_TO_ENUM = Arrays.stream(values())
        .collect(Collectors.toMap(ConsumerGroupState::toString, Function.identity()));

    private final String name;

    ConsumerGroupState(String name) {
        this.name = name;
    }

    /**
     * Parse a string into a consumer group state; names that do not match any
     * known state (including {@code null}) map to {@link #UNKNOWN}.
     */
    public static ConsumerGroupState parse(String name) {
        return NAME_TO_ENUM.getOrDefault(name, UNKNOWN);
    }

    @Override
    public String toString() {
        return name;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/ElectionType.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.Arrays;
import java.util.Set;

/**
 * Options for {@link org.apache.kafka.clients.admin.Admin#electLeaders(ElectionType, Set, org.apache.kafka.clients.admin.ElectLeadersOptions)}.
 *
 * The API of this class is evolving, see {@link org.apache.kafka.clients.admin.Admin} for details.
 */
@InterfaceStability.Evolving
public enum ElectionType {
    PREFERRED((byte) 0), UNCLEAN((byte) 1);

    // Wire-protocol byte identifying this election type.
    public final byte value;

    ElectionType(byte value) {
        this.value = value;
    }

    /**
     * Map a wire-protocol byte back to its {@code ElectionType}.
     *
     * @throws IllegalArgumentException if {@code value} matches no constant
     */
    public static ElectionType valueOf(byte value) {
        if (value == PREFERRED.value)
            return PREFERRED;
        if (value == UNCLEAN.value)
            return UNCLEAN;
        throw new IllegalArgumentException(
                String.format("Value %s must be one of %s", value, Arrays.asList(ElectionType.values())));
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Endpoint.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; import java.util.Objects; import java.util.Optional; import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.security.auth.SecurityProtocol; /** * Represents a broker endpoint. */ @InterfaceStability.Evolving public class Endpoint { private final String listenerName; private final SecurityProtocol securityProtocol; private final String host; private final int port; public Endpoint(String listenerName, SecurityProtocol securityProtocol, String host, int port) { this.listenerName = listenerName; this.securityProtocol = securityProtocol; this.host = host; this.port = port; } /** * Returns the listener name of this endpoint. This is non-empty for endpoints provided * to broker plugins, but may be empty when used in clients. */ public Optional<String> listenerName() { return Optional.ofNullable(listenerName); } /** * Returns the security protocol of this endpoint. */ public SecurityProtocol securityProtocol() { return securityProtocol; } /** * Returns advertised host name of this endpoint. */ public String host() { return host; } /** * Returns the port to which the listener is bound. 
*/ public int port() { return port; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Endpoint)) { return false; } Endpoint that = (Endpoint) o; return Objects.equals(this.listenerName, that.listenerName) && Objects.equals(this.securityProtocol, that.securityProtocol) && Objects.equals(this.host, that.host) && this.port == that.port; } @Override public int hashCode() { return Objects.hash(listenerName, securityProtocol, host, port); } @Override public String toString() { return "Endpoint(" + "listenerName='" + listenerName + '\'' + ", securityProtocol=" + securityProtocol + ", host='" + host + '\'' + ", port=" + port + ')'; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/InvalidRecordException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import org.apache.kafka.common.errors.ApiException;

/**
 * Thrown when a record fails validation (an {@link ApiException}, so it maps to a
 * broker error code).
 */
public class InvalidRecordException extends ApiException {

    private static final long serialVersionUID = 1;

    public InvalidRecordException(String s) {
        super(s);
    }

    public InvalidRecordException(String message, Throwable cause) {
        super(message, cause);
    }

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/IsolationLevel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; public enum IsolationLevel { READ_UNCOMMITTED((byte) 0), READ_COMMITTED((byte) 1); private final byte id; IsolationLevel(byte id) { this.id = id; } public byte id() { return id; } public static IsolationLevel forId(byte id) { switch (id) { case 0: return READ_UNCOMMITTED; case 1: return READ_COMMITTED; default: throw new IllegalArgumentException("Unknown isolation level " + id); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/KafkaException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; /** * The base class of all other Kafka exceptions */ public class KafkaException extends RuntimeException { private final static long serialVersionUID = 1L; public KafkaException(String message, Throwable cause) { super(message, cause); } public KafkaException(String message) { super(message); } public KafkaException(Throwable cause) { super(cause); } public KafkaException() { super(); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/KafkaFuture.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import org.apache.kafka.common.internals.KafkaFutureImpl;

import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * A flexible future which supports call chaining and other asynchronous programming patterns.
 *
 * <h3>Relation to {@code CompletionStage}</h3>
 * <p>It is possible to obtain a {@code CompletionStage} from a
 * {@code KafkaFuture} instance by calling {@link #toCompletionStage()}.
 * If converting {@link KafkaFuture#whenComplete(BiConsumer)} or {@link KafkaFuture#thenApply(BaseFunction)} to
 * {@link CompletableFuture#whenComplete(java.util.function.BiConsumer)} or
 * {@link CompletableFuture#thenApply(java.util.function.Function)} be aware that the returned
 * {@code KafkaFuture} will fail with an {@code ExecutionException}, whereas a {@code CompletionStage} fails
 * with a {@code CompletionException}.
 */
public abstract class KafkaFuture<T> implements Future<T> {

    /**
     * A function which takes objects of type A and returns objects of type B.
     */
    @FunctionalInterface
    public interface BaseFunction<A, B> {
        B apply(A a);
    }

    /**
     * A function which takes objects of type A and returns objects of type B.
     *
     * @deprecated Since Kafka 3.0. Use the {@link BaseFunction} functional interface.
     */
    @Deprecated
    public static abstract class Function<A, B> implements BaseFunction<A, B> { }

    /**
     * A consumer of two different types of object.
     */
    @FunctionalInterface
    public interface BiConsumer<A, B> {
        void accept(A a, B b);
    }

    /**
     * Returns a new KafkaFuture that is already completed with the given value.
     */
    public static <U> KafkaFuture<U> completedFuture(U value) {
        KafkaFuture<U> future = new KafkaFutureImpl<>();
        future.complete(value);
        return future;
    }

    /**
     * Returns a new KafkaFuture that is completed when all the given futures have completed. If
     * any future throws an exception, the returned future returns it. If multiple futures throw
     * an exception, which one gets returned is arbitrarily chosen.
     */
    public static KafkaFuture<Void> allOf(KafkaFuture<?>... futures) {
        KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
        CompletableFuture.allOf(Arrays.stream(futures)
            .map(kafkaFuture -> {
                // Cast is safe since KafkaFuture's only subclass is KafkaFutureImpl, for which
                // toCompletionStage() always returns a CompletableFuture.
                return (CompletableFuture<?>) kafkaFuture.toCompletionStage();
            })
            .toArray(CompletableFuture[]::new)).whenComplete((value, ex) -> {
                if (ex == null) {
                    result.complete(value);
                } else {
                    // Have to unwrap the CompletionException which allOf() introduced
                    result.completeExceptionally(ex.getCause());
                }
            });
        return result;
    }

    /**
     * Gets a {@code CompletionStage} with the same completion properties as this {@code KafkaFuture}.
     * The returned instance will complete when this future completes and in the same way
     * (with the same result or exception).
     *
     * <p>Calling {@code toCompletableFuture()} on the returned instance will yield a {@code CompletableFuture},
     * but invocation of the completion methods ({@code complete()} and other methods in the {@code complete*()}
     * and {@code obtrude*()} families) on that {@code CompletableFuture} instance will result in
     * {@code UnsupportedOperationException} being thrown. Unlike a "minimal" {@code CompletableFuture},
     * the {@code get*()} and other methods of {@code CompletableFuture} that are not inherited from
     * {@code CompletionStage} will work normally.
     *
     * <p>If you want to block on the completion of a KafkaFuture you should use
     * {@link #get()}, {@link #get(long, TimeUnit)} or {@link #getNow(Object)}, rather then calling
     * {@code .toCompletionStage().toCompletableFuture().get()} etc.
     *
     * @since Kafka 3.0
     */
    public abstract CompletionStage<T> toCompletionStage();

    /**
     * Returns a new KafkaFuture that, when this future completes normally, is executed with this
     * futures's result as the argument to the supplied function.
     *
     * The function may be invoked by the thread that calls {@code thenApply} or it may be invoked by the thread that
     * completes the future.
     */
    public abstract <R> KafkaFuture<R> thenApply(BaseFunction<T, R> function);

    /**
     * Prefer {@link KafkaFuture#thenApply(BaseFunction)} as this function is here for backwards compatibility reasons
     * and might be deprecated/removed in a future release.
     */
    public abstract <R> KafkaFuture<R> thenApply(Function<T, R> function);

    /**
     * Returns a new KafkaFuture with the same result or exception as this future, that executes the given action
     * when this future completes.
     *
     * When this future is done, the given action is invoked with the result (or null if none) and the exception
     * (or null if none) of this future as arguments.
     *
     * The returned future is completed when the action returns.
     * The supplied action should not throw an exception. However, if it does, the following rules apply:
     * if this future completed normally but the supplied action throws an exception, then the returned future completes
     * exceptionally with the supplied action's exception.
     * Or, if this future completed exceptionally and the supplied action throws an exception, then the returned future
     * completes exceptionally with this future's exception.
     *
     * The action may be invoked by the thread that calls {@code whenComplete} or it may be invoked by the thread that
     * completes the future.
     *
     * @param action the action to preform
     * @return the new future
     */
    public abstract KafkaFuture<T> whenComplete(BiConsumer<? super T, ? super Throwable> action);

    /**
     * If not already completed, sets the value returned by get() and related methods to the given
     * value.
     */
    protected abstract boolean complete(T newValue);

    /**
     * If not already completed, causes invocations of get() and related methods to throw the given
     * exception.
     */
    protected abstract boolean completeExceptionally(Throwable newException);

    /**
     * If not already completed, completes this future with a CancellationException. Dependent
     * futures that have not already completed will also complete exceptionally, with a
     * CompletionException caused by this CancellationException.
     */
    @Override
    public abstract boolean cancel(boolean mayInterruptIfRunning);

    /**
     * Waits if necessary for this future to complete, and then returns its result.
     */
    @Override
    public abstract T get() throws InterruptedException, ExecutionException;

    /**
     * Waits if necessary for at most the given time for this future to complete, and then returns
     * its result, if available.
     */
    @Override
    public abstract T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException,
        TimeoutException;

    /**
     * Returns the result value (or throws any encountered exception) if completed, else returns
     * the given valueIfAbsent.
     */
    public abstract T getNow(T valueIfAbsent) throws InterruptedException, ExecutionException;

    /**
     * Returns true if this CompletableFuture was cancelled before it completed normally.
     */
    @Override
    public abstract boolean isCancelled();

    /**
     * Returns true if this CompletableFuture completed exceptionally, in any way.
     */
    public abstract boolean isCompletedExceptionally();

    /**
     * Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
     */
    @Override
    public abstract boolean isDone();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/MessageFormatter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.io.Closeable;
import java.io.PrintStream;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * This interface allows to define Formatters that can be used to parse and format records read by a
 * Consumer instance for display.
 * The kafka-console-consumer has built-in support for MessageFormatter, via the --formatter flag.
 *
 * Kafka provides a few implementations to display records of internal topics such as __consumer_offsets,
 * __transaction_state and the MirrorMaker2 topics.
 */
public interface MessageFormatter extends Configurable, Closeable {

    /**
     * Configures the MessageFormatter. The default is a no-op so stateless
     * formatters need not override it.
     *
     * @param configs Map to configure the formatter
     */
    default void configure(Map<String, ?> configs) {}

    /**
     * Parses and formats a record for display.
     *
     * @param consumerRecord the record to format (raw key/value bytes)
     * @param output the print stream used to output the record
     */
    void writeTo(ConsumerRecord<byte[], byte[]> consumerRecord, PrintStream output);

    /**
     * Closes the formatter. The default is a no-op for formatters holding no resources.
     */
    default void close() {}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Metric.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

/**
 * A metric tracked for monitoring purposes.
 */
public interface Metric {

    /**
     * A name for this metric.
     */
    MetricName metricName();

    /**
     * The value of the metric, which may be measurable or a non-measurable gauge
     * (hence the {@code Object} return type rather than {@code double}).
     */
    Object metricValue();

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/MetricName.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; import java.util.Map; import java.util.Objects; /** * The <code>MetricName</code> class encapsulates a metric's name, logical group and its related attributes. It should be constructed using metrics.MetricName(...). * <p> * This class captures the following parameters: * <pre> * <b>name</b> The name of the metric * <b>group</b> logical group name of the metrics to which this metric belongs. * <b>description</b> A human-readable description to include in the metric. This is optional. * <b>tags</b> additional key/value attributes of the metric. This is optional. * </pre> * group, tags parameters can be used to create unique metric names while reporting in JMX or any custom reporting. 
* <p> * Ex: standard JMX MBean can be constructed like <b>domainName:type=group,key1=val1,key2=val2</b> * <p> * * Usage looks something like this: * <pre>{@code * // set up metrics: * * Map<String, String> metricTags = new LinkedHashMap<String, String>(); * metricTags.put("client-id", "producer-1"); * metricTags.put("topic", "topic"); * * MetricConfig metricConfig = new MetricConfig().tags(metricTags); * Metrics metrics = new Metrics(metricConfig); // this is the global repository of metrics and sensors * * Sensor sensor = metrics.sensor("message-sizes"); * * MetricName metricName = metrics.metricName("message-size-avg", "producer-metrics", "average message size"); * sensor.add(metricName, new Avg()); * * metricName = metrics.metricName("message-size-max", "producer-metrics"); * sensor.add(metricName, new Max()); * * metricName = metrics.metricName("message-size-min", "producer-metrics", "message minimum size", "client-id", "my-client", "topic", "my-topic"); * sensor.add(metricName, new Min()); * * // as messages are sent we record the sizes * sensor.record(messageSize); * }</pre> */ public final class MetricName { private final String name; private final String group; private final String description; private Map<String, String> tags; private int hash = 0; /** * Please create MetricName by method {@link org.apache.kafka.common.metrics.Metrics#metricName(String, String, String, Map)} * * @param name The name of the metric * @param group logical group name of the metrics to which this metric belongs * @param description A human-readable description to include in the metric * @param tags additional key/value attributes of the metric */ public MetricName(String name, String group, String description, Map<String, String> tags) { this.name = Objects.requireNonNull(name); this.group = Objects.requireNonNull(group); this.description = Objects.requireNonNull(description); this.tags = Objects.requireNonNull(tags); } public String name() { return this.name; } public String 
group() { return this.group; } public Map<String, String> tags() { return this.tags; } public String description() { return this.description; } @Override public int hashCode() { if (hash != 0) return hash; final int prime = 31; int result = 1; result = prime * result + group.hashCode(); result = prime * result + name.hashCode(); result = prime * result + tags.hashCode(); this.hash = result; return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; MetricName other = (MetricName) obj; return group.equals(other.group) && name.equals(other.name) && tags.equals(other.tags); } @Override public String toString() { return "MetricName [name=" + name + ", group=" + group + ", description=" + description + ", tags=" + tags + "]"; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/MetricNameTemplate.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; import java.util.Collections; import java.util.LinkedHashSet; import java.util.Objects; import java.util.Set; /** * A template for a MetricName. It contains a name, group, and description, as * well as all the tags that will be used to create the mBean name. Tag values * are omitted from the template, but are filled in at runtime with their * specified values. The order of the tags is maintained, if an ordered set * is provided, so that the mBean names can be compared and sorted lexicographically. */ public class MetricNameTemplate { private final String name; private final String group; private final String description; private LinkedHashSet<String> tags; /** * Create a new template. Note that the order of the tags will be preserved if the supplied * {@code tagsNames} set has an order. 
* * @param name the name of the metric; may not be null * @param group the name of the group; may not be null * @param description the description of the metric; may not be null * @param tagsNames the set of metric tag names, which can/should be a set that maintains order; may not be null */ public MetricNameTemplate(String name, String group, String description, Set<String> tagsNames) { this.name = Objects.requireNonNull(name); this.group = Objects.requireNonNull(group); this.description = Objects.requireNonNull(description); this.tags = new LinkedHashSet<>(Objects.requireNonNull(tagsNames)); } /** * Create a new template. Note that the order of the tags will be preserved. * * @param name the name of the metric; may not be null * @param group the name of the group; may not be null * @param description the description of the metric; may not be null * @param tagsNames the names of the metric tags in the preferred order; none of the tag names should be null */ public MetricNameTemplate(String name, String group, String description, String... tagsNames) { this(name, group, description, getTags(tagsNames)); } private static LinkedHashSet<String> getTags(String... keys) { LinkedHashSet<String> tags = new LinkedHashSet<>(); Collections.addAll(tags, keys); return tags; } /** * Get the name of the metric. * * @return the metric name; never null */ public String name() { return this.name; } /** * Get the name of the group. * * @return the group name; never null */ public String group() { return this.group; } /** * Get the description of the metric. * * @return the metric description; never null */ public String description() { return this.description; } /** * Get the set of tag names for the metric. 
* * @return the ordered set of tag names; never null but possibly empty */ public Set<String> tags() { return tags; } @Override public int hashCode() { return Objects.hash(name, group, tags); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; MetricNameTemplate other = (MetricNameTemplate) o; return Objects.equals(name, other.name) && Objects.equals(group, other.group) && Objects.equals(tags, other.tags); } @Override public String toString() { return String.format("name=%s, group=%s, tags=%s", name, group, tags); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Node.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; import java.util.Objects; /** * Information about a Kafka node */ public class Node { private static final Node NO_NODE = new Node(-1, "", -1); private final int id; private final String idString; private final String host; private final int port; private final String rack; // Cache hashCode as it is called in performance sensitive parts of the code (e.g. RecordAccumulator.ready) private Integer hash; public Node(int id, String host, int port) { this(id, host, port, null); } public Node(int id, String host, int port, String rack) { this.id = id; this.idString = Integer.toString(id); this.host = host; this.port = port; this.rack = rack; } public static Node noNode() { return NO_NODE; } /** * Check whether this node is empty, which may be the case if noNode() is used as a placeholder * in a response payload with an error. * @return true if it is, false otherwise */ public boolean isEmpty() { return host == null || host.isEmpty() || port < 0; } /** * The node id of this node */ public int id() { return id; } /** * String representation of the node id. 
* Typically the integer id is used to serialize over the wire, the string representation is used as an identifier with NetworkClient code */ public String idString() { return idString; } /** * The host name for this node */ public String host() { return host; } /** * The port for this node */ public int port() { return port; } /** * True if this node has a defined rack */ public boolean hasRack() { return rack != null; } /** * The rack for this node */ public String rack() { return rack; } @Override public int hashCode() { Integer h = this.hash; if (h == null) { int result = 31 + ((host == null) ? 0 : host.hashCode()); result = 31 * result + id; result = 31 * result + port; result = 31 * result + ((rack == null) ? 0 : rack.hashCode()); this.hash = result; return result; } else { return h; } } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null || getClass() != obj.getClass()) return false; Node other = (Node) obj; return id == other.id && port == other.port && Objects.equals(host, other.host) && Objects.equals(rack, other.rack); } @Override public String toString() { return host + ":" + port + " (id: " + idString + " rack: " + rack + ")"; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/PartitionInfo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

/**
 * This is used to describe per-partition state in the MetadataResponse.
 */
public class PartitionInfo {

    private final String topic;
    private final int partition;
    private final Node leader;
    private final Node[] replicas;
    private final Node[] inSyncReplicas;
    private final Node[] offlineReplicas;

    public PartitionInfo(String topic, int partition, Node leader, Node[] replicas, Node[] inSyncReplicas) {
        this(topic, partition, leader, replicas, inSyncReplicas, new Node[0]);
    }

    public PartitionInfo(String topic,
                         int partition,
                         Node leader,
                         Node[] replicas,
                         Node[] inSyncReplicas,
                         Node[] offlineReplicas) {
        this.topic = topic;
        this.partition = partition;
        this.leader = leader;
        this.replicas = replicas;
        this.inSyncReplicas = inSyncReplicas;
        this.offlineReplicas = offlineReplicas;
    }

    /**
     * The topic name
     */
    public String topic() {
        return topic;
    }

    /**
     * The partition id
     */
    public int partition() {
        return partition;
    }

    /**
     * The node currently acting as leader for this partition, or null if there is no leader
     */
    public Node leader() {
        return leader;
    }

    /**
     * The complete set of replicas for this partition regardless of whether they are alive or up-to-date
     */
    public Node[] replicas() {
        return replicas;
    }

    /**
     * The subset of the replicas that are in sync, that is caught-up to the leader and ready
     * to take over as leader if the leader should fail
     */
    public Node[] inSyncReplicas() {
        return inSyncReplicas;
    }

    /**
     * The subset of the replicas that are offline
     */
    public Node[] offlineReplicas() {
        return offlineReplicas;
    }

    @Override
    public String toString() {
        return String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
                             topic,
                             partition,
                             leader == null ? "none" : leader.idString(),
                             formatNodeIds(replicas),
                             formatNodeIds(inSyncReplicas),
                             formatNodeIds(offlineReplicas));
    }

    /* Extract the node ids from each item in the array and format for display */
    private String formatNodeIds(Node[] nodes) {
        StringBuilder out = new StringBuilder("[");
        if (nodes != null) {
            String separator = "";
            for (Node node : nodes) {
                out.append(separator).append(node.idString());
                separator = ",";
            }
        }
        return out.append("]").toString();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Reconfigurable.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import org.apache.kafka.common.config.ConfigException;

import java.util.Map;
import java.util.Set;

/**
 * Interface for reconfigurable classes that support dynamic configuration.
 * Implementations are first configured via {@link Configurable#configure(Map)} and may
 * later be updated at runtime through {@link #reconfigure(Map)}.
 */
public interface Reconfigurable extends Configurable {

    /**
     * Returns the names of configs that may be reconfigured.
     */
    Set<String> reconfigurableConfigs();

    /**
     * Validates the provided configuration. The provided map contains
     * all configs including any reconfigurable configs that may be different
     * from the initial configuration. Reconfiguration will be not performed
     * if this method throws any exception.
     *
     * @throws ConfigException if the provided configs are not valid. The exception
     *         message from ConfigException will be returned to the client in
     *         the AlterConfigs response.
     */
    void validateReconfiguration(Map<String, ?> configs) throws ConfigException;

    /**
     * Reconfigures this instance with the given key-value pairs. The provided
     * map contains all configs including any reconfigurable configs that
     * may have changed since the object was initially configured using
     * {@link Configurable#configure(Map)}. This method will only be invoked if
     * the configs have passed validation using {@link #validateReconfiguration(Map)}.
     */
    void reconfigure(Map<String, ?> configs);
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/TopicCollection.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;

/**
 * A class used to represent a collection of topics. This collection may define topics
 * by name or by ID.
 */
public abstract class TopicCollection {

    // Private constructor: the only subclasses are the two nested classes below.
    private TopicCollection() {
    }

    /**
     * @return a collection of topics defined by topic ID
     */
    public static TopicIdCollection ofTopicIds(Collection<Uuid> topics) {
        return new TopicIdCollection(topics);
    }

    /**
     * @return a collection of topics defined by topic name
     */
    public static TopicNameCollection ofTopicNames(Collection<String> topics) {
        return new TopicNameCollection(topics);
    }

    /**
     * A class used to represent a collection of topics defined by their topic ID.
     * Subclassing this class beyond the classes provided here is not supported.
     */
    public static class TopicIdCollection extends TopicCollection {

        private final Collection<Uuid> ids;

        private TopicIdCollection(Collection<Uuid> ids) {
            // Defensive copy so later mutation of the caller's collection has no effect.
            this.ids = new ArrayList<>(ids);
        }

        /**
         * @return A collection of topic IDs
         */
        public Collection<Uuid> topicIds() {
            return Collections.unmodifiableCollection(ids);
        }
    }

    /**
     * A class used to represent a collection of topics defined by their topic name.
     * Subclassing this class beyond the classes provided here is not supported.
     */
    public static class TopicNameCollection extends TopicCollection {

        private final Collection<String> names;

        private TopicNameCollection(Collection<String> names) {
            // Defensive copy so later mutation of the caller's collection has no effect.
            this.names = new ArrayList<>(names);
        }

        /**
         * @return A collection of topic names
         */
        public Collection<String> topicNames() {
            return Collections.unmodifiableCollection(names);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/TopicIdPartition.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.util.Objects;

/**
 * This represents a universally unique identifier paired with a topic partition. It makes
 * sure that topics recreated with the same name will always have unique topic identifiers.
 */
public class TopicIdPartition {

    private final Uuid topicId;
    private final TopicPartition topicPartition;

    /**
     * Create an instance with the provided parameters.
     *
     * @param topicId the topic id
     * @param topicPartition the topic partition
     */
    public TopicIdPartition(Uuid topicId, TopicPartition topicPartition) {
        this.topicId = Objects.requireNonNull(topicId, "topicId can not be null");
        this.topicPartition = Objects.requireNonNull(topicPartition, "topicPartition can not be null");
    }

    /**
     * Create an instance with the provided parameters.
     *
     * @param topicId the topic id
     * @param partition the partition id
     * @param topic the topic name or null
     */
    public TopicIdPartition(Uuid topicId, int partition, String topic) {
        this.topicId = Objects.requireNonNull(topicId, "topicId can not be null");
        this.topicPartition = new TopicPartition(topic, partition);
    }

    /**
     * @return Universally unique id representing this topic partition.
     */
    public Uuid topicId() {
        return topicId;
    }

    /**
     * @return the topic name or null if it is unknown.
     */
    public String topic() {
        return topicPartition.topic();
    }

    /**
     * @return the partition id.
     */
    public int partition() {
        return topicPartition.partition();
    }

    /**
     * @return Topic partition representing this instance.
     */
    public TopicPartition topicPartition() {
        return topicPartition;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        TopicIdPartition other = (TopicIdPartition) o;
        return topicId.equals(other.topicId) && topicPartition.equals(other.topicPartition);
    }

    @Override
    public int hashCode() {
        // Objects.hash(a, b) expands to 31 * (31 + a.hashCode()) + b.hashCode(),
        // matching the manual formula used previously.
        return Objects.hash(topicId, topicPartition);
    }

    @Override
    public String toString() {
        return topicId + ":" + topic() + "-" + partition();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/TopicPartition.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.io.Serializable;
import java.util.Objects;

/**
 * A topic name and partition number
 */
public final class TopicPartition implements Serializable {
    private static final long serialVersionUID = -613627415771699627L;

    // Cached hash; 0 doubles as the "not yet computed" sentinel.
    private int hash = 0;
    private final int partition;
    private final String topic;

    public TopicPartition(String topic, int partition) {
        this.partition = partition;
        this.topic = topic;
    }

    public int partition() {
        return partition;
    }

    public String topic() {
        return topic;
    }

    @Override
    public int hashCode() {
        int h = this.hash;
        if (h != 0)
            return h;
        h = 31 + partition;
        h = 31 * h + Objects.hashCode(topic);
        this.hash = h;
        return h;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        TopicPartition that = (TopicPartition) obj;
        return partition == that.partition && Objects.equals(topic, that.topic);
    }

    @Override
    public String toString() {
        return topic + "-" + partition;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/TopicPartitionInfo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import org.apache.kafka.common.utils.Utils;

import java.util.Collections;
import java.util.List;
import java.util.Objects;

/**
 * A class containing leadership, replicas and ISR information for a topic partition.
 */
public class TopicPartitionInfo {

    private final int partition;
    private final Node leader;
    private final List<Node> replicas;
    private final List<Node> isr;

    /**
     * Create an instance of this class with the provided parameters.
     *
     * @param partition the partition id
     * @param leader the leader of the partition or {@link Node#noNode()} if there is none.
     * @param replicas the replicas of the partition in the same order as the replica assignment
     *        (the preferred replica is the head of the list)
     * @param isr the in-sync replicas
     */
    public TopicPartitionInfo(int partition, Node leader, List<Node> replicas, List<Node> isr) {
        this.partition = partition;
        this.leader = leader;
        // Expose read-only views so callers cannot mutate our state.
        this.replicas = Collections.unmodifiableList(replicas);
        this.isr = Collections.unmodifiableList(isr);
    }

    /**
     * Return the partition id.
     */
    public int partition() {
        return partition;
    }

    /**
     * Return the leader of the partition or null if there is none.
     */
    public Node leader() {
        return leader;
    }

    /**
     * Return the replicas of the partition in the same order as the replica assignment.
     * The preferred replica is the head of the list.
     *
     * Brokers with version lower than 0.11.0.0 return the replicas in unspecified order due to a bug.
     */
    public List<Node> replicas() {
        return replicas;
    }

    /**
     * Return the in-sync replicas of the partition. Note that the ordering of the result is unspecified.
     */
    public List<Node> isr() {
        return isr;
    }

    @Override
    public String toString() {
        return "(partition=" + partition + ", leader=" + leader + ", replicas=" +
            Utils.join(replicas, ", ") + ", isr=" + Utils.join(isr, ", ") + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        TopicPartitionInfo that = (TopicPartitionInfo) o;
        return partition == that.partition
            && Objects.equals(leader, that.leader)
            && Objects.equals(replicas, that.replicas)
            && Objects.equals(isr, that.isr);
    }

    @Override
    public int hashCode() {
        int result = partition;
        result = 31 * result + Objects.hashCode(leader);
        result = 31 * result + Objects.hashCode(replicas);
        result = 31 * result + Objects.hashCode(isr);
        return result;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/TopicPartitionReplica.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.io.Serializable;
import java.util.Objects;

/**
 * The topic name, partition number and the brokerId of the replica
 */
public final class TopicPartitionReplica implements Serializable {

    // Cached hash; 0 doubles as the "not yet computed" sentinel.
    private int hash = 0;
    private final int brokerId;
    private final int partition;
    private final String topic;

    public TopicPartitionReplica(String topic, int partition, int brokerId) {
        this.topic = Objects.requireNonNull(topic);
        this.partition = partition;
        this.brokerId = brokerId;
    }

    public String topic() {
        return topic;
    }

    public int partition() {
        return partition;
    }

    public int brokerId() {
        return brokerId;
    }

    @Override
    public int hashCode() {
        int h = this.hash;
        if (h != 0)
            return h;
        h = 31 + topic.hashCode();
        h = 31 * h + partition;
        h = 31 * h + brokerId;
        this.hash = h;
        return h;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        TopicPartitionReplica that = (TopicPartitionReplica) obj;
        return partition == that.partition
            && brokerId == that.brokerId
            && topic.equals(that.topic);
    }

    @Override
    public String toString() {
        return String.format("%s-%d-%d", topic, partition, brokerId);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/Uuid.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common;

import java.nio.ByteBuffer;
import java.util.Base64;

/**
 * This class defines an immutable universally unique identifier (UUID). It represents a 128-bit value.
 * More specifically, the random UUIDs generated by this class are variant 2 (Leach-Salz) version 4 UUIDs.
 * This is the same type of UUID as the ones generated by java.util.UUID. The toString() method prints
 * using the base64 string encoding. Likewise, the fromString method expects a base64 string encoding.
 */
public class Uuid implements Comparable<Uuid> {

    /**
     * A UUID for the metadata topic in KRaft mode. Will never be returned by the randomUuid method.
     */
    public static final Uuid METADATA_TOPIC_ID = new Uuid(0L, 1L);

    /**
     * A UUID that represents a null or empty UUID. Will never be returned by the randomUuid method.
     */
    public static final Uuid ZERO_UUID = new Uuid(0L, 0L);

    private final long mostSignificantBits;
    private final long leastSignificantBits;

    /**
     * Constructs a 128-bit type 4 UUID where the first long represents the most significant 64 bits
     * and the second long represents the least significant 64 bits.
     */
    public Uuid(long mostSigBits, long leastSigBits) {
        this.mostSignificantBits = mostSigBits;
        this.leastSignificantBits = leastSigBits;
    }

    // Produces a random Uuid without filtering out the reserved values.
    private static Uuid unsafeRandomUuid() {
        java.util.UUID base = java.util.UUID.randomUUID();
        return new Uuid(base.getMostSignificantBits(), base.getLeastSignificantBits());
    }

    /**
     * Static factory to retrieve a type 4 (pseudo randomly generated) UUID.
     *
     * This will not generate a UUID equal to 0, 1, or one whose string representation
     * starts with a dash ("-").
     */
    public static Uuid randomUuid() {
        Uuid candidate;
        do {
            candidate = unsafeRandomUuid();
        } while (candidate.equals(METADATA_TOPIC_ID)
                || candidate.equals(ZERO_UUID)
                || candidate.toString().startsWith("-"));
        return candidate;
    }

    /**
     * Returns the most significant bits of the UUID's 128 bit value.
     */
    public long getMostSignificantBits() {
        return mostSignificantBits;
    }

    /**
     * Returns the least significant bits of the UUID's 128 bit value.
     */
    public long getLeastSignificantBits() {
        return leastSignificantBits;
    }

    /**
     * Returns true iff obj is another Uuid represented by the same two long values.
     */
    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass())
            return false;
        Uuid that = (Uuid) obj;
        return mostSignificantBits == that.mostSignificantBits
            && leastSignificantBits == that.leastSignificantBits;
    }

    /**
     * Returns a hash code for this UUID
     */
    @Override
    public int hashCode() {
        long xor = mostSignificantBits ^ leastSignificantBits;
        return (int) (xor >> 32) ^ (int) xor;
    }

    /**
     * Returns a base64 string encoding of the UUID.
     */
    @Override
    public String toString() {
        return Base64.getUrlEncoder().withoutPadding().encodeToString(getBytesFromUuid());
    }

    /**
     * Creates a UUID based on a base64 string encoding used in the toString() method.
     */
    public static Uuid fromString(String str) {
        if (str.length() > 24) {
            throw new IllegalArgumentException("Input string with prefix `" +
                str.substring(0, 24) + "` is too long to be decoded as a base64 UUID");
        }

        ByteBuffer uuidBytes = ByteBuffer.wrap(Base64.getUrlDecoder().decode(str));
        if (uuidBytes.remaining() != 16) {
            throw new IllegalArgumentException("Input string `" + str + "` decoded as " +
                uuidBytes.remaining() + " bytes, which is not equal to the expected 16 bytes " +
                "of a base64-encoded UUID");
        }

        return new Uuid(uuidBytes.getLong(), uuidBytes.getLong());
    }

    // Serializes this 128-bit (16 byte) UUID into a big-endian byte array.
    private byte[] getBytesFromUuid() {
        ByteBuffer uuidBytes = ByteBuffer.allocate(16);
        uuidBytes.putLong(mostSignificantBits);
        uuidBytes.putLong(leastSignificantBits);
        return uuidBytes.array();
    }

    @Override
    public int compareTo(Uuid other) {
        // Signed ordering: most significant bits first, then least significant bits.
        if (mostSignificantBits != other.mostSignificantBits)
            return mostSignificantBits > other.mostSignificantBits ? 1 : -1;
        if (leastSignificantBits != other.leastSignificantBits)
            return leastSignificantBits > other.leastSignificantBits ? 1 : -1;
        return 0;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides shared functionality for Kafka clients and servers. */ package org.apache.kafka.common;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AccessControlEntry.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.Objects;

/**
 * Represents an access control entry. ACEs are a tuple of principal, host, operation, and permissionType.
 *
 * The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
 */
@InterfaceStability.Evolving
public class AccessControlEntry {

    final AccessControlEntryData data;

    /**
     * Create an instance of an access control entry with the provided parameters.
     *
     * @param principal non-null principal
     * @param host non-null host
     * @param operation non-null operation, ANY is not an allowed operation
     * @param permissionType non-null permission type, ANY is not an allowed type
     */
    public AccessControlEntry(String principal, String host, AclOperation operation, AclPermissionType permissionType) {
        // Keep the original validation order: operation is checked before permissionType
        // so the exception type thrown for a given bad input is unchanged.
        Objects.requireNonNull(principal);
        Objects.requireNonNull(host);
        Objects.requireNonNull(operation);
        if (operation == AclOperation.ANY)
            throw new IllegalArgumentException("operation must not be ANY");
        Objects.requireNonNull(permissionType);
        if (permissionType == AclPermissionType.ANY)
            throw new IllegalArgumentException("permissionType must not be ANY");
        this.data = new AccessControlEntryData(principal, host, operation, permissionType);
    }

    /**
     * Return the principal for this entry.
     */
    public String principal() {
        return data.principal();
    }

    /**
     * Return the host or `*` for all hosts.
     */
    public String host() {
        return data.host();
    }

    /**
     * Return the AclOperation. This method will never return AclOperation.ANY.
     */
    public AclOperation operation() {
        return data.operation();
    }

    /**
     * Return the AclPermissionType. This method will never return AclPermissionType.ANY.
     */
    public AclPermissionType permissionType() {
        return data.permissionType();
    }

    /**
     * Create a filter which matches only this AccessControlEntry.
     */
    public AccessControlEntryFilter toFilter() {
        return new AccessControlEntryFilter(data);
    }

    @Override
    public String toString() {
        return data.toString();
    }

    /**
     * Return true if this AclResource has any UNKNOWN components.
     */
    public boolean isUnknown() {
        return data.isUnknown();
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof AccessControlEntry))
            return false;
        AccessControlEntry that = (AccessControlEntry) o;
        return data.equals(that.data);
    }

    @Override
    public int hashCode() {
        return data.hashCode();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AccessControlEntryData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import java.util.Objects;

/**
 * An internal, private class which contains the data stored in AccessControlEntry and
 * AccessControlEntryFilter objects.
 */
class AccessControlEntryData {

    private final String principal;
    private final String host;
    private final AclOperation operation;
    private final AclPermissionType permissionType;

    AccessControlEntryData(String principal, String host, AclOperation operation, AclPermissionType permissionType) {
        this.principal = principal;
        this.host = host;
        this.operation = operation;
        this.permissionType = permissionType;
    }

    String principal() {
        return principal;
    }

    String host() {
        return host;
    }

    AclOperation operation() {
        return operation;
    }

    AclPermissionType permissionType() {
        return permissionType;
    }

    /**
     * Returns a string describing an ANY or UNKNOWN field, or null if there is
     * no such field.
     */
    public String findIndefiniteField() {
        if (principal == null)
            return "Principal is NULL";
        if (host == null)
            return "Host is NULL";
        if (operation == AclOperation.ANY)
            return "Operation is ANY";
        if (operation == AclOperation.UNKNOWN)
            return "Operation is UNKNOWN";
        if (permissionType == AclPermissionType.ANY)
            return "Permission type is ANY";
        if (permissionType == AclPermissionType.UNKNOWN)
            return "Permission type is UNKNOWN";
        return null;
    }

    @Override
    public String toString() {
        return "(principal=" + (principal == null ? "<any>" : principal) +
            ", host=" + (host == null ? "<any>" : host) +
            ", operation=" + operation +
            ", permissionType=" + permissionType + ")";
    }

    /**
     * Return true if there are any UNKNOWN components.
     */
    boolean isUnknown() {
        return operation.isUnknown() || permissionType.isUnknown();
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof AccessControlEntryData))
            return false;
        AccessControlEntryData that = (AccessControlEntryData) o;
        return Objects.equals(principal, that.principal)
            && Objects.equals(host, that.host)
            && Objects.equals(operation, that.operation)
            && Objects.equals(permissionType, that.permissionType);
    }

    @Override
    public int hashCode() {
        return Objects.hash(principal, host, operation, permissionType);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AccessControlEntryFilter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.Objects;

/**
 * Represents a filter which matches access control entries.
 *
 * The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
 */
@InterfaceStability.Evolving
public class AccessControlEntryFilter {
    // All filter state lives in the shared data holder; this class only adds filter semantics.
    private final AccessControlEntryData data;

    /**
     * Matches any access control entry.
     */
    public static final AccessControlEntryFilter ANY =
        new AccessControlEntryFilter(null, null, AclOperation.ANY, AclPermissionType.ANY);

    /**
     * Create an instance of an access control entry filter with the provided parameters.
     *
     * @param principal the principal or null
     * @param host the host or null
     * @param operation non-null operation
     * @param permissionType non-null permission type
     */
    public AccessControlEntryFilter(String principal, String host, AclOperation operation, AclPermissionType permissionType) {
        Objects.requireNonNull(operation);
        Objects.requireNonNull(permissionType);
        this.data = new AccessControlEntryData(principal, host, operation, permissionType);
    }

    /**
     * This is a non-public constructor used in AccessControlEntry#toFilter
     *
     * @param data The access control data.
     */
    AccessControlEntryFilter(AccessControlEntryData data) {
        this.data = data;
    }

    /**
     * Return the principal or null.
     */
    public String principal() {
        return data.principal();
    }

    /**
     * Return the host or null. The value `*` means any host.
     */
    public String host() {
        return data.host();
    }

    /**
     * Return the AclOperation.
     */
    public AclOperation operation() {
        return data.operation();
    }

    /**
     * Return the AclPermissionType.
     */
    public AclPermissionType permissionType() {
        return data.permissionType();
    }

    @Override
    public String toString() {
        return data.toString();
    }

    /**
     * Return true if there are any UNKNOWN components.
     */
    public boolean isUnknown() {
        return data.isUnknown();
    }

    /**
     * Returns true if this filter matches the given AccessControlEntry.
     */
    public boolean matches(AccessControlEntry other) {
        // A null principal/host or an ANY operation/permission acts as a wildcard.
        String p = principal();
        if (p != null && !p.equals(other.principal()))
            return false;
        String h = host();
        if (h != null && !h.equals(other.host()))
            return false;
        AclOperation op = operation();
        if (op != AclOperation.ANY && !op.equals(other.operation()))
            return false;
        AclPermissionType pt = permissionType();
        return pt == AclPermissionType.ANY || pt.equals(other.permissionType());
    }

    /**
     * Returns true if this filter could only match one ACE -- in other words, if
     * there are no ANY or UNKNOWN fields.
     */
    public boolean matchesAtMostOne() {
        return findIndefiniteField() == null;
    }

    /**
     * Returns a string describing an ANY or UNKNOWN field, or null if there is
     * no such field.
     */
    public String findIndefiniteField() {
        return data.findIndefiniteField();
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof AccessControlEntryFilter))
            return false;
        AccessControlEntryFilter other = (AccessControlEntryFilter) o;
        return data.equals(other.data);
    }

    @Override
    public int hashCode() {
        return data.hashCode();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AclBinding.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.resource.ResourcePattern;

import java.util.Objects;

/**
 * Represents a binding between a resource pattern and an access control entry.
 *
 * The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
 */
@InterfaceStability.Evolving
public class AclBinding {
    private final ResourcePattern pattern;
    private final AccessControlEntry entry;

    /**
     * Create an instance of this class with the provided parameters.
     *
     * @param pattern non-null resource pattern.
     * @param entry non-null entry
     */
    public AclBinding(ResourcePattern pattern, AccessControlEntry entry) {
        this.pattern = Objects.requireNonNull(pattern, "pattern");
        this.entry = Objects.requireNonNull(entry, "entry");
    }

    /**
     * @return true if this binding has any UNKNOWN components.
     */
    public boolean isUnknown() {
        // UNKNOWN in either half makes the whole binding UNKNOWN.
        return pattern.isUnknown() || entry.isUnknown();
    }

    /**
     * @return the resource pattern for this binding.
     */
    public ResourcePattern pattern() {
        return pattern;
    }

    /**
     * @return the access control entry for this binding.
     */
    public final AccessControlEntry entry() {
        return entry;
    }

    /**
     * Create a filter which matches only this AclBinding.
     */
    public AclBindingFilter toFilter() {
        return new AclBindingFilter(pattern.toFilter(), entry.toFilter());
    }

    @Override
    public String toString() {
        return "(pattern=" + pattern + ", entry=" + entry + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (o == this)
            return true;
        if (o == null || o.getClass() != getClass())
            return false;
        AclBinding other = (AclBinding) o;
        return Objects.equals(pattern, other.pattern)
            && Objects.equals(entry, other.entry);
    }

    @Override
    public int hashCode() {
        return Objects.hash(pattern, entry);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AclBindingFilter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.resource.ResourcePatternFilter;

import java.util.Objects;

/**
 * A filter which can match AclBinding objects.
 *
 * The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
 */
@InterfaceStability.Evolving
public class AclBindingFilter {
    private final ResourcePatternFilter patternFilter;
    private final AccessControlEntryFilter entryFilter;

    /**
     * A filter which matches any ACL binding.
     */
    public static final AclBindingFilter ANY =
        new AclBindingFilter(ResourcePatternFilter.ANY, AccessControlEntryFilter.ANY);

    /**
     * Create an instance of this filter with the provided parameters.
     *
     * @param patternFilter non-null pattern filter
     * @param entryFilter non-null access control entry filter
     */
    public AclBindingFilter(ResourcePatternFilter patternFilter, AccessControlEntryFilter entryFilter) {
        this.patternFilter = Objects.requireNonNull(patternFilter, "patternFilter");
        this.entryFilter = Objects.requireNonNull(entryFilter, "entryFilter");
    }

    /**
     * @return {@code true} if this filter has any UNKNOWN components.
     */
    public boolean isUnknown() {
        return patternFilter.isUnknown() || entryFilter.isUnknown();
    }

    /**
     * @return the resource pattern filter.
     */
    public ResourcePatternFilter patternFilter() {
        return patternFilter;
    }

    /**
     * @return the access control entry filter.
     */
    public final AccessControlEntryFilter entryFilter() {
        return entryFilter;
    }

    @Override
    public String toString() {
        return "(patternFilter=" + patternFilter + ", entryFilter=" + entryFilter + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (o == this)
            return true;
        if (o == null || o.getClass() != getClass())
            return false;
        AclBindingFilter other = (AclBindingFilter) o;
        return Objects.equals(patternFilter, other.patternFilter)
            && Objects.equals(entryFilter, other.entryFilter);
    }

    /**
     * Return true if the resource and entry filters can only match one ACE. In other words, if
     * there are no ANY or UNKNOWN fields.
     */
    public boolean matchesAtMostOne() {
        return patternFilter.matchesAtMostOne() && entryFilter.matchesAtMostOne();
    }

    /**
     * Return a string describing an ANY or UNKNOWN field, or null if there is no such field.
     */
    public String findIndefiniteField() {
        // Report the pattern-side indefinite field first, mirroring match order.
        String field = patternFilter.findIndefiniteField();
        return field != null ? field : entryFilter.findIndefiniteField();
    }

    /**
     * Return true if the resource filter matches the binding's resource and the entry filter matches binding's entry.
     */
    public boolean matches(AclBinding binding) {
        return patternFilter.matches(binding.pattern()) && entryFilter.matches(binding.entry());
    }

    @Override
    public int hashCode() {
        return Objects.hash(patternFilter, entryFilter);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AclOperation.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

/**
 * Represents an operation which an ACL grants or denies permission to perform.
 *
 * Some operations imply other operations:
 * <ul>
 * <li><code>ALLOW ALL</code> implies <code>ALLOW</code> everything
 * <li><code>DENY ALL</code> implies <code>DENY</code> everything
 *
 * <li><code>ALLOW READ</code> implies <code>ALLOW DESCRIBE</code>
 * <li><code>ALLOW WRITE</code> implies <code>ALLOW DESCRIBE</code>
 * <li><code>ALLOW DELETE</code> implies <code>ALLOW DESCRIBE</code>
 *
 * <li><code>ALLOW ALTER</code> implies <code>ALLOW DESCRIBE</code>
 *
 * <li><code>ALLOW ALTER_CONFIGS</code> implies <code>ALLOW DESCRIBE_CONFIGS</code>
 * </ul>
 * The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
 */
@InterfaceStability.Evolving
public enum AclOperation {
    /**
     * Represents any AclOperation which this client cannot understand, perhaps because this
     * client is too old.
     */
    UNKNOWN((byte) 0),

    /**
     * In a filter, matches any AclOperation.
     */
    ANY((byte) 1),

    /**
     * ALL operation.
     */
    ALL((byte) 2),

    /**
     * READ operation.
     */
    READ((byte) 3),

    /**
     * WRITE operation.
     */
    WRITE((byte) 4),

    /**
     * CREATE operation.
     */
    CREATE((byte) 5),

    /**
     * DELETE operation.
     */
    DELETE((byte) 6),

    /**
     * ALTER operation.
     */
    ALTER((byte) 7),

    /**
     * DESCRIBE operation.
     */
    DESCRIBE((byte) 8),

    /**
     * CLUSTER_ACTION operation.
     */
    CLUSTER_ACTION((byte) 9),

    /**
     * DESCRIBE_CONFIGS operation.
     */
    DESCRIBE_CONFIGS((byte) 10),

    /**
     * ALTER_CONFIGS operation.
     */
    ALTER_CONFIGS((byte) 11),

    /**
     * IDEMPOTENT_WRITE operation.
     */
    IDEMPOTENT_WRITE((byte) 12),

    /**
     * CREATE_TOKENS operation.
     */
    CREATE_TOKENS((byte) 13),

    /**
     * DESCRIBE_TOKENS operation.
     */
    DESCRIBE_TOKENS((byte) 14);

    // Note: we cannot have more than 30 ACL operations without modifying the format used
    // to describe ACL operations in MetadataResponse.

    // Reverse lookup from wire code to enum constant, built once at class load.
    private static final Map<Byte, AclOperation> CODE_TO_VALUE = new HashMap<>();

    static {
        for (AclOperation operation : AclOperation.values()) {
            CODE_TO_VALUE.put(operation.code, operation);
        }
    }

    /**
     * Parse the given string as an ACL operation.
     *
     * Note that this method never throws: an unrecognized name yields {@link #UNKNOWN}.
     * (The misleading {@code throws IllegalArgumentException} declaration was removed; the
     * exception is caught internally and mapped to UNKNOWN.)
     *
     * @param str The string to parse (case-insensitive).
     *
     * @return The AclOperation, or UNKNOWN if the string could not be matched.
     */
    public static AclOperation fromString(String str) {
        try {
            return AclOperation.valueOf(str.toUpperCase(Locale.ROOT));
        } catch (IllegalArgumentException e) {
            return UNKNOWN;
        }
    }

    /**
     * Return the AclOperation with the provided code or `AclOperation.UNKNOWN` if one cannot be found.
     */
    public static AclOperation fromCode(byte code) {
        AclOperation operation = CODE_TO_VALUE.get(code);
        if (operation == null) {
            return UNKNOWN;
        }
        return operation;
    }

    // Wire-format code for this operation; stable across releases.
    private final byte code;

    AclOperation(byte code) {
        this.code = code;
    }

    /**
     * Return the code of this operation.
     */
    public byte code() {
        return code;
    }

    /**
     * Return true if this operation is UNKNOWN.
     */
    public boolean isUnknown() {
        return this == UNKNOWN;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/AclPermissionType.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.acl;

import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.HashMap;
import java.util.Locale;

/**
 * Represents whether an ACL grants or denies permissions.
 *
 * The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
 */
@InterfaceStability.Evolving
public enum AclPermissionType {
    /**
     * Represents any AclPermissionType which this client cannot understand,
     * perhaps because this client is too old.
     */
    UNKNOWN((byte) 0),

    /**
     * In a filter, matches any AclPermissionType.
     */
    ANY((byte) 1),

    /**
     * Disallows access.
     */
    DENY((byte) 2),

    /**
     * Grants access.
     */
    ALLOW((byte) 3);

    // Reverse lookup from wire code to enum constant, populated once below.
    private static final HashMap<Byte, AclPermissionType> CODE_TO_VALUE = new HashMap<>();

    static {
        for (AclPermissionType type : values()) {
            CODE_TO_VALUE.put(type.code, type);
        }
    }

    /**
     * Parse the given string as an ACL permission.
     *
     * @param str The string to parse (case-insensitive).
     *
     * @return The AclPermissionType, or UNKNOWN if the string could not be matched.
     */
    public static AclPermissionType fromString(String str) {
        try {
            return AclPermissionType.valueOf(str.toUpperCase(Locale.ROOT));
        } catch (IllegalArgumentException e) {
            // Unrecognized names map to UNKNOWN rather than propagating.
            return UNKNOWN;
        }
    }

    /**
     * Return the AclPermissionType with the provided code or `AclPermissionType.UNKNOWN` if one cannot be found.
     */
    public static AclPermissionType fromCode(byte code) {
        return CODE_TO_VALUE.getOrDefault(code, UNKNOWN);
    }

    // Wire-format code for this permission type; stable across releases.
    private final byte code;

    AclPermissionType(byte code) {
        this.code = code;
    }

    /**
     * Return the code of this permission type.
     */
    public byte code() {
        return code;
    }

    /**
     * Return true if this permission type is UNKNOWN.
     */
    public boolean isUnknown() {
        return this == UNKNOWN;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/acl/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides classes representing Access Control Lists for authorization of clients */ package org.apache.kafka.common.acl;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/annotation/InterfaceStability.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.annotation; import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; /** * Annotation to inform users of how much to rely on a particular package, class or method not changing over time. * Currently the stability can be {@link Stable}, {@link Evolving} or {@link Unstable}. */ @InterfaceStability.Evolving public class InterfaceStability { /** * Compatibility is maintained in major, minor and patch releases with one exception: compatibility may be broken * in a major release (i.e. 0.m) for APIs that have been deprecated for at least one major/minor release cycle. * In cases where the impact of breaking compatibility is significant, there is also a minimum deprecation period * of one year. * * This is the default stability level for public APIs that are not annotated. */ @Documented @Retention(RetentionPolicy.RUNTIME) public @interface Stable { } /** * Compatibility may be broken at minor release (i.e. m.x). */ @Documented @Retention(RetentionPolicy.RUNTIME) public @interface Evolving { } /** * No guarantee is provided as to reliability or stability across any level of release granularity. 
*/ @Documented @Retention(RetentionPolicy.RUNTIME) public @interface Unstable { } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/annotation/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides annotations used on Kafka APIs. */ package org.apache.kafka.common.annotation;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/cache/Cache.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.cache; /** * Interface for caches, semi-persistent maps which store key-value mappings until either an eviction criteria is met * or the entries are manually invalidated. Caches are not required to be thread-safe, but some implementations may be. */ public interface Cache<K, V> { /** * Look up a value in the cache. * @param key the key to * @return the cached value, or null if it is not present. */ V get(K key); /** * Insert an entry into the cache. * @param key the key to insert * @param value the value to insert */ void put(K key, V value); /** * Manually invalidate a key, clearing its entry from the cache. * @param key the key to remove * @return true if the key existed in the cache and the entry was removed or false if it was not present */ boolean remove(K key); /** * Get the number of entries in this cache. If this cache is used by multiple threads concurrently, the returned * value will only be approximate. * @return the number of entries in the cache */ long size(); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/cache/LRUCache.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.cache;

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * A cache implementing a least recently used policy.
 */
public class LRUCache<K, V> implements Cache<K, V> {
    // Access-ordered LinkedHashMap gives LRU iteration order for free.
    private final LinkedHashMap<K, V> entries;

    public LRUCache(final int maxSize) {
        // Third constructor argument `true` enables access ordering (most recently
        // used entry moves to the tail on each get/put).
        entries = new LinkedHashMap<K, V>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                // Evict the least-recently-used entry once the cache grows past maxSize.
                // (The explicit `this.` prefix silences static-analysis warnings.)
                return this.size() > maxSize;
            }
        };
    }

    @Override
    public V get(K key) {
        return entries.get(key);
    }

    @Override
    public void put(K key, V value) {
        entries.put(key, value);
    }

    @Override
    public boolean remove(K key) {
        return entries.remove(key) != null;
    }

    @Override
    public long size() {
        return entries.size();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/cache/SynchronizedCache.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.cache; /** * Wrapper for caches that adds simple synchronization to provide a thread-safe cache. Note that this simply adds * synchronization around each cache method on the underlying unsynchronized cache. It does not add any support for * atomically checking for existence of an entry and computing and inserting the value if it is missing. */ public class SynchronizedCache<K, V> implements Cache<K, V> { private final Cache<K, V> underlying; public SynchronizedCache(Cache<K, V> underlying) { this.underlying = underlying; } @Override public synchronized V get(K key) { return underlying.get(key); } @Override public synchronized void put(K key, V value) { underlying.put(key, value); } @Override public synchronized boolean remove(K key) { return underlying.remove(key); } @Override public synchronized long size() { return underlying.size(); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/cache/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides reusable implementations of cache primitives. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.cache;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/compress/KafkaLZ4BlockInputStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.compress; import net.jpountz.lz4.LZ4Compressor; import net.jpountz.lz4.LZ4Exception; import net.jpountz.lz4.LZ4Factory; import net.jpountz.lz4.LZ4SafeDecompressor; import net.jpountz.xxhash.XXHash32; import net.jpountz.xxhash.XXHashFactory; import org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.BD; import org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.FLG; import org.apache.kafka.common.utils.BufferSupplier; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.ByteOrder; import static org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.LZ4_FRAME_INCOMPRESSIBLE_MASK; import static org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.MAGIC; /** * A partial implementation of the v1.5.1 LZ4 Frame format. * * @see <a href="https://github.com/lz4/lz4/wiki/lz4_Frame_format.md">LZ4 Frame Format</a> * * This class is not thread-safe. 
*/ public final class KafkaLZ4BlockInputStream extends InputStream { public static final String PREMATURE_EOS = "Stream ended prematurely"; public static final String NOT_SUPPORTED = "Stream unsupported (invalid magic bytes)"; public static final String BLOCK_HASH_MISMATCH = "Block checksum mismatch"; public static final String DESCRIPTOR_HASH_MISMATCH = "Stream frame descriptor corrupted"; private static final LZ4SafeDecompressor DECOMPRESSOR = LZ4Factory.fastestInstance().safeDecompressor(); private static final XXHash32 CHECKSUM = XXHashFactory.fastestInstance().hash32(); private static final RuntimeException BROKEN_LZ4_EXCEPTION; // https://issues.apache.org/jira/browse/KAFKA-9203 // detect buggy lz4 libraries on the classpath static { RuntimeException exception = null; try { detectBrokenLz4Version(); } catch (RuntimeException e) { exception = e; } BROKEN_LZ4_EXCEPTION = exception; } private final ByteBuffer in; private final boolean ignoreFlagDescriptorChecksum; private final BufferSupplier bufferSupplier; private final ByteBuffer decompressionBuffer; // `flg` and `maxBlockSize` are effectively final, they are initialised in the `readHeader` method that is only // invoked from the constructor private FLG flg; private int maxBlockSize; // If a block is compressed, this is the same as `decompressionBuffer`. If a block is not compressed, this is // a slice of `in` to avoid unnecessary copies. private ByteBuffer decompressedBuffer; private boolean finished; /** * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm. 
* * @param in The byte buffer to decompress * @param ignoreFlagDescriptorChecksum for compatibility with old kafka clients, ignore incorrect HC byte * @throws IOException */ public KafkaLZ4BlockInputStream(ByteBuffer in, BufferSupplier bufferSupplier, boolean ignoreFlagDescriptorChecksum) throws IOException { if (BROKEN_LZ4_EXCEPTION != null) { throw BROKEN_LZ4_EXCEPTION; } this.ignoreFlagDescriptorChecksum = ignoreFlagDescriptorChecksum; this.in = in.duplicate().order(ByteOrder.LITTLE_ENDIAN); this.bufferSupplier = bufferSupplier; readHeader(); decompressionBuffer = bufferSupplier.get(maxBlockSize); finished = false; } /** * Check whether KafkaLZ4BlockInputStream is configured to ignore the * Frame Descriptor checksum, which is useful for compatibility with * old client implementations that use incorrect checksum calculations. */ public boolean ignoreFlagDescriptorChecksum() { return this.ignoreFlagDescriptorChecksum; } /** * Reads the magic number and frame descriptor from input buffer. 
* * @throws IOException */ private void readHeader() throws IOException { // read first 6 bytes into buffer to check magic and FLG/BD descriptor flags if (in.remaining() < 6) { throw new IOException(PREMATURE_EOS); } if (MAGIC != in.getInt()) { throw new IOException(NOT_SUPPORTED); } // mark start of data to checksum in.mark(); flg = FLG.fromByte(in.get()); maxBlockSize = BD.fromByte(in.get()).getBlockMaximumSize(); if (flg.isContentSizeSet()) { if (in.remaining() < 8) { throw new IOException(PREMATURE_EOS); } in.position(in.position() + 8); } // Final byte of Frame Descriptor is HC checksum // Old implementations produced incorrect HC checksums if (ignoreFlagDescriptorChecksum) { in.position(in.position() + 1); return; } int len = in.position() - in.reset().position(); int hash = CHECKSUM.hash(in, in.position(), len, 0); in.position(in.position() + len); if (in.get() != (byte) ((hash >> 8) & 0xFF)) { throw new IOException(DESCRIPTOR_HASH_MISMATCH); } } /** * Decompresses (if necessary) buffered data, optionally computes and validates a XXHash32 checksum, and writes the * result to a buffer. 
* * @throws IOException */ private void readBlock() throws IOException { if (in.remaining() < 4) { throw new IOException(PREMATURE_EOS); } int blockSize = in.getInt(); boolean compressed = (blockSize & LZ4_FRAME_INCOMPRESSIBLE_MASK) == 0; blockSize &= ~LZ4_FRAME_INCOMPRESSIBLE_MASK; // Check for EndMark if (blockSize == 0) { finished = true; if (flg.isContentChecksumSet()) in.getInt(); // TODO: verify this content checksum return; } else if (blockSize > maxBlockSize) { throw new IOException(String.format("Block size %d exceeded max: %d", blockSize, maxBlockSize)); } if (in.remaining() < blockSize) { throw new IOException(PREMATURE_EOS); } if (compressed) { try { final int bufferSize = DECOMPRESSOR.decompress(in, in.position(), blockSize, decompressionBuffer, 0, maxBlockSize); decompressionBuffer.position(0); decompressionBuffer.limit(bufferSize); decompressedBuffer = decompressionBuffer; } catch (LZ4Exception e) { throw new IOException(e); } } else { decompressedBuffer = in.slice(); decompressedBuffer.limit(blockSize); } // verify checksum if (flg.isBlockChecksumSet()) { int hash = CHECKSUM.hash(in, in.position(), blockSize, 0); in.position(in.position() + blockSize); if (hash != in.getInt()) { throw new IOException(BLOCK_HASH_MISMATCH); } } else { in.position(in.position() + blockSize); } } @Override public int read() throws IOException { if (finished) { return -1; } if (available() == 0) { readBlock(); } if (finished) { return -1; } return decompressedBuffer.get() & 0xFF; } @Override public int read(byte[] b, int off, int len) throws IOException { net.jpountz.util.SafeUtils.checkRange(b, off, len); if (finished) { return -1; } if (available() == 0) { readBlock(); } if (finished) { return -1; } len = Math.min(len, available()); decompressedBuffer.get(b, off, len); return len; } @Override public long skip(long n) throws IOException { if (finished) { return 0; } if (available() == 0) { readBlock(); } if (finished) { return 0; } int skipped = (int) Math.min(n, 
available()); decompressedBuffer.position(decompressedBuffer.position() + skipped); return skipped; } @Override public int available() { return decompressedBuffer == null ? 0 : decompressedBuffer.remaining(); } @Override public void close() { bufferSupplier.release(decompressionBuffer); } @Override public void mark(int readlimit) { throw new RuntimeException("mark not supported"); } @Override public void reset() { throw new RuntimeException("reset not supported"); } @Override public boolean markSupported() { return false; } /** * Checks whether the version of lz4 on the classpath has the fix for reading from ByteBuffers with * non-zero array offsets (see https://github.com/lz4/lz4-java/pull/65) */ static void detectBrokenLz4Version() { byte[] source = new byte[]{1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3}; final LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor(); final byte[] compressed = new byte[compressor.maxCompressedLength(source.length)]; final int compressedLength = compressor.compress(source, 0, source.length, compressed, 0, compressed.length); // allocate an array-backed ByteBuffer with non-zero array-offset containing the compressed data // a buggy decompressor will read the data from the beginning of the underlying array instead of // the beginning of the ByteBuffer, failing to decompress the invalid data. 
final byte[] zeroes = {0, 0, 0, 0, 0}; ByteBuffer nonZeroOffsetBuffer = ByteBuffer .allocate(zeroes.length + compressed.length) // allocates the backing array with extra space to offset the data .put(zeroes) // prepend invalid bytes (zeros) before the compressed data in the array .slice() // create a new ByteBuffer sharing the underlying array, offset to start on the compressed data .put(compressed); // write the compressed data at the beginning of this new buffer ByteBuffer dest = ByteBuffer.allocate(source.length); try { DECOMPRESSOR.decompress(nonZeroOffsetBuffer, 0, compressedLength, dest, 0, source.length); } catch (Exception e) { throw new RuntimeException("Kafka has detected detected a buggy lz4-java library (< 1.4.x) on the classpath." + " If you are using Kafka client libraries, make sure your application does not" + " accidentally override the version provided by Kafka or include multiple versions" + " of the library on the classpath. The lz4-java version on the classpath should" + " match the version the Kafka client libraries depend on. Adding -verbose:class" + " to your JVM arguments may help understand which lz4-java version is getting loaded.", e); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/compress/KafkaLZ4BlockOutputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.compress;

import java.io.IOException;
import java.io.OutputStream;

import org.apache.kafka.common.utils.ByteUtils;

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.xxhash.XXHash32;
import net.jpountz.xxhash.XXHashFactory;

/**
 * A partial implementation of the v1.5.1 LZ4 Frame format.
 *
 * @see <a href="https://github.com/lz4/lz4/wiki/lz4_Frame_format.md">LZ4 Frame Format</a>
 *
 * This class is not thread-safe.
 */
public final class KafkaLZ4BlockOutputStream extends OutputStream {

    // Little-endian LZ4 frame magic number.
    public static final int MAGIC = 0x184D2204;
    public static final int LZ4_MAX_HEADER_LENGTH = 19;
    // High bit of the block-size word marks a block stored uncompressed.
    public static final int LZ4_FRAME_INCOMPRESSIBLE_MASK = 0x80000000;

    public static final String CLOSED_STREAM = "The stream is already closed";

    // Valid BD block-size codes: 4=64KB, 5=256KB, 6=1MB, 7=4MB.
    public static final int BLOCKSIZE_64KB = 4;
    public static final int BLOCKSIZE_256KB = 5;
    public static final int BLOCKSIZE_1MB = 6;
    public static final int BLOCKSIZE_4MB = 7;

    private final LZ4Compressor compressor;
    private final XXHash32 checksum;
    // When true, the frame-descriptor HC byte is computed the (incorrect) way old clients did.
    private final boolean useBrokenFlagDescriptorChecksum;
    private final FLG flg;
    private final BD bd;
    private final int maxBlockSize;
    private OutputStream out;
    // Staging buffer for uncompressed bytes; flushed as one LZ4 block when full.
    private byte[] buffer;
    private byte[] compressedBuffer;
    private int bufferOffset;
    private boolean finished;

    /**
     * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
     *
     * @param out The output stream to compress
     * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
     *            values will generate an exception
     * @param blockChecksum Default: false. When true, a XXHash32 checksum is computed and appended to the stream for
     *            every block of data
     * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum
     *            compatible with older kafka clients.
     * @throws IOException if the header cannot be written to {@code out}
     */
    public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum,
                                     boolean useBrokenFlagDescriptorChecksum) throws IOException {
        this.out = out;
        compressor = LZ4Factory.fastestInstance().fastCompressor();
        checksum = XXHashFactory.fastestInstance().hash32();
        this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
        bd = new BD(blockSize);
        flg = new FLG(blockChecksum);
        bufferOffset = 0;
        maxBlockSize = bd.getBlockMaximumSize();
        buffer = new byte[maxBlockSize];
        compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
        finished = false;
        writeHeader();
    }

    /**
     * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
     *
     * @param out The output stream to compress
     * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
     *            values will generate an exception
     * @param blockChecksum Default: false. When true, a XXHash32 checksum is computed and appended to the stream for
     *            every block of data
     * @throws IOException if the header cannot be written
     */
    public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum) throws IOException {
        this(out, blockSize, blockChecksum, false);
    }

    /**
     * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
     *
     * @param out The stream to compress
     * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
     *            values will generate an exception
     * @throws IOException if the header cannot be written
     */
    public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize) throws IOException {
        this(out, blockSize, false, false);
    }

    /**
     * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
     *
     * @param out The output stream to compress
     * @throws IOException if the header cannot be written
     */
    public KafkaLZ4BlockOutputStream(OutputStream out) throws IOException {
        this(out, BLOCKSIZE_64KB);
    }

    // Convenience constructor selecting only the broken-HC compatibility mode.
    public KafkaLZ4BlockOutputStream(OutputStream out, boolean useBrokenHC) throws IOException {
        this(out, BLOCKSIZE_64KB, false, useBrokenHC);
    }

    /**
     * Check whether KafkaLZ4BlockInputStream is configured to write an
     * incorrect Frame Descriptor checksum, which is useful for
     * compatibility with old client implementations.
     */
    public boolean useBrokenFlagDescriptorChecksum() {
        return this.useBrokenFlagDescriptorChecksum;
    }

    /**
     * Writes the magic number and frame descriptor to the underlying {@link OutputStream}.
     *
     * @throws IOException if the underlying stream fails
     */
    private void writeHeader() throws IOException {
        ByteUtils.writeUnsignedIntLE(buffer, 0, MAGIC);
        bufferOffset = 4;
        buffer[bufferOffset++] = flg.toByte();
        buffer[bufferOffset++] = bd.toByte();
        // TODO write uncompressed content size, update flg.validate()

        // compute checksum on all descriptor fields
        int offset = 4;
        int len = bufferOffset - offset;
        if (this.useBrokenFlagDescriptorChecksum) {
            // old clients (incorrectly) hashed starting at the magic number rather than the descriptor
            len += offset;
            offset = 0;
        }
        // the HC byte is the second byte of the descriptor's xxhash32
        byte hash = (byte) ((checksum.hash(buffer, offset, len, 0) >> 8) & 0xFF);
        buffer[bufferOffset++] = hash;

        // write out frame descriptor
        out.write(buffer, 0, bufferOffset);
        bufferOffset = 0;
    }

    /**
     * Compresses buffered data, optionally computes an XXHash32 checksum, and writes the result to the underlying
     * {@link OutputStream}.
     *
     * @throws IOException if the underlying stream fails
     */
    private void writeBlock() throws IOException {
        if (bufferOffset == 0) {
            return;
        }

        int compressedLength = compressor.compress(buffer, 0, bufferOffset, compressedBuffer, 0);
        byte[] bufferToWrite = compressedBuffer;
        int compressMethod = 0;

        // Store block uncompressed if compressed length is greater (incompressible)
        if (compressedLength >= bufferOffset) {
            bufferToWrite = buffer;
            compressedLength = bufferOffset;
            compressMethod = LZ4_FRAME_INCOMPRESSIBLE_MASK;
        }

        // Write content
        ByteUtils.writeUnsignedIntLE(out, compressedLength | compressMethod);
        out.write(bufferToWrite, 0, compressedLength);

        // Calculate and write block checksum
        if (flg.isBlockChecksumSet()) {
            // the checksum covers the block data exactly as stored (compressed or not)
            int hash = checksum.hash(bufferToWrite, 0, compressedLength, 0);
            ByteUtils.writeUnsignedIntLE(out, hash);
        }
        bufferOffset = 0;
    }

    /**
     * Similar to the {@link #writeBlock()} method. Writes a 0-length block (without block checksum) to signal the end
     * of the block stream.
     *
     * @throws IOException if the underlying stream fails
     */
    private void writeEndMark() throws IOException {
        ByteUtils.writeUnsignedIntLE(out, 0);
        // TODO implement content checksum, update flg.validate()
    }

    @Override
    public void write(int b) throws IOException {
        ensureNotFinished();
        if (bufferOffset == maxBlockSize) {
            writeBlock();
        }
        buffer[bufferOffset++] = (byte) b;
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        net.jpountz.util.SafeUtils.checkRange(b, off, len);
        ensureNotFinished();

        int bufferRemainingLength = maxBlockSize - bufferOffset;
        // while b will fill the buffer
        while (len > bufferRemainingLength) {
            // fill remaining space in buffer
            System.arraycopy(b, off, buffer, bufferOffset, bufferRemainingLength);
            bufferOffset = maxBlockSize;
            writeBlock();
            // compute new offset and length
            off += bufferRemainingLength;
            len -= bufferRemainingLength;
            bufferRemainingLength = maxBlockSize;
        }

        System.arraycopy(b, off, buffer, bufferOffset, len);
        bufferOffset += len;
    }

    @Override
    public void flush() throws IOException {
        // flush any partially-filled block; safe to call after close (out is null then)
        if (!finished) {
            writeBlock();
        }
        if (out != null) {
            out.flush();
        }
    }

    /**
     * A simple state check to ensure the stream is still open.
     */
    private void ensureNotFinished() {
        if (finished) {
            throw new IllegalStateException(CLOSED_STREAM);
        }
    }

    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                // basically flush the buffer writing the last block
                writeBlock();
                // write the end block
                writeEndMark();
            }
        } finally {
            try {
                if (out != null) {
                    // try-with-resources both flushes and closes the underlying stream
                    try (OutputStream outStream = out) {
                        outStream.flush();
                    }
                }
            } finally {
                // release references and mark closed even if flushing failed
                out = null;
                buffer = null;
                compressedBuffer = null;
                finished = true;
            }
        }
    }

    /**
     * The frame descriptor FLG byte: bit fields for version, block independence and the
     * optional checksums/content-size, per the LZ4 frame specification.
     */
    public static class FLG {

        private static final int VERSION = 1;

        private final int reserved;
        private final int contentChecksum;
        private final int contentSize;
        private final int blockChecksum;
        private final int blockIndependence;
        private final int version;

        public FLG() {
            this(false);
        }

        public FLG(boolean blockChecksum) {
            this(0, 0, 0, blockChecksum ? 1 : 0, 1, VERSION);
        }

        private FLG(int reserved, int contentChecksum, int contentSize, int blockChecksum, int blockIndependence,
                    int version) {
            this.reserved = reserved;
            this.contentChecksum = contentChecksum;
            this.contentSize = contentSize;
            this.blockChecksum = blockChecksum;
            this.blockIndependence = blockIndependence;
            this.version = version;
            validate();
        }

        // Unpacks the FLG bit fields from a raw frame-descriptor byte.
        public static FLG fromByte(byte flg) {
            int reserved = (flg >>> 0) & 3;
            int contentChecksum = (flg >>> 2) & 1;
            int contentSize = (flg >>> 3) & 1;
            int blockChecksum = (flg >>> 4) & 1;
            int blockIndependence = (flg >>> 5) & 1;
            int version = (flg >>> 6) & 3;
            return new FLG(reserved, contentChecksum, contentSize, blockChecksum, blockIndependence, version);
        }

        // Packs the bit fields back into the wire-format byte.
        public byte toByte() {
            return (byte) (((reserved & 3) << 0) | ((contentChecksum & 1) << 2) | ((contentSize & 1) << 3)
                    | ((blockChecksum & 1) << 4) | ((blockIndependence & 1) << 5) | ((version & 3) << 6));
        }

        private void validate() {
            if (reserved != 0) {
                throw new RuntimeException("Reserved bits must be 0");
            }
            if (blockIndependence != 1) {
                throw new RuntimeException("Dependent block stream is unsupported");
            }
            if (version != VERSION) {
                throw new RuntimeException(String.format("Version %d is unsupported", version));
            }
        }

        public boolean isContentChecksumSet() {
            return contentChecksum == 1;
        }

        public boolean isContentSizeSet() {
            return contentSize == 1;
        }

        public boolean isBlockChecksumSet() {
            return blockChecksum == 1;
        }

        public boolean isBlockIndependenceSet() {
            return blockIndependence == 1;
        }

        public int getVersion() {
            return version;
        }
    }

    /**
     * The frame descriptor BD byte: encodes the maximum block size as a 3-bit code (4..7).
     */
    public static class BD {

        private final int reserved2;
        private final int blockSizeValue;
        private final int reserved3;

        public BD() {
            this(0, BLOCKSIZE_64KB, 0);
        }

        public BD(int blockSizeValue) {
            this(0, blockSizeValue, 0);
        }

        private BD(int reserved2, int blockSizeValue, int reserved3) {
            this.reserved2 = reserved2;
            this.blockSizeValue = blockSizeValue;
            this.reserved3 = reserved3;
            validate();
        }

        // Unpacks the BD bit fields from a raw frame-descriptor byte.
        public static BD fromByte(byte bd) {
            int reserved2 = (bd >>> 0) & 15;
            int blockMaximumSize = (bd >>> 4) & 7;
            int reserved3 = (bd >>> 7) & 1;
            return new BD(reserved2, blockMaximumSize, reserved3);
        }

        private void validate() {
            if (reserved2 != 0) {
                throw new RuntimeException("Reserved2 field must be 0");
            }
            if (blockSizeValue < 4 || blockSizeValue > 7) {
                throw new RuntimeException("Block size value must be between 4 and 7");
            }
            if (reserved3 != 0) {
                throw new RuntimeException("Reserved3 field must be 0");
            }
        }

        // 2^(2n+8)
        public int getBlockMaximumSize() {
            return 1 << ((2 * blockSizeValue) + 8);
        }

        public byte toByte() {
            return (byte) (((reserved2 & 15) << 0) | ((blockSizeValue & 7) << 4) | ((reserved3 & 1) << 7));
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/compress/SnappyFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.compress;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.utils.ByteBufferInputStream;
import org.apache.kafka.common.utils.ByteBufferOutputStream;
import org.xerial.snappy.SnappyInputStream;
import org.xerial.snappy.SnappyOutputStream;

import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;

/**
 * Static helpers for creating Snappy compression and decompression streams.
 * Any failure while constructing the underlying snappy stream (including
 * native-library loading errors, hence the {@code Throwable} catch) is
 * rethrown as a {@link KafkaException}.
 */
public class SnappyFactory {

    // Utility class: no instances.
    private SnappyFactory() { }

    /**
     * Wraps the given buffer-backed output stream in a Snappy compressor.
     *
     * @param buffer destination for the compressed bytes
     * @return a Snappy-compressing {@link OutputStream}
     * @throws KafkaException if the snappy stream cannot be created
     */
    public static OutputStream wrapForOutput(ByteBufferOutputStream buffer) {
        final OutputStream compressing;
        try {
            compressing = new SnappyOutputStream(buffer);
        } catch (Throwable t) {
            throw new KafkaException(t);
        }
        return compressing;
    }

    /**
     * Wraps the given byte buffer in a Snappy decompressor.
     *
     * @param buffer source of the compressed bytes
     * @return a Snappy-decompressing {@link InputStream}
     * @throws KafkaException if the snappy stream cannot be created
     */
    public static InputStream wrapForInput(ByteBuffer buffer) {
        final InputStream decompressing;
        try {
            decompressing = new SnappyInputStream(new ByteBufferInputStream(buffer));
        } catch (Throwable t) {
            throw new KafkaException(t);
        }
        return decompressing;
    }

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/compress/ZstdFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.compress;

import com.github.luben.zstd.BufferPool;
import com.github.luben.zstd.RecyclingBufferPool;
import com.github.luben.zstd.ZstdInputStreamNoFinalizer;
import com.github.luben.zstd.ZstdOutputStreamNoFinalizer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.ByteBufferInputStream;
import org.apache.kafka.common.utils.ByteBufferOutputStream;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;

/**
 * Static helpers for creating zstd compression and decompression streams.
 * Construction failures of any kind (including native-library loading errors,
 * hence the {@code Throwable} catch) are rethrown as {@link KafkaException}.
 */
public class ZstdFactory {

    // Utility class: no instances.
    private ZstdFactory() { }

    /**
     * Wraps the given buffer-backed output stream in a zstd compressor.
     *
     * @param buffer destination for the compressed bytes
     * @return a zstd-compressing {@link OutputStream}
     * @throws KafkaException if the zstd stream cannot be created
     */
    public static OutputStream wrapForOutput(ByteBufferOutputStream buffer) {
        try {
            OutputStream compressing = new ZstdOutputStreamNoFinalizer(buffer, RecyclingBufferPool.INSTANCE);
            // Set input buffer (uncompressed) to 16 KB (none by default) to ensure reasonable performance
            // in cases where the caller passes a small number of bytes to write (potentially a single byte).
            return new BufferedOutputStream(compressing, 16 * 1024);
        } catch (Throwable e) {
            throw new KafkaException(e);
        }
    }

    /**
     * Wraps the given byte buffer in a zstd decompressor.
     *
     * @param buffer source of the compressed bytes
     * @param messageVersion record batch magic byte (currently unused here)
     * @param decompressionBufferSupplier supplies reusable intermediate buffers
     * @return a zstd-decompressing {@link InputStream}
     * @throws KafkaException if the zstd stream cannot be created
     */
    public static InputStream wrapForInput(ByteBuffer buffer, byte messageVersion,
                                           BufferSupplier decompressionBufferSupplier) {
        try {
            // We use our own BufferSupplier instead of com.github.luben.zstd.RecyclingBufferPool since our
            // implementation doesn't require locking or soft references.
            final BufferPool pool = new BufferPool() {
                @Override
                public ByteBuffer get(int capacity) {
                    return decompressionBufferSupplier.get(capacity);
                }

                @Override
                public void release(ByteBuffer buffer) {
                    decompressionBufferSupplier.release(buffer);
                }
            };

            InputStream decompressing = new ZstdInputStreamNoFinalizer(new ByteBufferInputStream(buffer), pool);
            // Set output buffer (uncompressed) to 16 KB (none by default) to ensure reasonable performance
            // in cases where the caller reads a small number of bytes (potentially a single byte).
            return new BufferedInputStream(decompressing, 16 * 1024);
        } catch (Throwable e) {
            throw new KafkaException(e);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/compress/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides mechanisms for compressing data handled by Kafka. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.compress;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/AbstractConfig.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import org.apache.kafka.common.Configurable; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.config.provider.ConfigProvider; import org.apache.kafka.common.config.types.Password; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; // ** Added by Superstream import org.apache.kafka.common.superstream.Superstream; import static org.apache.kafka.common.superstream.Consts.CLIENT_TYPES_LIST; import java.util.*; // Added by Superstream ** import java.util.concurrent.ConcurrentHashMap; /** * A convenient base class for configurations to extend. * <p> * This class holds both the original configuration that was provided as well as * the parsed */ public class AbstractConfig { private final Logger log = LoggerFactory.getLogger(getClass()); /** * Configs for which values have been requested, used to detect unused configs. * This set must be concurrent modifiable and iterable. It will be modified * when directly accessed or as a result of RecordingMap access. 
*/ private final Set<String> used = ConcurrentHashMap.newKeySet(); /* the original values passed in by the user */ private final Map<String, ?> originals; /* the parsed values */ private final Map<String, Object> values; private final ConfigDef definition; public static final String CONFIG_PROVIDERS_CONFIG = "config.providers"; private static final String CONFIG_PROVIDERS_PARAM = ".param."; /** * Construct a configuration with a ConfigDef and the configuration properties, * which can include properties * for zero or more {@link ConfigProvider} that will be used to resolve * variables in configuration property * values. * <p> * The originals is a name-value pair configuration properties and optional * config provider configs. The * value of the configuration can be a variable as defined below or the actual * value. This constructor will * first instantiate the ConfigProviders using the config provider configs, then * it will find all the * variables in the values of the originals configurations, attempt to resolve * the variables using the named * ConfigProviders, and then parse and validate the configurations. * <p> * ConfigProvider configs can be passed either as configs in the originals map * or in the separate * configProviderProps map. If config providers properties are passed in the * configProviderProps any config * provider properties in originals map will be ignored. If ConfigProvider * properties are not provided, the * constructor will skip the variable substitution step and will simply validate * and parse the supplied * configuration. * <p> * The "{@code config.providers}" configuration property and all configuration * properties that begin with the * "{@code config.providers.}" prefix are reserved. The * "{@code config.providers}" configuration property * specifies the names of the config providers, and properties that begin with * the "{@code config.providers..}" * prefix correspond to the properties for that named provider. 
For example, the * "{@code config.providers..class}" * property specifies the name of the {@link ConfigProvider} implementation * class that should be used for * the provider. * <p> * The keys for ConfigProvider configs in both originals and configProviderProps * will start with the above * mentioned "{@code config.providers.}" prefix. * <p> * Variables have the form "${providerName:[path:]key}", where "providerName" is * the name of a ConfigProvider, * "path" is an optional string, and "key" is a required string. This variable * is resolved by passing the "key" * and optional "path" to a ConfigProvider with the specified name, and the * result from the ConfigProvider is * then used in place of the variable. Variables that cannot be resolved by the * AbstractConfig constructor will * be left unchanged in the configuration. * * @param definition the definition of the configurations; may not be * null * @param originals the configuration properties plus any optional * config provider properties; * @param configProviderProps the map of properties of config providers which * will be instantiated by * the constructor to resolve any variables in * {@code originals}; may be null or empty * @param doLog whether the configurations should be logged */ @SuppressWarnings("unchecked") // ** type Added by Superstream public AbstractConfig(ConfigDef definition, Map<?, ?> originals, Map<String, ?> configProviderProps, boolean doLog, String type) { /* check that all the keys are really strings */ for (Map.Entry<?, ?> entry : originals.entrySet()) if (!(entry.getKey() instanceof String)) throw new ConfigException(entry.getKey().toString(), entry.getValue(), "Key must be a string."); // ** Added by Superstream if (type != null && Arrays.asList(CLIENT_TYPES_LIST).contains(type.toLowerCase())) { originals = Superstream.initSuperstreamConfig((Map<String, Object>) originals, type); } // Added by Superstream ** this.originals = resolveConfigVariables(configProviderProps, (Map<String, 
Object>) originals); this.values = definition.parse(this.originals); Map<String, Object> configUpdates = postProcessParsedConfig(Collections.unmodifiableMap(this.values)); for (Map.Entry<String, Object> update : configUpdates.entrySet()) { this.values.put(update.getKey(), update.getValue()); } definition.parse(this.values); this.definition = definition; if (doLog) logAll(); } public AbstractConfig(ConfigDef definition, Map<?, ?> originals, Map<String, ?> configProviderProps, boolean doLog) { this(definition, originals, configProviderProps, doLog, ""); } /** * Construct a configuration with a ConfigDef and the configuration properties, * which can include properties for zero or more {@link ConfigProvider} * that will be used to resolve variables in configuration property values. * * @param definition the definition of the configurations; may not be null * @param originals the configuration properties plus any optional config * provider properties; may not be null */ public AbstractConfig(ConfigDef definition, Map<?, ?> originals) { this(definition, originals, Collections.emptyMap(), true); } // ** Added by Superstream public AbstractConfig(ConfigDef definition, Map<?, ?> originals, String type) { this(definition, originals, Collections.emptyMap(), true, type); } // Added by Superstream ** /** * Construct a configuration with a ConfigDef and the configuration properties, * which can include properties for zero or more {@link ConfigProvider} * that will be used to resolve variables in configuration property values. 
* * @param definition the definition of the configurations; may not be null * @param originals the configuration properties plus any optional config * provider properties; may not be null * @param doLog whether the configurations should be logged */ public AbstractConfig(ConfigDef definition, Map<?, ?> originals, boolean doLog) { this(definition, originals, Collections.emptyMap(), doLog); } // ** Added by Superstream public AbstractConfig(ConfigDef definition, Map<?, ?> originals, boolean doLog, String type) { this(definition, originals, Collections.emptyMap(), doLog, type); } // Added by Superstream ** /** * Called directly after user configs got parsed (and thus default values got * set). * This allows to change default values for "secondary defaults" if required. * * @param parsedValues unmodifiable map of current configuration * @return a map of updates that should be applied to the configuration (will be * validated to prevent bad updates) */ protected Map<String, Object> postProcessParsedConfig(Map<String, Object> parsedValues) { return Collections.emptyMap(); } protected Object get(String key) { if (!values.containsKey(key)) throw new ConfigException(String.format("Unknown configuration '%s'", key)); used.add(key); return values.get(key); } public void ignore(String key) { used.add(key); } public Short getShort(String key) { return (Short) get(key); } public Integer getInt(String key) { return (Integer) get(key); } public Long getLong(String key) { return (Long) get(key); } public Double getDouble(String key) { return (Double) get(key); } @SuppressWarnings("unchecked") public List<String> getList(String key) { return (List<String>) get(key); } public Boolean getBoolean(String key) { return (Boolean) get(key); } public String getString(String key) { return (String) get(key); } public ConfigDef.Type typeOf(String key) { ConfigDef.ConfigKey configKey = definition.configKeys().get(key); if (configKey == null) return null; return configKey.type; } public String 
documentationOf(String key) { ConfigDef.ConfigKey configKey = definition.configKeys().get(key); if (configKey == null) return null; return configKey.documentation; } public Password getPassword(String key) { return (Password) get(key); } public Class<?> getClass(String key) { return (Class<?>) get(key); } public Set<String> unused() { Set<String> keys = new HashSet<>(originals.keySet()); keys.removeAll(used); return keys; } public Map<String, Object> originals() { Map<String, Object> copy = new RecordingMap<>(); copy.putAll(originals); return copy; } public Map<String, Object> originals(Map<String, Object> configOverrides) { Map<String, Object> copy = new RecordingMap<>(); copy.putAll(originals); copy.putAll(configOverrides); return copy; } /** * Get all the original settings, ensuring that all values are of type String. * * @return the original settings * @throws ClassCastException if any of the values are not strings */ public Map<String, String> originalsStrings() { Map<String, String> copy = new RecordingMap<>(); for (Map.Entry<String, ?> entry : originals.entrySet()) { if (!(entry.getValue() instanceof String)) throw new ClassCastException("Non-string value found in original settings for key " + entry.getKey() + ": " + (entry.getValue() == null ? null : entry.getValue().getClass().getName())); copy.put(entry.getKey(), (String) entry.getValue()); } return copy; } /** * Gets all original settings with the given prefix, stripping the prefix before * adding it to the output. * * @param prefix the prefix to use as a filter * @return a Map containing the settings with the prefix */ public Map<String, Object> originalsWithPrefix(String prefix) { return originalsWithPrefix(prefix, true); } /** * Gets all original settings with the given prefix. 
 *
 * @param prefix the prefix to use as a filter
 * @param strip  strip the prefix before adding to the output if set true
 * @return a Map containing the settings with the prefix
 */
public Map<String, Object> originalsWithPrefix(String prefix, boolean strip) {
    Map<String, Object> result = new RecordingMap<>(prefix, false);
    result.putAll(Utils.entriesWithPrefix(originals, prefix, strip));
    return result;
}

/**
 * Put all keys that do not start with {@code prefix} and their parsed values in
 * the result map and then put all the remaining keys with the prefix stripped
 * and their parsed values in the result map.
 * <p>
 * This is useful if one wants to allow prefixed configs to override default ones.
 * <p>
 * Two forms of prefixes are supported:
 * <ul>
 * <li>listener.name.{listenerName}.some.prop: If the provided prefix is
 * `listener.name.{listenerName}.`, the key `some.prop` with the value parsed
 * using the definition of `some.prop` is returned.</li>
 * <li>listener.name.{listenerName}.{mechanism}.some.prop: If the provided
 * prefix is `listener.name.{listenerName}.`, the key `{mechanism}.some.prop`
 * with the value parsed using the definition of `some.prop` is returned.
 * This is used to provide per-mechanism configs for a broker listener (e.g
 * sasl.jaas.config)</li>
 * </ul>
 * </p>
 */
public Map<String, Object> valuesWithPrefixOverride(String prefix) {
    // Start from the fully parsed values; matching prefixed originals below override them.
    Map<String, Object> result = new RecordingMap<>(values(), prefix, true);
    for (Map.Entry<String, ?> entry : originals.entrySet()) {
        if (entry.getKey().startsWith(prefix) && entry.getKey().length() > prefix.length()) {
            String keyWithNoPrefix = entry.getKey().substring(prefix.length());
            ConfigDef.ConfigKey configKey = definition.configKeys().get(keyWithNoPrefix);
            if (configKey != null)
                result.put(keyWithNoPrefix, definition.parseValue(configKey, entry.getValue(), true));
            else {
                // Not a direct match: strip one more dotted component (the secondary
                // prefix, e.g. a mechanism name) and retry against the definition.
                String keyWithNoSecondaryPrefix = keyWithNoPrefix.substring(keyWithNoPrefix.indexOf('.') + 1);
                configKey = definition.configKeys().get(keyWithNoSecondaryPrefix);
                if (configKey != null)
                    result.put(keyWithNoPrefix, definition.parseValue(configKey, entry.getValue(), true));
            }
        }
    }
    return result;
}

/**
 * If at least one key with {@code prefix} exists, all prefixed values will be
 * parsed and put into map. If no value with {@code prefix} exists all
 * unprefixed values will be returned.
 * <p>
 * This is useful if one wants to allow prefixed configs to override default
 * ones, but wants to use either only prefixed configs or only regular configs,
 * but not mix them.
 */
public Map<String, Object> valuesWithPrefixAllOrNothing(String prefix) {
    Map<String, Object> withPrefix = originalsWithPrefix(prefix, true);
    if (withPrefix.isEmpty()) {
        // No prefixed configs at all: fall back to the unprefixed parsed values.
        return new RecordingMap<>(values(), "", true);
    } else {
        // At least one prefixed config: use only the prefixed ones, parsed per definition.
        Map<String, Object> result = new RecordingMap<>(prefix, true);
        for (Map.Entry<String, ?> entry : withPrefix.entrySet()) {
            ConfigDef.ConfigKey configKey = definition.configKeys().get(entry.getKey());
            if (configKey != null)
                result.put(entry.getKey(), definition.parseValue(configKey, entry.getValue(), true));
        }
        return result;
    }
}

public Map<String, ?> values() {
    return new RecordingMap<>(values);
}

// Same as values() but with configs flagged internal in the definition filtered out.
public Map<String, ?> nonInternalValues() {
    Map<String, Object> nonInternalConfigs = new RecordingMap<>();
    values.forEach((key, value) -> {
        ConfigDef.ConfigKey configKey = definition.configKeys().get(key);
        if (configKey == null || !configKey.internalConfig) {
            nonInternalConfigs.put(key, value);
        }
    });
    return nonInternalConfigs;
}

// Logs every resolved config value at INFO, sorted by key.
private void logAll() {
    StringBuilder b = new StringBuilder();
    b.append(getClass().getSimpleName());
    b.append(" values: ");
    b.append(Utils.NL);
    for (Map.Entry<String, Object> entry : new TreeMap<>(this.values).entrySet()) {
        b.append('\t');
        b.append(entry.getKey());
        b.append(" = ");
        b.append(entry.getValue());
        b.append(Utils.NL);
    }
    log.info(b.toString());
}

/**
 * Info level log for any unused configurations
 */
public void logUnused() {
    Set<String> unusedkeys = unused();
    if (!unusedkeys.isEmpty()) {
        log.info("These configurations '{}' were supplied but are not used yet.", unusedkeys);
    }
}

// Instantiates klass (either a class name String or a Class object), verifies it
// implements t, and configures it with configPairs if it is Configurable. On any
// failure after construction the instance is closed (if AutoCloseable) before rethrowing.
private <T> T getConfiguredInstance(Object klass, Class<T> t, Map<String, Object> configPairs) {
    if (klass == null)
        return null;
    Object o;
    if (klass instanceof String) {
        try {
            o = Utils.newInstance((String) klass, t);
        } catch (ClassNotFoundException e) {
            throw new KafkaException("Class " + klass + " cannot be found", e);
        }
    } else if (klass instanceof Class<?>) {
        o = Utils.newInstance((Class<?>) klass);
    } else
        throw new KafkaException("Unexpected element of type " + klass.getClass().getName() + ", expected String or Class");
    try {
        if (!t.isInstance(o))
            throw new KafkaException(klass + " is not an instance of " + t.getName());
        if (o instanceof Configurable)
            ((Configurable) o).configure(configPairs);
    } catch (Exception e) {
        maybeClose(o, "AutoCloseable object constructed and configured during failed call to getConfiguredInstance");
        throw e;
    }
    return t.cast(o);
}

/**
 * Get a configured instance of the given class specified by the given
 * configuration key. If the object implements Configurable configure it using
 * the configuration.
 *
 * @param key The configuration key for the class
 * @param t   The interface the class should implement
 * @return A configured instance of the class
 */
public <T> T getConfiguredInstance(String key, Class<T> t) {
    return getConfiguredInstance(key, t, Collections.emptyMap());
}

/**
 * Get a configured instance of the given class specified by the given
 * configuration key. If the object implements Configurable configure it using
 * the configuration.
 *
 * @param key             The configuration key for the class
 * @param t               The interface the class should implement
 * @param configOverrides override origin configs
 * @return A configured instance of the class
 */
public <T> T getConfiguredInstance(String key, Class<T> t, Map<String, Object> configOverrides) {
    Class<?> c = getClass(key);
    return getConfiguredInstance(c, t, originals(configOverrides));
}

/**
 * Get a list of configured instances of the given class specified by the given
 * configuration key. The configuration may specify either null or an empty
 * string to indicate no configured instances. In both cases, this method
 * returns an empty list to indicate no configured instances.
 *
 * @param key The configuration key for the class
 * @param t   The interface the class should implement
 * @return The list of configured instances
 */
public <T> List<T> getConfiguredInstances(String key, Class<T> t) {
    return getConfiguredInstances(key, t, Collections.emptyMap());
}

/**
 * Get a list of configured instances of the given class specified by the given
 * configuration key. The configuration may specify either null or an empty
 * string to indicate no configured instances. In both cases, this method
 * returns an empty list to indicate no configured instances.
 *
 * @param key             The configuration key for the class
 * @param t               The interface the class should implement
 * @param configOverrides Configuration overrides to use.
 * @return The list of configured instances
 */
public <T> List<T> getConfiguredInstances(String key, Class<T> t, Map<String, Object> configOverrides) {
    return getConfiguredInstances(getList(key), t, configOverrides);
}

/**
 * Get a list of configured instances of the given class specified by the given
 * configuration key. The configuration may specify either null or an empty
 * string to indicate no configured instances. In both cases, this method
 * returns an empty list to indicate no configured instances.
 *
 * @param classNames      The list of class names of the instances to create
 * @param t               The interface the class should implement
 * @param configOverrides Configuration overrides to use.
 * @return The list of configured instances
 */
public <T> List<T> getConfiguredInstances(List<String> classNames, Class<T> t, Map<String, Object> configOverrides) {
    List<T> objects = new ArrayList<>();
    if (classNames == null)
        return objects;
    Map<String, Object> configPairs = originals();
    configPairs.putAll(configOverrides);
    try {
        for (Object klass : classNames) {
            Object o = getConfiguredInstance(klass, t, configPairs);
            objects.add(t.cast(o));
        }
    } catch (Exception e) {
        // Close every instance created so far before propagating the failure.
        for (Object object : objects) {
            maybeClose(object, "AutoCloseable object constructed and configured during failed call to getConfiguredInstances");
        }
        throw e;
    }
    return objects;
}

// Best-effort close for instances that happen to be AutoCloseable; never throws.
private static void maybeClose(Object object, String name) {
    if (object instanceof AutoCloseable) {
        Utils.closeQuietly((AutoCloseable) object, name);
    }
}

private Map<String, String> extractPotentialVariables(Map<?, ?> configMap) {
    // Variables are tuples of the form "${providerName:[path:]key}". From the
    // configMap we extract the subset of configs with string
    // values as potential variables.
    Map<String, String> configMapAsString = new HashMap<>();
    for (Map.Entry<?, ?> entry : configMap.entrySet()) {
        if (entry.getValue() instanceof String)
            configMapAsString.put((String) entry.getKey(), (String) entry.getValue());
    }
    return configMapAsString;
}

/**
 * Instantiates given list of config providers and fetches the actual values of
 * config variables from the config providers. Returns a map of config key and
 * resolved values.
 *
 * @param configProviderProps The map of config provider configs
 * @param originals           The map of raw configs.
 * @return map of resolved config variable.
 */
private Map<String, ?> resolveConfigVariables(Map<String, ?> configProviderProps, Map<String, Object> originals) {
    Map<String, String> providerConfigString;
    Map<String, ?> configProperties;
    Map<String, Object> resolvedOriginals = new HashMap<>();
    // As variable configs are strings, parse the originals and obtain the potential
    // variable configs.
    Map<String, String> indirectVariables = extractPotentialVariables(originals);
    resolvedOriginals.putAll(originals);
    // If no dedicated provider configs were supplied, the providers are configured
    // from the originals themselves.
    if (configProviderProps == null || configProviderProps.isEmpty()) {
        providerConfigString = indirectVariables;
        configProperties = originals;
    } else {
        providerConfigString = extractPotentialVariables(configProviderProps);
        configProperties = configProviderProps;
    }
    Map<String, ConfigProvider> providers = instantiateConfigProviders(providerConfigString, configProperties);
    if (!providers.isEmpty()) {
        ConfigTransformer configTransformer = new ConfigTransformer(providers);
        ConfigTransformerResult result = configTransformer.transform(indirectVariables);
        if (!result.data().isEmpty()) {
            resolvedOriginals.putAll(result.data());
        }
    }
    // Providers are only needed for the one-shot transform above; release them now.
    providers.values().forEach(x -> Utils.closeQuietly(x, "config provider"));
    return new ResolvingMap<>(resolvedOriginals, originals);
}

// Extracts the provider-specific settings under the given prefix, stripping the prefix.
private Map<String, Object> configProviderProperties(String configProviderPrefix, Map<String, ?> providerConfigProperties) {
    Map<String, Object> result = new HashMap<>();
    for (Map.Entry<String, ?> entry : providerConfigProperties.entrySet()) {
        String key = entry.getKey();
        if (key.startsWith(configProviderPrefix) && key.length() > configProviderPrefix.length()) {
            result.put(key.substring(configProviderPrefix.length()), entry.getValue());
        }
    }
    return result;
}

/**
 * Instantiates and configures the ConfigProviders. The config providers configs
 * are defined as follows:
 * config.providers : A comma-separated list of names for providers.
 * config.providers.{name}.class : The Java class name for a provider.
 * config.providers.{name}.param.{param-name} : A parameter to be passed to the
 * above Java class on initialization.
 * Returns a map of config provider name and its instance.
 *
 * @param indirectConfigs          The map of potential variable configs
 * @param providerConfigProperties The map of config provider configs
 * @return map of config provider name and its instance.
 */
private Map<String, ConfigProvider> instantiateConfigProviders(Map<String, String> indirectConfigs, Map<String, ?> providerConfigProperties) {
    final String configProviders = indirectConfigs.get(CONFIG_PROVIDERS_CONFIG);
    if (configProviders == null || configProviders.isEmpty()) {
        return Collections.emptyMap();
    }
    // Map provider name -> provider class name, taken from config.providers.{name}.class.
    Map<String, String> providerMap = new HashMap<>();
    for (String provider : configProviders.split(",")) {
        String providerClass = providerClassProperty(provider);
        if (indirectConfigs.containsKey(providerClass))
            providerMap.put(provider, indirectConfigs.get(providerClass));
    }
    // Instantiate Config Providers
    Map<String, ConfigProvider> configProviderInstances = new HashMap<>();
    for (Map.Entry<String, String> entry : providerMap.entrySet()) {
        try {
            // Each provider is configured with the settings under config.providers.{name}.param.
            String prefix = CONFIG_PROVIDERS_CONFIG + "." + entry.getKey() + CONFIG_PROVIDERS_PARAM;
            Map<String, ?> configProperties = configProviderProperties(prefix, providerConfigProperties);
            ConfigProvider provider = Utils.newInstance(entry.getValue(), ConfigProvider.class);
            provider.configure(configProperties);
            configProviderInstances.put(entry.getKey(), provider);
        } catch (ClassNotFoundException e) {
            log.error("Could not load config provider class " + entry.getValue(), e);
            throw new ConfigException(providerClassProperty(entry.getKey()), entry.getValue(), "Could not load config provider class or one of its dependencies");
        }
    }
    return configProviderInstances;
}

// Builds the "config.providers.{name}.class" property name for a provider.
private static String providerClassProperty(String providerName) {
    return String.format("%s.%s.class", CONFIG_PROVIDERS_CONFIG, providerName);
}

// Equality is defined solely on the raw original settings.
@Override
public boolean equals(Object o) {
    if (this == o)
        return true;
    if (o == null || getClass() != o.getClass())
        return false;
    AbstractConfig that = (AbstractConfig) o;
    return originals.equals(that.originals);
}

@Override
public int hashCode() {
    return originals.hashCode();
}

/**
 * Marks keys retrieved via `get` as used. This is needed because
 * `Configurable.configure` takes a `Map` instead of an `AbstractConfig` and we
 * can't change that without breaking public API like `Partitioner`.
 */
private class RecordingMap<V> extends HashMap<String, V> {

    // Prefix prepended to a key before recording it as used.
    private final String prefix;
    // When true, the bare (unprefixed) key is recorded as used as well.
    private final boolean withIgnoreFallback;

    RecordingMap() {
        this("", false);
    }

    RecordingMap(String prefix, boolean withIgnoreFallback) {
        this.prefix = prefix;
        this.withIgnoreFallback = withIgnoreFallback;
    }

    RecordingMap(Map<String, ? extends V> m) {
        this(m, "", false);
    }

    RecordingMap(Map<String, ? extends V> m, String prefix, boolean withIgnoreFallback) {
        super(m);
        this.prefix = prefix;
        this.withIgnoreFallback = withIgnoreFallback;
    }

    @Override
    public V get(Object key) {
        if (key instanceof String) {
            String stringKey = (String) key;
            String keyWithPrefix;
            if (prefix.isEmpty()) {
                keyWithPrefix = stringKey;
            } else {
                keyWithPrefix = prefix + stringKey;
            }
            ignore(keyWithPrefix);
            if (withIgnoreFallback)
                ignore(stringKey);
        }
        return super.get(key);
    }
}

/**
 * ResolvingMap keeps a track of the original map instance and the resolved configs.
 * The originals are tracked in a separate nested map and may be a `RecordingMap`;
 * thus any access to a value for a key needs to be recorded on the originals map.
 * The resolved configs are kept in the inherited map and are therefore mutable,
 * though any mutations are not applied to the originals.
 */
private static class ResolvingMap<V> extends HashMap<String, V> {

    private final Map<String, ?> originals;

    ResolvingMap(Map<String, ? extends V> resolved, Map<String, ?> originals) {
        super(resolved);
        this.originals = Collections.unmodifiableMap(originals);
    }

    @Override
    public V get(Object key) {
        if (key instanceof String && originals.containsKey(key)) {
            // Intentionally ignore the result; call just to mark the original entry as used
            originals.get(key);
        }
        // But always use the resolved entry
        return super.get(key);
    }
}

// Returns the live parsed-values map (not a defensive copy).
public Map<String, Object> getValues() {
    return values;
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/Config.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import java.util.List;

/**
 * An immutable holder for a list of {@link ConfigValue} entries.
 */
public class Config {

    // The per-config entries carried by this result; stored as supplied (no copy).
    private final List<ConfigValue> values;

    /**
     * @param configValues the entries to carry; stored by reference
     */
    public Config(List<ConfigValue> configValues) {
        this.values = configValues;
    }

    /** @return the entries supplied at construction */
    public List<ConfigValue> configValues() {
        return this.values;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigChangeCallback.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import org.apache.kafka.common.config.provider.ConfigProvider;

/**
 * A callback passed to {@link ConfigProvider} for subscribing to changes.
 */
public interface ConfigChangeCallback {

    /**
     * Performs an action when configuration data changes.
     *
     * @param path the path at which the data resides
     * @param data the configuration data
     */
    void onChange(String path, ConfigData data);
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import org.apache.kafka.common.config.provider.ConfigProvider;

import java.util.Map;

/**
 * Configuration data from a {@link ConfigProvider}: a key-value map plus an
 * optional time-to-live in milliseconds.
 */
public class ConfigData {

    private final Map<String, String> data;
    private final Long ttl;

    /**
     * Creates a new ConfigData with the given data and TTL (in milliseconds).
     *
     * @param data a Map of key-value pairs
     * @param ttl  the time-to-live of the data in milliseconds, or null if there is no TTL
     */
    public ConfigData(Map<String, String> data, Long ttl) {
        this.data = data;
        this.ttl = ttl;
    }

    /**
     * Creates a new ConfigData with the given data and no TTL.
     *
     * @param data a Map of key-value pairs
     */
    public ConfigData(Map<String, String> data) {
        this.data = data;
        this.ttl = null;
    }

    /**
     * Returns the data.
     *
     * @return a Map of key-value pairs
     */
    public Map<String, String> data() {
        return this.data;
    }

    /**
     * Returns the TTL (in milliseconds).
     *
     * @return the time-to-live of the data in milliseconds, or null if there is no TTL
     */
    public Long ttl() {
        return this.ttl;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigDef.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.common.utils.Utils;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * This class is used for specifying the set of expected configurations. For each configuration, you can specify
 * the name, the type, the default value, the documentation, the group information, the order in the group,
 * the width of the configuration value and the name suitable for display in the UI.
 *
 * You can provide special validation logic used for single configuration validation by overriding {@link Validator}.
 *
 * Moreover, you can specify the dependents of a configuration. The valid values and visibility of a configuration
 * may change according to the values of other configurations. You can override {@link Recommender} to get valid
 * values and set visibility of a configuration given the current configuration values.
 *
 * <p/>
 * To use the class:
 * <p/>
 * <pre>
 * ConfigDef defs = new ConfigDef();
 *
 * defs.define(&quot;config_with_default&quot;, Type.STRING, &quot;default string value&quot;, &quot;Configuration with default value.&quot;);
 * defs.define(&quot;config_with_validator&quot;, Type.INT, 42, Range.atLeast(0), &quot;Configuration with user provided validator.&quot;);
 * defs.define(&quot;config_with_dependents&quot;, Type.INT, &quot;Configuration with dependents.&quot;, &quot;group&quot;, 1, &quot;Config With Dependents&quot;, Arrays.asList(&quot;config_with_default&quot;,&quot;config_with_validator&quot;));
 *
 * Map&lt;String, String&gt; props = new HashMap&lt;&gt;();
 * props.put(&quot;config_with_default&quot;, &quot;some value&quot;);
 * props.put(&quot;config_with_dependents&quot;, &quot;some other value&quot;);
 *
 * Map&lt;String, Object&gt; configs = defs.parse(props);
 * // will return &quot;some value&quot;
 * String someConfig = (String) configs.get(&quot;config_with_default&quot;);
 * // will return default value of 42
 * int anotherConfig = (Integer) configs.get(&quot;config_with_validator&quot;);
 *
 * To validate the full configuration, use:
 * List&lt;Config&gt; configs = defs.validate(props);
 * The {@link Config} contains updated configuration information given the current configuration values.
 * </pre>
 * <p/>
 * This class can be used standalone or in combination with {@link AbstractConfig} which provides some additional
 * functionality for accessing configs.
 */
public class ConfigDef {

    private static final Pattern COMMA_WITH_WHITESPACE = Pattern.compile("\\s*,\\s*");

    /**
     * A unique Java object which represents the lack of a default value.
     */
    public static final Object NO_DEFAULT_VALUE = new Object();

    // Definitions keyed by config name; LinkedHashMap preserves definition order.
    private final Map<String, ConfigKey> configKeys;
    private final List<String> groups;
    // Lazily computed; null until first needed.
    private Set<String> configsWithNoParent;

    public ConfigDef() {
        configKeys = new LinkedHashMap<>();
        groups = new LinkedList<>();
        configsWithNoParent = null;
    }

    public ConfigDef(ConfigDef base) {
        configKeys = new LinkedHashMap<>(base.configKeys);
        groups = new LinkedList<>(base.groups);
        // It is not safe to copy this from the parent because we may subsequently add to the set of configs and
        // invalidate this
        configsWithNoParent = null;
    }

    /**
     * Returns unmodifiable set of properties names defined in this {@linkplain ConfigDef}
     *
     * @return new unmodifiable {@link Set} instance containing the keys
     */
    public Set<String> names() {
        return Collections.unmodifiableSet(configKeys.keySet());
    }

    // Map of config name -> declared default, only for keys that declare one.
    public Map<String, Object> defaultValues() {
        Map<String, Object> defaultValues = new HashMap<>();
        for (ConfigKey key : configKeys.values()) {
            if (key.defaultValue != NO_DEFAULT_VALUE)
                defaultValues.put(key.name, key.defaultValue);
        }
        return defaultValues;
    }

    // Registers a fully built ConfigKey; rejects duplicate names and records new groups.
    public ConfigDef define(ConfigKey key) {
        if (configKeys.containsKey(key.name)) {
            throw new ConfigException("Configuration " + key.name + " is defined twice.");
        }
        if (key.group != null && !groups.contains(key.group)) {
            groups.add(key.group);
        }
        configKeys.put(key.name, key);
        return this;
    }

    /**
     * Define a new configuration
     * @param name          the name of the config parameter
     * @param type          the type of the config
     * @param defaultValue  the default value to use if this config isn't present
     * @param validator     the validator to use in checking the correctness of the config
     * @param importance    the importance of this config
     * @param documentation the documentation string for the config
     * @param group         the group this config belongs to
     * @param orderInGroup  the order of this config in the group
     * @param width         the width of the config
     * @param displayName   the name suitable for display
     * @param dependents    the
configurations that are dependents of this configuration * @param recommender the recommender provides valid values given the parent configuration values * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender) { return define(new ConfigKey(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, dependents, recommender, false)); } /** * Define a new configuration with no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param validator the validator to use in checking the correctness of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param dependents the configurations that are dependents of this configuration * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents) { return define(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, dependents, null); } /** * Define a new configuration with no dependents * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param validator the validator to use in 
checking the correctness of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param recommender the recommender provides valid values given the parent configuration values * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender) { return define(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList(), recommender); } /** * Define a new configuration with no dependents and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param validator the validator to use in checking the correctness of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName) { return define(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList()); } /** * Define a new configuration with no special validation logic * @param name the 
name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param dependents the configurations that are dependents of this configuration * @param recommender the recommender provides valid values given the parent configuration values * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender) { return define(name, type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, dependents, recommender); } /** * Define a new configuration with no special validation logic and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param dependents the configurations that are dependents of this configuration * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents) { return define(name, type, defaultValue, 
null, importance, documentation, group, orderInGroup, width, displayName, dependents, null); } /** * Define a new configuration with no special validation logic and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param recommender the recommender provides valid values given the parent configuration values * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender) { return define(name, type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList(), recommender); } /** * Define a new configuration with no special validation logic, not dependents and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName) { return define(name, 
type, defaultValue, null, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList()); } /** * Define a new configuration with no default value and no special validation logic * @param name the name of the config parameter * @param type the type of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param dependents the configurations that are dependents of this configuration * @param recommender the recommender provides valid values given the parent configuration value * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender) { return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, dependents, recommender); } /** * Define a new configuration with no default value, no special validation logic and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param dependents the configurations that are dependents of this configuration * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List<String> 
dependents) { return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, dependents, null); } /** * Define a new configuration with no default value, no special validation logic and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @param recommender the recommender provides valid values given the parent configuration value * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, Recommender recommender) { return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, Collections.emptyList(), recommender); } /** * Define a new configuration with no default value, no special validation logic, no dependents and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @param group the group this config belongs to * @param orderInGroup the order of this config in the group * @param width the width of the config * @param displayName the name suitable for display * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName) { return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation, group, orderInGroup, width, displayName, 
Collections.emptyList()); } /** * Define a new configuration with no group, no order in group, no width, no display name, no dependents and no custom recommender * @param name the name of the config parameter * @param type the type of the config * @param defaultValue the default value to use if this config isn't present * @param validator the validator to use in checking the correctness of the config * @param importance the importance of this config * @param documentation the documentation string for the config * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation) { return define(name, type, defaultValue, validator, importance, documentation, null, -1, Width.NONE, name); } /** * Define a new configuration with no special validation logic * @param name The name of the config parameter * @param type The type of the config * @param defaultValue The default value to use if this config isn't present * @param importance The importance of this config: is this something you will likely need to change. * @param documentation The documentation string for the config * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation) { return define(name, type, defaultValue, null, importance, documentation); } /** * Define a new configuration with no default value and no special validation logic * @param name The name of the config parameter * @param type The type of the config * @param importance The importance of this config: is this something you will likely need to change. 
* @param documentation The documentation string for the config * @return This ConfigDef so you can chain calls */ public ConfigDef define(String name, Type type, Importance importance, String documentation) { return define(name, type, NO_DEFAULT_VALUE, null, importance, documentation); } /** * Define a new internal configuration. Internal configuration won't show up in the docs and aren't * intended for general use. * @param name The name of the config parameter * @param type The type of the config * @param defaultValue The default value to use if this config isn't present * @param importance The importance of this config (i.e. is this something you will likely need to change?) * @return This ConfigDef so you can chain calls */ public ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance) { return define(new ConfigKey(name, type, defaultValue, null, importance, "", "", -1, Width.NONE, name, Collections.emptyList(), null, true)); } /** * Define a new internal configuration. Internal configuration won't show up in the docs and aren't * intended for general use. * @param name The name of the config parameter * @param type The type of the config * @param defaultValue The default value to use if this config isn't present * @param validator The validator to use in checking the correctness of the config * @param importance The importance of this config (i.e. is this something you will likely need to change?) 
* @param documentation The documentation string for the config * @return This ConfigDef so you can chain calls */ public ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Validator validator, final Importance importance, final String documentation) { return define(new ConfigKey(name, type, defaultValue, validator, importance, documentation, "", -1, Width.NONE, name, Collections.emptyList(), null, true)); } /** * Get the configuration keys * @return a map containing all configuration keys */ public Map<String, ConfigKey> configKeys() { return configKeys; } /** * Get the groups for the configuration * @return a list of group names */ public List<String> groups() { return groups; } /** * Add standard SSL client configuration options. * @return this */ public ConfigDef withClientSslSupport() { SslConfigs.addClientSslSupport(this); return this; } /** * Add standard SASL client configuration options. * @return this */ public ConfigDef withClientSaslSupport() { SaslConfigs.addClientSaslSupport(this); return this; } /** * Parse and validate configs against this configuration definition. The input is a map of configs. It is expected * that the keys of the map are strings, but the values can either be strings or they may already be of the * appropriate type (int, string, etc). This will work equally well with either java.util.Properties instances or a * programmatically constructed map. * * @param props The configs to parse and validate. * @return Parsed and validated configs. The key will be the config name and the value will be the value parsed into * the appropriate type (int, string, etc). 
*/ public Map<String, Object> parse(Map<?, ?> props) { // Check all configurations are defined List<String> undefinedConfigKeys = undefinedDependentConfigs(); if (!undefinedConfigKeys.isEmpty()) { String joined = Utils.join(undefinedConfigKeys, ","); throw new ConfigException("Some configurations in are referred in the dependents, but not defined: " + joined); } // parse all known keys Map<String, Object> values = new HashMap<>(); for (ConfigKey key : configKeys.values()) values.put(key.name, parseValue(key, props.get(key.name), props.containsKey(key.name))); return values; } Object parseValue(ConfigKey key, Object value, boolean isSet) { Object parsedValue; if (isSet) { parsedValue = parseType(key.name, value, key.type); // props map doesn't contain setting, the key is required because no default value specified - its an error } else if (NO_DEFAULT_VALUE.equals(key.defaultValue)) { throw new ConfigException("Missing required configuration \"" + key.name + "\" which has no default value."); } else { // otherwise assign setting its default value parsedValue = key.defaultValue; } if (key.validator != null) { key.validator.ensureValid(key.name, parsedValue); } return parsedValue; } /** * Validate the current configuration values with the configuration definition. * @param props the current configuration values * @return List of Config, each Config contains the updated configuration information given * the current configuration values. 
*/ public List<ConfigValue> validate(Map<String, String> props) { return new ArrayList<>(validateAll(props).values()); } public Map<String, ConfigValue> validateAll(Map<String, String> props) { Map<String, ConfigValue> configValues = new HashMap<>(); for (String name: configKeys.keySet()) { configValues.put(name, new ConfigValue(name)); } List<String> undefinedConfigKeys = undefinedDependentConfigs(); for (String undefinedConfigKey: undefinedConfigKeys) { ConfigValue undefinedConfigValue = new ConfigValue(undefinedConfigKey); undefinedConfigValue.addErrorMessage(undefinedConfigKey + " is referred in the dependents, but not defined."); undefinedConfigValue.visible(false); configValues.put(undefinedConfigKey, undefinedConfigValue); } Map<String, Object> parsed = parseForValidate(props, configValues); return validate(parsed, configValues); } // package accessible for testing Map<String, Object> parseForValidate(Map<String, String> props, Map<String, ConfigValue> configValues) { Map<String, Object> parsed = new HashMap<>(); Set<String> configsWithNoParent = getConfigsWithNoParent(); for (String name: configsWithNoParent) { parseForValidate(name, props, parsed, configValues); } return parsed; } private Map<String, ConfigValue> validate(Map<String, Object> parsed, Map<String, ConfigValue> configValues) { Set<String> configsWithNoParent = getConfigsWithNoParent(); for (String name: configsWithNoParent) { validate(name, parsed, configValues); } return configValues; } private List<String> undefinedDependentConfigs() { Set<String> undefinedConfigKeys = new HashSet<>(); for (ConfigKey configKey : configKeys.values()) { for (String dependent: configKey.dependents) { if (!configKeys.containsKey(dependent)) { undefinedConfigKeys.add(dependent); } } } return new ArrayList<>(undefinedConfigKeys); } // package accessible for testing Set<String> getConfigsWithNoParent() { if (this.configsWithNoParent != null) { return this.configsWithNoParent; } Set<String> configsWithParent = new 
HashSet<>(); for (ConfigKey configKey: configKeys.values()) { List<String> dependents = configKey.dependents; configsWithParent.addAll(dependents); } Set<String> configs = new HashSet<>(configKeys.keySet()); configs.removeAll(configsWithParent); this.configsWithNoParent = configs; return configs; } private void parseForValidate(String name, Map<String, String> props, Map<String, Object> parsed, Map<String, ConfigValue> configs) { if (!configKeys.containsKey(name)) { return; } ConfigKey key = configKeys.get(name); ConfigValue config = configs.get(name); Object value = null; if (props.containsKey(key.name)) { try { value = parseType(key.name, props.get(key.name), key.type); } catch (ConfigException e) { config.addErrorMessage(e.getMessage()); } } else if (NO_DEFAULT_VALUE.equals(key.defaultValue)) { config.addErrorMessage("Missing required configuration \"" + key.name + "\" which has no default value."); } else { value = key.defaultValue; } if (key.validator != null) { try { key.validator.ensureValid(key.name, value); } catch (ConfigException e) { config.addErrorMessage(e.getMessage()); } } config.value(value); parsed.put(name, value); for (String dependent: key.dependents) { parseForValidate(dependent, props, parsed, configs); } } private void validate(String name, Map<String, Object> parsed, Map<String, ConfigValue> configs) { if (!configKeys.containsKey(name)) { return; } ConfigKey key = configKeys.get(name); ConfigValue value = configs.get(name); if (key.recommender != null) { try { List<Object> recommendedValues = key.recommender.validValues(name, parsed); List<Object> originalRecommendedValues = value.recommendedValues(); if (!originalRecommendedValues.isEmpty()) { Set<Object> originalRecommendedValueSet = new HashSet<>(originalRecommendedValues); recommendedValues.removeIf(o -> !originalRecommendedValueSet.contains(o)); } value.recommendedValues(recommendedValues); value.visible(key.recommender.visible(name, parsed)); } catch (ConfigException e) { 
value.addErrorMessage(e.getMessage()); } } configs.put(name, value); for (String dependent: key.dependents) { validate(dependent, parsed, configs); } } /** * Parse a value according to its expected type. * @param name The config name * @param value The config value * @param type The expected type * @return The parsed object */ public static Object parseType(String name, Object value, Type type) { try { if (value == null) return null; String trimmed = null; if (value instanceof String) trimmed = ((String) value).trim(); switch (type) { case BOOLEAN: if (value instanceof String) { if (trimmed.equalsIgnoreCase("true")) return true; else if (trimmed.equalsIgnoreCase("false")) return false; else throw new ConfigException(name, value, "Expected value to be either true or false"); } else if (value instanceof Boolean) return value; else throw new ConfigException(name, value, "Expected value to be either true or false"); case PASSWORD: if (value instanceof Password) return value; else if (value instanceof String) return new Password(trimmed); else throw new ConfigException(name, value, "Expected value to be a string, but it was a " + value.getClass().getName()); case STRING: if (value instanceof String) return trimmed; else throw new ConfigException(name, value, "Expected value to be a string, but it was a " + value.getClass().getName()); case INT: if (value instanceof Integer) { return value; } else if (value instanceof String) { return Integer.parseInt(trimmed); } else { throw new ConfigException(name, value, "Expected value to be a 32-bit integer, but it was a " + value.getClass().getName()); } case SHORT: if (value instanceof Short) { return value; } else if (value instanceof String) { return Short.parseShort(trimmed); } else { throw new ConfigException(name, value, "Expected value to be a 16-bit integer (short), but it was a " + value.getClass().getName()); } case LONG: if (value instanceof Integer) return ((Integer) value).longValue(); if (value instanceof Long) 
return value; else if (value instanceof String) return Long.parseLong(trimmed); else throw new ConfigException(name, value, "Expected value to be a 64-bit integer (long), but it was a " + value.getClass().getName()); case DOUBLE: if (value instanceof Number) return ((Number) value).doubleValue(); else if (value instanceof String) return Double.parseDouble(trimmed); else throw new ConfigException(name, value, "Expected value to be a double, but it was a " + value.getClass().getName()); case LIST: if (value instanceof List) return value; else if (value instanceof String) if (trimmed.isEmpty()) return Collections.emptyList(); else return Arrays.asList(COMMA_WITH_WHITESPACE.split(trimmed, -1)); else throw new ConfigException(name, value, "Expected a comma separated list."); case CLASS: if (value instanceof Class) return value; else if (value instanceof String) { ClassLoader contextOrKafkaClassLoader = Utils.getContextOrKafkaClassLoader(); // Use loadClass here instead of Class.forName because the name we use here may be an alias // and not match the name of the class that gets loaded. If that happens, Class.forName can // throw an exception. Class<?> klass = contextOrKafkaClassLoader.loadClass(trimmed); // Invoke forName here with the true name of the requested class to cause class // initialization to take place. 
return Class.forName(klass.getName(), true, contextOrKafkaClassLoader); } else throw new ConfigException(name, value, "Expected a Class instance or class name."); default: throw new IllegalStateException("Unknown type."); } } catch (NumberFormatException e) { throw new ConfigException(name, value, "Not a number of type " + type); } catch (ClassNotFoundException e) { throw new ConfigException(name, value, "Class " + value + " could not be found."); } } public static String convertToString(Object parsedValue, Type type) { if (parsedValue == null) { return null; } if (type == null) { return parsedValue.toString(); } switch (type) { case BOOLEAN: case SHORT: case INT: case LONG: case DOUBLE: case STRING: case PASSWORD: return parsedValue.toString(); case LIST: List<?> valueList = (List<?>) parsedValue; return Utils.join(valueList, ","); case CLASS: Class<?> clazz = (Class<?>) parsedValue; return clazz.getName(); default: throw new IllegalStateException("Unknown type."); } } /** * Converts a map of config (key, value) pairs to a map of strings where each value * is converted to a string. This method should be used with care since it stores * actual password values to String. Values from this map should never be used in log entries. 
*/
    public static Map<String, String> convertToStringMapWithPasswordValues(Map<String, ?> configs) {
        Map<String, String> result = new HashMap<>();
        for (Map.Entry<String, ?> entry : configs.entrySet()) {
            Object value = entry.getValue();
            String strValue;
            // Passwords are deliberately unwrapped to their clear-text value here.
            if (value instanceof Password)
                strValue = ((Password) value).value();
            else if (value instanceof List)
                strValue = convertToString(value, Type.LIST);
            else if (value instanceof Class)
                strValue = convertToString(value, Type.CLASS);
            else
                strValue = convertToString(value, null);
            if (strValue != null)
                result.put(entry.getKey(), strValue);
        }
        return result;
    }

    /**
     * The config types
     */
    public enum Type {
        BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD;

        // Only PASSWORD values are treated as sensitive.
        public boolean isSensitive() {
            return this == PASSWORD;
        }
    }

    /**
     * The importance level for a configuration
     */
    public enum Importance {
        HIGH, MEDIUM, LOW
    }

    /**
     * The width of a configuration value
     */
    public enum Width {
        NONE, SHORT, MEDIUM, LONG
    }

    /**
     * This is used by the {@link #validate(Map)} to get valid values for a configuration given the current
     * configuration values in order to perform full configuration validation and visibility modification.
     * In case that there are dependencies between configurations, the valid values and visibility
     * for a configuration may change given the values of other configurations.
     */
    public interface Recommender {

        /**
         * The valid values for the configuration given the current configuration values.
         * @param name The name of the configuration
         * @param parsedConfig The parsed configuration values
         * @return The list of valid values. To function properly, the returned objects should have the type
         * defined for the configuration using the recommender.
         */
        List<Object> validValues(String name, Map<String, Object> parsedConfig);

        /**
         * Set the visibility of the configuration given the current configuration values.
         * @param name The name of the configuration
         * @param parsedConfig The parsed configuration values
         * @return The visibility of the configuration
         */
        boolean visible(String name, Map<String, Object> parsedConfig);
    }

    /**
     * Validation logic the user may provide to perform single configuration validation.
     */
    public interface Validator {
        /**
         * Perform single configuration validation.
         * @param name The name of the configuration
         * @param value The value of the configuration
         * @throws ConfigException if the value is invalid.
         */
        void ensureValid(String name, Object value);
    }

    /**
     * Validation logic for numeric ranges
     */
    public static class Range implements Validator {
        private final Number min;
        private final Number max;

        /**
         * A numeric range with inclusive upper bound and inclusive lower bound.
         * A null bound means that side is unbounded.
         * @param min the lower bound
         * @param max the upper bound
         */
        private Range(Number min, Number max) {
            this.min = min;
            this.max = max;
        }

        /**
         * A numeric range that checks only the lower bound
         *
         * @param min The minimum acceptable value
         */
        public static Range atLeast(Number min) {
            return new Range(min, null);
        }

        /**
         * A numeric range that checks both the upper (inclusive) and lower bound
         */
        public static Range between(Number min, Number max) {
            return new Range(min, max);
        }

        public void ensureValid(String name, Object o) {
            if (o == null)
                throw new ConfigException(name, null, "Value must be non-null");
            // Comparison is done on the double value regardless of the concrete Number subtype.
            Number n = (Number) o;
            if (min != null && n.doubleValue() < min.doubleValue())
                throw new ConfigException(name, o, "Value must be at least " + min);
            if (max != null && n.doubleValue() > max.doubleValue())
                throw new ConfigException(name, o, "Value must be no more than " + max);
        }

        public String toString() {
            if (min == null && max == null)
                return "[...]";
            else if (min == null)
                return "[...," + max + "]";
            else if (max == null)
                return "[" + min + ",...]";
            else
                return "[" + min + ",...," + max + "]";
        }
    }

    public static class ValidList implements Validator {

        final ValidString validString;

        private
ValidList(List<String> validStrings) { this.validString = new ValidString(validStrings); } public static ValidList in(String... validStrings) { return new ValidList(Arrays.asList(validStrings)); } @Override public void ensureValid(final String name, final Object value) { @SuppressWarnings("unchecked") List<String> values = (List<String>) value; for (String string : values) { validString.ensureValid(name, string); } } public String toString() { return validString.toString(); } } public static class ValidString implements Validator { final List<String> validStrings; private ValidString(List<String> validStrings) { this.validStrings = validStrings; } public static ValidString in(String... validStrings) { return new ValidString(Arrays.asList(validStrings)); } @Override public void ensureValid(String name, Object o) { String s = (String) o; if (!validStrings.contains(s)) { throw new ConfigException(name, o, "String must be one of: " + Utils.join(validStrings, ", ")); } } public String toString() { return "[" + Utils.join(validStrings, ", ") + "]"; } } public static class CaseInsensitiveValidString implements Validator { final Set<String> validStrings; private CaseInsensitiveValidString(List<String> validStrings) { this.validStrings = validStrings.stream() .map(s -> s.toUpperCase(Locale.ROOT)) .collect(Collectors.toSet()); } public static CaseInsensitiveValidString in(String... 
validStrings) { return new CaseInsensitiveValidString(Arrays.asList(validStrings)); } @Override public void ensureValid(String name, Object o) { String s = (String) o; if (s == null || !validStrings.contains(s.toUpperCase(Locale.ROOT))) { throw new ConfigException(name, o, "String must be one of (case insensitive): " + Utils.join(validStrings, ", ")); } } public String toString() { return "(case insensitive) [" + Utils.join(validStrings, ", ") + "]"; } } public static class NonNullValidator implements Validator { @Override public void ensureValid(String name, Object value) { if (value == null) { // Pass in the string null to avoid the spotbugs warning throw new ConfigException(name, "null", "entry must be non null"); } } public String toString() { return "non-null string"; } } public static class LambdaValidator implements Validator { BiConsumer<String, Object> ensureValid; Supplier<String> toStringFunction; private LambdaValidator(BiConsumer<String, Object> ensureValid, Supplier<String> toStringFunction) { this.ensureValid = ensureValid; this.toStringFunction = toStringFunction; } public static LambdaValidator with(BiConsumer<String, Object> ensureValid, Supplier<String> toStringFunction) { return new LambdaValidator(ensureValid, toStringFunction); } @Override public void ensureValid(String name, Object value) { ensureValid.accept(name, value); } @Override public String toString() { return toStringFunction.get(); } } public static class CompositeValidator implements Validator { private final List<Validator> validators; private CompositeValidator(List<Validator> validators) { this.validators = Collections.unmodifiableList(validators); } public static CompositeValidator of(Validator... 
validators) { return new CompositeValidator(Arrays.asList(validators)); } @Override public void ensureValid(String name, Object value) { for (Validator validator: validators) { validator.ensureValid(name, value); } } @Override public String toString() { if (validators == null) return ""; StringBuilder desc = new StringBuilder(); for (Validator v: validators) { if (desc.length() > 0) { desc.append(',').append(' '); } desc.append(v); } return desc.toString(); } } public static class NonEmptyString implements Validator { @Override public void ensureValid(String name, Object o) { String s = (String) o; if (s != null && s.isEmpty()) { throw new ConfigException(name, o, "String must be non-empty"); } } @Override public String toString() { return "non-empty string"; } } public static class NonEmptyStringWithoutControlChars implements Validator { public static NonEmptyStringWithoutControlChars nonEmptyStringWithoutControlChars() { return new NonEmptyStringWithoutControlChars(); } @Override public void ensureValid(String name, Object value) { String s = (String) value; if (s == null) { // This can happen during creation of the config object due to no default value being defined for the // name configuration - a missing name parameter is caught when checking for mandatory parameters, // thus we can ok a null value here return; } else if (s.isEmpty()) { throw new ConfigException(name, value, "String may not be empty"); } // Check name string for illegal characters ArrayList<Integer> foundIllegalCharacters = new ArrayList<>(); for (int i = 0; i < s.length(); i++) { if (Character.isISOControl(s.codePointAt(i))) { foundIllegalCharacters.add(s.codePointAt(i)); } } if (!foundIllegalCharacters.isEmpty()) { throw new ConfigException(name, value, "String may not contain control sequences but had the following ASCII chars: " + Utils.join(foundIllegalCharacters, ", ")); } } public String toString() { return "non-empty string without ISO control characters"; } } public static class 
ListSize implements Validator {
    final int maxSize;

    private ListSize(final int maxSize) {
        this.maxSize = maxSize;
    }

    /** Creates a validator that rejects lists with more than {@code maxSize} elements. */
    public static ListSize atMostOfSize(final int maxSize) {
        return new ListSize(maxSize);
    }

    @Override
    public void ensureValid(final String name, final Object value) {
        @SuppressWarnings("unchecked")
        List<String> values = (List<String>) value;
        if (values.size() > maxSize) {
            throw new ConfigException(name, value, "exceeds maximum list size of [" + maxSize + "].");
        }
    }

    @Override
    public String toString() {
        return "List containing maximum of " + maxSize + " elements";
    }
}

/**
 * Holds the definition of a single configuration key: its type, default value,
 * validator, importance and documentation/UI metadata.
 */
public static class ConfigKey {
    public final String name;
    public final Type type;
    public final String documentation;
    public final Object defaultValue;
    public final Validator validator;
    public final Importance importance;
    public final String group;
    public final int orderInGroup;
    public final Width width;
    public final String displayName;
    public final List<String> dependents;
    public final Recommender recommender;
    public final boolean internalConfig;

    public ConfigKey(String name, Type type, Object defaultValue, Validator validator,
                     Importance importance, String documentation, String group,
                     int orderInGroup, Width width, String displayName,
                     List<String> dependents, Recommender recommender, boolean internalConfig) {
        this.name = name;
        this.type = type;
        // NO_DEFAULT_VALUE is a sentinel meaning "required"; only real defaults are parsed.
        this.defaultValue = NO_DEFAULT_VALUE.equals(defaultValue)
                ? NO_DEFAULT_VALUE
                : parseType(name, defaultValue, type);
        this.validator = validator;
        this.importance = importance;
        // Validate the default eagerly so that a bad default fails at definition time,
        // not when a config is first parsed.
        if (this.validator != null && hasDefault())
            this.validator.ensureValid(name, this.defaultValue);
        this.documentation = documentation;
        this.dependents = dependents;
        this.group = group;
        this.orderInGroup = orderInGroup;
        this.width = width;
        this.displayName = displayName;
        this.recommender = recommender;
        this.internalConfig = internalConfig;
    }

    /** Returns true if this key has a real default (i.e. is not a required config). */
    public boolean hasDefault() {
        return !NO_DEFAULT_VALUE.equals(this.defaultValue);
    }

    public Type type() {
        return type;
    }
}

/** Column headers used by the documentation generators below. */
protected List<String> headers() {
    return Arrays.asList("Name", "Description", "Type", "Default", "Valid Values", "Importance");
}

/** Renders a single documentation cell for the given config key and column name. */
protected String getConfigValue(ConfigKey key, String headerName) {
    switch (headerName) {
        case "Name":
            return key.name;
        case "Description":
            return key.documentation;
        case "Type":
            return key.type.toString().toLowerCase(Locale.ROOT);
        case "Default":
            if (key.hasDefault()) {
                if (key.defaultValue == null)
                    return "null";
                String defaultValueStr = convertToString(key.defaultValue, key.type);
                if (defaultValueStr.isEmpty())
                    return "\"\"";
                else {
                    String suffix = "";
                    // Append a human-friendly unit hint for byte- and millisecond-valued configs.
                    if (key.name.endsWith(".bytes")) {
                        suffix = niceMemoryUnits(((Number) key.defaultValue).longValue());
                    } else if (key.name.endsWith(".ms")) {
                        suffix = niceTimeUnits(((Number) key.defaultValue).longValue());
                    }
                    return defaultValueStr + suffix;
                }
            } else
                return "";
        case "Valid Values":
            return key.validator != null ? key.validator.toString() : "";
        case "Importance":
            return key.importance.toString().toLowerCase(Locale.ROOT);
        default:
            throw new RuntimeException("Can't find value for header '" + headerName + "' in " + key.name);
    }
}

/**
 * Returns a binary-unit suffix (e.g. " (1 mebibyte)") when {@code bytes} is an exact
 * multiple of a power of 1024 (up to tebibytes), or the empty string otherwise.
 */
static String niceMemoryUnits(long bytes) {
    long value = bytes;
    int i = 0;
    while (value != 0 && i < 4) {
        if (value % 1024L == 0) {
            value /= 1024L;
            i++;
        } else {
            break;
        }
    }
    String resultFormat = " (" + value + " %s" + (value == 1 ? ")" : "s)");
    switch (i) {
        case 1:
            return String.format(resultFormat, "kibibyte");
        case 2:
            return String.format(resultFormat, "mebibyte");
        case 3:
            return String.format(resultFormat, "gibibyte");
        case 4:
            return String.format(resultFormat, "tebibyte");
        default:
            return "";
    }
}

/**
 * Returns a time-unit suffix (e.g. " (5 minutes)") when {@code millis} is an exact
 * multiple of a second/minute/hour/day, or the empty string otherwise.
 */
static String niceTimeUnits(long millis) {
    long value = millis;
    long[] divisors = {1000, 60, 60, 24};
    String[] units = {"second", "minute", "hour", "day"};
    int i = 0;
    while (value != 0 && i < 4) {
        if (value % divisors[i] == 0) {
            value /= divisors[i];
            i++;
        } else {
            break;
        }
    }
    if (i > 0) {
        return " (" + value + " " + units[i - 1] + (value > 1 ? "s)" : ")");
    }
    return "";
}

public String toHtmlTable() {
    return toHtmlTable(Collections.emptyMap());
}

private void addHeader(StringBuilder builder, String headerName) {
    builder.append("<th>");
    builder.append(headerName);
    builder.append("</th>\n");
}

private void addColumnValue(StringBuilder builder, String value) {
    builder.append("<td>");
    builder.append(value);
    builder.append("</td>");
}

/**
 * Converts this config into an HTML table that can be embedded into docs.
 * If <code>dynamicUpdateModes</code> is non-empty, a "Dynamic Update Mode" column
 * will be included in the table with the value of the update mode. Default
 * mode is "read-only".
 * @param dynamicUpdateModes Config name -&gt; update mode mapping
 */
public String toHtmlTable(Map<String, String> dynamicUpdateModes) {
    boolean hasUpdateModes = !dynamicUpdateModes.isEmpty();
    List<ConfigKey> configs = sortedConfigs();
    StringBuilder b = new StringBuilder();
    b.append("<table class=\"data-table\"><tbody>\n");
    b.append("<tr>\n");
    // print column headers
    for (String headerName : headers()) {
        addHeader(b, headerName);
    }
    if (hasUpdateModes)
        addHeader(b, "Dynamic Update Mode");
    b.append("</tr>\n");
    for (ConfigKey key : configs) {
        if (key.internalConfig) {
            continue;
        }
        b.append("<tr>\n");
        // print column values; addColumnValue already emits the closing </td>,
        // so no extra tag is appended here (fixes a stray duplicate "</td>" per cell).
        for (String headerName : headers()) {
            addColumnValue(b, getConfigValue(key, headerName));
        }
        if (hasUpdateModes) {
            String updateMode = dynamicUpdateModes.get(key.name);
            if (updateMode == null)
                updateMode = "read-only";
            addColumnValue(b, updateMode);
        }
        b.append("</tr>\n");
    }
    b.append("</tbody></table>");
    return b.toString();
}

/**
 * Get the configs formatted with reStructuredText, suitable for embedding in Sphinx
 * documentation.
 */
public String toRst() {
    StringBuilder b = new StringBuilder();
    for (ConfigKey key : sortedConfigs()) {
        if (key.internalConfig) {
            continue;
        }
        getConfigKeyRst(key, b);
        b.append("\n");
    }
    return b.toString();
}

/**
 * Configs with new metadata (group, orderInGroup, dependents) formatted with reStructuredText,
 * suitable for embedding in Sphinx documentation.
 */
public String toEnrichedRst() {
    StringBuilder b = new StringBuilder();
    String lastKeyGroupName = "";
    for (ConfigKey key : sortedConfigs()) {
        if (key.internalConfig) {
            continue;
        }
        if (key.group != null) {
            // Emit a group heading (underlined with '^') whenever the group changes.
            if (!lastKeyGroupName.equalsIgnoreCase(key.group)) {
                b.append(key.group).append("\n");
                char[] underLine = new char[key.group.length()];
                Arrays.fill(underLine, '^');
                b.append(new String(underLine)).append("\n\n");
            }
            lastKeyGroupName = key.group;
        }
        getConfigKeyRst(key, b);
        if (key.dependents != null && key.dependents.size() > 0) {
            int j = 0;
            b.append(" * Dependents: ");
            for (String dependent : key.dependents) {
                b.append("``");
                b.append(dependent);
                if (++j == key.dependents.size())
                    b.append("``");
                else
                    b.append("``, ");
            }
            b.append("\n");
        }
        b.append("\n");
    }
    return b.toString();
}

/**
 * Shared content on Rst and Enriched Rst.
 */
private void getConfigKeyRst(ConfigKey key, StringBuilder b) {
    b.append("``").append(key.name).append("``").append("\n");
    if (key.documentation != null) {
        for (String docLine : key.documentation.split("\n")) {
            if (docLine.length() == 0) {
                continue;
            }
            b.append(" ").append(docLine).append("\n\n");
        }
    } else {
        b.append("\n");
    }
    b.append(" * Type: ").append(getConfigValue(key, "Type")).append("\n");
    if (key.hasDefault()) {
        b.append(" * Default: ").append(getConfigValue(key, "Default")).append("\n");
    }
    if (key.validator != null) {
        b.append(" * Valid Values: ").append(getConfigValue(key, "Valid Values")).append("\n");
    }
    b.append(" * Importance: ").append(getConfigValue(key, "Importance")).append("\n");
}

/**
 * Get a list of configs sorted taking the 'group' and 'orderInGroup' into account.
 *
 * If grouping is not specified, the result will reflect "natural" order: listing required
 * fields first, then ordering by importance, and finally by name.
 */
private List<ConfigKey> sortedConfigs() {
    final Map<String, Integer> groupOrd = new HashMap<>(groups.size());
    int ord = 0;
    for (String group : groups) {
        groupOrd.put(group, ord++);
    }
    List<ConfigKey> configs = new ArrayList<>(configKeys.values());
    Collections.sort(configs, (k1, k2) -> compare(k1, k2, groupOrd));
    return configs;
}

// Ordering: group order, then orderInGroup, then required-before-defaulted,
// then importance, and finally name.
private int compare(ConfigKey k1, ConfigKey k2, Map<String, Integer> groupOrd) {
    int cmp = k1.group == null
            ? (k2.group == null ? 0 : -1)
            : (k2.group == null ? 1 : Integer.compare(groupOrd.get(k1.group), groupOrd.get(k2.group)));
    if (cmp == 0) {
        cmp = Integer.compare(k1.orderInGroup, k2.orderInGroup);
        if (cmp == 0) {
            // first take anything with no default value
            if (!k1.hasDefault() && k2.hasDefault())
                cmp = -1;
            else if (!k2.hasDefault() && k1.hasDefault())
                cmp = 1;
            else {
                cmp = k1.importance.compareTo(k2.importance);
                if (cmp == 0)
                    return k1.name.compareTo(k2.name);
            }
        }
    }
    return cmp;
}

/**
 * Embeds the configs of {@code child} into this definition, prefixing each key with
 * {@code keyPrefix} and each group with {@code groupPrefix}.
 */
public void embed(final String keyPrefix, final String groupPrefix, final int startingOrd, final ConfigDef child) {
    int orderInGroup = startingOrd;
    for (ConfigKey key : child.sortedConfigs()) {
        define(new ConfigKey(
                keyPrefix + key.name,
                key.type,
                key.defaultValue,
                embeddedValidator(keyPrefix, key.validator),
                key.importance,
                key.documentation,
                groupPrefix + (key.group == null ? "" : ": " + key.group),
                orderInGroup++,
                key.width,
                key.displayName,
                embeddedDependents(keyPrefix, key.dependents),
                embeddedRecommender(keyPrefix, key.recommender),
                key.internalConfig));
    }
}

/**
 * Returns a new validator instance that delegates to the base validator but unprefixes
 * the config name along the way.
 */
private static Validator embeddedValidator(final String keyPrefix, final Validator base) {
    if (base == null)
        return null;
    return new Validator() {
        @Override
        public void ensureValid(String name, Object value) {
            base.ensureValid(name.substring(keyPrefix.length()), value);
        }

        @Override
        public String toString() {
            return base.toString();
        }
    };
}

/**
 * Updated list of dependent configs with the specified {@code prefix} added.
 */
private static List<String> embeddedDependents(final String keyPrefix, final List<String> dependents) {
    if (dependents == null)
        return null;
    final List<String> updatedDependents = new ArrayList<>(dependents.size());
    for (String dependent : dependents) {
        updatedDependents.add(keyPrefix + dependent);
    }
    return updatedDependents;
}

/**
 * Returns a new recommender instance that delegates to the base recommender but unprefixes
 * the input parameters along the way.
 */
private static Recommender embeddedRecommender(final String keyPrefix, final Recommender base) {
    if (base == null)
        return null;
    return new Recommender() {
        private String unprefixed(String k) {
            return k.substring(keyPrefix.length());
        }

        // Keeps only the entries belonging to this embedded child and strips the prefix.
        private Map<String, Object> unprefixed(Map<String, Object> parsedConfig) {
            final Map<String, Object> unprefixedParsedConfig = new HashMap<>(parsedConfig.size());
            for (Map.Entry<String, Object> e : parsedConfig.entrySet()) {
                if (e.getKey().startsWith(keyPrefix)) {
                    unprefixedParsedConfig.put(unprefixed(e.getKey()), e.getValue());
                }
            }
            return unprefixedParsedConfig;
        }

        @Override
        public List<Object> validValues(String name, Map<String, Object> parsedConfig) {
            return base.validValues(unprefixed(name), unprefixed(parsedConfig));
        }

        @Override
        public boolean visible(String name, Map<String, Object> parsedConfig) {
            return base.visible(unprefixed(name), unprefixed(parsedConfig));
        }
    };
}

public String toHtml() {
    return toHtml(Collections.emptyMap());
}

/**
 * Converts this config into an HTML list that can be embedded into docs.
 * @param headerDepth The top level header depth in the generated HTML.
 * @param idGenerator A function for computing the HTML id attribute in the generated HTML from a given config name.
 */
public String toHtml(int headerDepth, Function<String, String> idGenerator) {
    return toHtml(headerDepth, idGenerator, Collections.emptyMap());
}

/**
 * Converts this config into an HTML list that can be embedded into docs.
 * If <code>dynamicUpdateModes</code> is non-empty, a "Dynamic Update Mode" label
 * will be included in the config details with the value of the update mode. Default
 * mode is "read-only".
 * @param dynamicUpdateModes Config name -&gt; update mode mapping.
 */
public String toHtml(Map<String, String> dynamicUpdateModes) {
    return toHtml(4, Function.identity(), dynamicUpdateModes);
}

/**
 * Converts this config into an HTML list that can be embedded into docs.
 * If <code>dynamicUpdateModes</code> is non-empty, a "Dynamic Update Mode" label
 * will be included in the config details with the value of the update mode. Default
 * mode is "read-only".
 * @param headerDepth The top level header depth in the generated HTML.
 * @param idGenerator A function for computing the HTML id attribute in the generated HTML from a given config name.
 * @param dynamicUpdateModes Config name -&gt; update mode mapping.
 */
public String toHtml(int headerDepth, Function<String, String> idGenerator, Map<String, String> dynamicUpdateModes) {
    boolean hasUpdateModes = !dynamicUpdateModes.isEmpty();
    List<ConfigKey> configs = sortedConfigs();
    StringBuilder b = new StringBuilder();
    b.append("<ul class=\"config-list\">\n");
    for (ConfigKey key : configs) {
        if (key.internalConfig) {
            continue;
        }
        b.append("<li>\n");
        // Both an id-generated anchor and the raw config name anchor are emitted.
        b.append(String.format("<h%1$d>" +
                "<a id=\"%3$s\"></a><a id=\"%2$s\" href=\"#%2$s\">%3$s</a>" +
                "</h%1$d>%n", headerDepth, idGenerator.apply(key.name), key.name));
        b.append("<p>");
        if (key.documentation != null) {
            b.append(key.documentation.replaceAll("\n", "<br>"));
        }
        b.append("</p>\n");
        b.append("<table>" +
                "<tbody>\n");
        for (String detail : headers()) {
            // Name and Description are already rendered above the table.
            if (detail.equals("Name") || detail.equals("Description"))
                continue;
            addConfigDetail(b, detail, getConfigValue(key, detail));
        }
        if (hasUpdateModes) {
            String updateMode = dynamicUpdateModes.get(key.name);
            if (updateMode == null)
                updateMode = "read-only";
            addConfigDetail(b, "Update Mode", updateMode);
        }
        b.append("</tbody></table>\n");
        b.append("</li>\n");
    }
    b.append("</ul>\n");
    return b.toString();
}

private static void addConfigDetail(StringBuilder builder, String name, String value) {
    builder.append("<tr>" +
            "<th>" + name + ":</th>" +
            "<td>" + value + "</td>" +
            "</tr>\n");
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import org.apache.kafka.common.KafkaException;

/**
 * Thrown if the user supplies an invalid configuration
 */
public class ConfigException extends KafkaException {

    private static final long serialVersionUID = 1L;

    /**
     * @param message a free-form description of the invalid configuration
     */
    public ConfigException(String message) {
        super(message);
    }

    /**
     * @param name  the name of the offending configuration
     * @param value the rejected value
     */
    public ConfigException(String name, Object value) {
        this(name, value, null);
    }

    /**
     * @param name    the name of the offending configuration
     * @param value   the rejected value
     * @param message extra detail appended to the generated message, or null for none
     */
    public ConfigException(String name, Object value, String message) {
        super("Invalid value " + value + " for configuration " + name + (message == null ? "" : ": " + message));
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigResource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import java.util.Arrays; import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; /** * A class representing resources that have configs. */ public final class ConfigResource { /** * Type of resource. */ public enum Type { BROKER_LOGGER((byte) 8), BROKER((byte) 4), TOPIC((byte) 2), UNKNOWN((byte) 0); private static final Map<Byte, Type> TYPES = Collections.unmodifiableMap( Arrays.stream(values()).collect(Collectors.toMap(Type::id, Function.identity())) ); private final byte id; Type(final byte id) { this.id = id; } public byte id() { return id; } public static Type forId(final byte id) { return TYPES.getOrDefault(id, UNKNOWN); } } private final Type type; private final String name; /** * Create an instance of this class with the provided parameters. * * @param type a non-null resource type * @param name a non-null resource name */ public ConfigResource(Type type, String name) { Objects.requireNonNull(type, "type should not be null"); Objects.requireNonNull(name, "name should not be null"); this.type = type; this.name = name; } /** * Return the resource type. 
*/ public Type type() { return type; } /** * Return the resource name. */ public String name() { return name; } /** * Returns true if this is the default resource of a resource type. * Resource name is empty for the default resource. */ public boolean isDefault() { return name.isEmpty(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ConfigResource that = (ConfigResource) o; return type == that.type && name.equals(that.name); } @Override public int hashCode() { int result = type.hashCode(); result = 31 * result + name.hashCode(); return result; } @Override public String toString() { return "ConfigResource(type=" + type + ", name='" + name + "')"; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigTransformer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import org.apache.kafka.common.config.provider.ConfigProvider;
import org.apache.kafka.common.config.provider.FileConfigProvider;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * This class wraps a set of {@link ConfigProvider} instances and uses them to perform
 * transformations.
 *
 * <p>The default variable pattern is of the form <code>${provider:[path:]key}</code>,
 * where the <code>provider</code> corresponds to a {@link ConfigProvider} instance, as passed to
 * {@link ConfigTransformer#ConfigTransformer(Map)}. The pattern will extract a set
 * of paths (which are optional) and keys and then pass them to {@link ConfigProvider#get(String, Set)} to obtain the
 * values with which to replace the variables.
 *
 * <p>For example, if a Map consisting of an entry with a provider name "file" and provider instance
 * {@link FileConfigProvider} is passed to the {@link ConfigTransformer#ConfigTransformer(Map)}, and a Properties
 * file with contents
 * <pre>
 * fileKey=someValue
 * </pre>
 * resides at the path "/tmp/properties.txt", then when a configuration Map which has an entry with a key "someKey" and
 * a value "${file:/tmp/properties.txt:fileKey}" is passed to the {@link #transform(Map)} method, then the transformed
 * Map will have an entry with key "someKey" and a value "someValue".
 *
 * <p>This class only depends on {@link ConfigProvider#get(String, Set)} and does not depend on subscription support
 * in a {@link ConfigProvider}, such as the {@link ConfigProvider#subscribe(String, Set, ConfigChangeCallback)} and
 * {@link ConfigProvider#unsubscribe(String, Set, ConfigChangeCallback)} methods.
 */
public class ConfigTransformer {
    // Groups: (1) provider name, (3) optional path (group 2 includes the trailing ':'), (4) key.
    public static final Pattern DEFAULT_PATTERN = Pattern.compile("\\$\\{([^}]*?):(([^}]*?):)?([^}]*?)\\}");
    private static final String EMPTY_PATH = "";

    // Provider name -> provider instance, as supplied by the caller.
    private final Map<String, ConfigProvider> configProviders;

    /**
     * Creates a ConfigTransformer with the default pattern, of the form <code>${provider:[path:]key}</code>.
     *
     * @param configProviders a Map of provider names and {@link ConfigProvider} instances.
     */
    public ConfigTransformer(Map<String, ConfigProvider> configProviders) {
        this.configProviders = configProviders;
    }

    /**
     * Transforms the given configuration data by using the {@link ConfigProvider} instances to
     * look up values to replace the variables in the pattern.
     *
     * <p>Variables referencing an unknown provider are left in place verbatim. TTLs reported
     * by providers are keyed by path only, so the same path queried through two providers
     * shares one TTL entry (last one wins).
     *
     * @param configs the configuration values to be transformed
     * @return an instance of {@link ConfigTransformerResult}
     */
    public ConfigTransformerResult transform(Map<String, String> configs) {
        // providerName -> (path -> set of keys requested under that path)
        Map<String, Map<String, Set<String>>> keysByProvider = new HashMap<>();
        // providerName -> (path -> (key -> resolved value))
        Map<String, Map<String, Map<String, String>>> lookupsByProvider = new HashMap<>();

        // Collect the variables from the given configs that need transformation
        for (Map.Entry<String, String> config : configs.entrySet()) {
            if (config.getValue() != null) {
                List<ConfigVariable> vars = getVars(config.getValue(), DEFAULT_PATTERN);
                for (ConfigVariable var : vars) {
                    Map<String, Set<String>> keysByPath = keysByProvider.computeIfAbsent(var.providerName, k -> new HashMap<>());
                    Set<String> keys = keysByPath.computeIfAbsent(var.path, k -> new HashSet<>());
                    keys.add(var.variable);
                }
            }
        }

        // Retrieve requested variables from the ConfigProviders
        Map<String, Long> ttls = new HashMap<>();
        for (Map.Entry<String, Map<String, Set<String>>> entry : keysByProvider.entrySet()) {
            String providerName = entry.getKey();
            ConfigProvider provider = configProviders.get(providerName);
            Map<String, Set<String>> keysByPath = entry.getValue();
            if (provider != null && keysByPath != null) {
                for (Map.Entry<String, Set<String>> pathWithKeys : keysByPath.entrySet()) {
                    String path = pathWithKeys.getKey();
                    // Defensive copy: the provider must not mutate our internal key set.
                    Set<String> keys = new HashSet<>(pathWithKeys.getValue());
                    ConfigData configData = provider.get(path, keys);
                    Map<String, String> data = configData.data();
                    Long ttl = configData.ttl();
                    if (ttl != null && ttl >= 0) {
                        ttls.put(path, ttl);
                    }
                    Map<String, Map<String, String>> keyValuesByPath = lookupsByProvider.computeIfAbsent(providerName, k -> new HashMap<>());
                    keyValuesByPath.put(path, data);
                }
            }
        }

        // Perform the transformations by performing variable replacements
        Map<String, String> data = new HashMap<>(configs);
        for (Map.Entry<String, String> config : configs.entrySet()) {
            data.put(config.getKey(), replace(lookupsByProvider, config.getValue(), DEFAULT_PATTERN));
        }
        return new ConfigTransformerResult(data, ttls);
    }

    // Extracts every ${provider:[path:]key} occurrence from the value.
    private static List<ConfigVariable> getVars(String value, Pattern pattern) {
        List<ConfigVariable> configVars = new ArrayList<>();
        Matcher matcher = pattern.matcher(value);
        while (matcher.find()) {
            configVars.add(new ConfigVariable(matcher));
        }
        return configVars;
    }

    // Replaces each matched variable with its looked-up value. When the provider is unknown
    // (lookupsByPath == null), `i` is deliberately NOT advanced, so the untouched region
    // including the variable text is copied verbatim by the trailing append below.
    // NOTE(review): if the variable's path is absent from lookupsByPath, keyValues.get(...)
    // would throw NPE; transform() requests exactly the paths parsed from the same configs,
    // so this presumably cannot happen via transform() — confirm before calling directly.
    private static String replace(Map<String, Map<String, Map<String, String>>> lookupsByProvider,
                                  String value,
                                  Pattern pattern) {
        if (value == null) {
            return null;
        }
        Matcher matcher = pattern.matcher(value);
        StringBuilder builder = new StringBuilder();
        int i = 0;
        while (matcher.find()) {
            ConfigVariable configVar = new ConfigVariable(matcher);
            Map<String, Map<String, String>> lookupsByPath = lookupsByProvider.get(configVar.providerName);
            if (lookupsByPath != null) {
                Map<String, String> keyValues = lookupsByPath.get(configVar.path);
                String replacement = keyValues.get(configVar.variable);
                builder.append(value, i, matcher.start());
                if (replacement == null) {
                    // No replacements will be performed; just return the original value
                    builder.append(matcher.group(0));
                } else {
                    builder.append(replacement);
                }
                i = matcher.end();
            }
        }
        builder.append(value, i, value.length());
        return builder.toString();
    }

    // Parsed form of one ${provider:[path:]key} match.
    private static class ConfigVariable {
        final String providerName;
        final String path;
        final String variable;

        ConfigVariable(Matcher matcher) {
            this.providerName = matcher.group(1);
            // Path group is optional; normalize a missing path to the empty string.
            this.path = matcher.group(3) != null ? matcher.group(3) : EMPTY_PATH;
            this.variable = matcher.group(4);
        }

        public String toString() {
            return "(" + providerName + ":" + (path != null ? path + ":" : "") + variable + ")";
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigTransformerResult.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config;

import org.apache.kafka.common.config.provider.ConfigProvider;

import java.util.Map;

/**
 * The result of a transformation from {@link ConfigTransformer}.
 */
public class ConfigTransformerResult {

    private final Map<String, String> data;
    private final Map<String, Long> ttls;

    /**
     * Creates a new ConfigTransformerResult with the given data and TTL values for a set of paths.
     *
     * @param data a Map of key-value pairs
     * @param ttls a Map of path and TTL values (in milliseconds)
     */
    public ConfigTransformerResult(Map<String, String> data, Map<String, Long> ttls) {
        this.data = data;
        this.ttls = ttls;
    }

    /**
     * Returns the transformed data, with variables replaced with corresponding values from the
     * ConfigProvider instances if found.
     *
     * <p>Modifying the transformed data that is returned does not affect the {@link ConfigProvider} nor the
     * original data that was used as the source of the transformation.
     *
     * @return data a Map of key-value pairs
     */
    public Map<String, String> data() {
        return data;
    }

    /**
     * Returns the TTL values (in milliseconds) returned from the ConfigProvider instances for a given set of paths.
     *
     * @return data a Map of path and TTL values
     */
    public Map<String, Long> ttls() {
        return ttls;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/ConfigValue.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import java.util.ArrayList; import java.util.List; import java.util.Objects; public class ConfigValue { private final String name; private Object value; private List<Object> recommendedValues; private final List<String> errorMessages; private boolean visible; public ConfigValue(String name) { this(name, null, new ArrayList<>(), new ArrayList<>()); } public ConfigValue(String name, Object value, List<Object> recommendedValues, List<String> errorMessages) { this.name = name; this.value = value; this.recommendedValues = recommendedValues; this.errorMessages = errorMessages; this.visible = true; } public String name() { return name; } public Object value() { return value; } public List<Object> recommendedValues() { return recommendedValues; } public List<String> errorMessages() { return errorMessages; } public boolean visible() { return visible; } public void value(Object value) { this.value = value; } public void recommendedValues(List<Object> recommendedValues) { this.recommendedValues = recommendedValues; } public void addErrorMessage(String errorMessage) { this.errorMessages.add(errorMessage); } public void visible(boolean visible) { this.visible = visible; } 
@Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ConfigValue that = (ConfigValue) o; return Objects.equals(name, that.name) && Objects.equals(value, that.value) && Objects.equals(recommendedValues, that.recommendedValues) && Objects.equals(errorMessages, that.errorMessages) && Objects.equals(visible, that.visible); } @Override public int hashCode() { return Objects.hash(name, value, recommendedValues, errorMessages, visible); } @Override public String toString() { return "[" + name + "," + value + "," + recommendedValues + "," + errorMessages + "," + visible + "]"; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/LogLevelConfig.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import java.util.Arrays; import java.util.HashSet; import java.util.Set; /** * This class holds definitions for log level configurations related to Kafka's application logging. See KIP-412 for additional information */ public class LogLevelConfig { /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ /** * The <code>FATAL</code> level designates a very severe error * that will lead the Kafka broker to abort. */ public static final String FATAL_LOG_LEVEL = "FATAL"; /** * The <code>ERROR</code> level designates error events that * might still allow the broker to continue running. */ public static final String ERROR_LOG_LEVEL = "ERROR"; /** * The <code>WARN</code> level designates potentially harmful situations. 
*/ public static final String WARN_LOG_LEVEL = "WARN"; /** * The <code>INFO</code> level designates informational messages * that highlight normal Kafka events at a coarse-grained level */ public static final String INFO_LOG_LEVEL = "INFO"; /** * The <code>DEBUG</code> level designates fine-grained * informational events that are most useful to debug Kafka */ public static final String DEBUG_LOG_LEVEL = "DEBUG"; /** * The <code>TRACE</code> level designates finer-grained * informational events than the <code>DEBUG</code> level. */ public static final String TRACE_LOG_LEVEL = "TRACE"; public static final Set<String> VALID_LOG_LEVELS = new HashSet<>(Arrays.asList( FATAL_LOG_LEVEL, ERROR_LOG_LEVEL, WARN_LOG_LEVEL, INFO_LOG_LEVEL, DEBUG_LOG_LEVEL, TRACE_LOG_LEVEL )); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/SaslConfigs.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import org.apache.kafka.common.config.ConfigDef.Range; public class SaslConfigs { private static final String OAUTHBEARER_NOTE = " Currently applies only to OAUTHBEARER."; /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ /** SASL mechanism configuration - standard mechanism names are listed <a href="http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml">here</a>. */ public static final String SASL_MECHANISM = "sasl.mechanism"; public static final String SASL_MECHANISM_DOC = "SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism."; public static final String GSSAPI_MECHANISM = "GSSAPI"; public static final String DEFAULT_SASL_MECHANISM = GSSAPI_MECHANISM; public static final String SASL_JAAS_CONFIG = "sasl.jaas.config"; public static final String SASL_JAAS_CONFIG_DOC = "JAAS login context parameters for SASL connections in the format used by JAAS configuration files. 
" + "JAAS configuration file format is described <a href=\"https://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\">here</a>. " + "The format for the value is: <code>loginModuleClass controlFlag (optionName=optionValue)*;</code>. For brokers, " + "the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, " + "listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;"; public static final String SASL_CLIENT_CALLBACK_HANDLER_CLASS = "sasl.client.callback.handler.class"; public static final String SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC = "The fully qualified name of a SASL client callback handler class " + "that implements the AuthenticateCallbackHandler interface."; public static final String SASL_LOGIN_CALLBACK_HANDLER_CLASS = "sasl.login.callback.handler.class"; public static final String SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC = "The fully qualified name of a SASL login callback handler class " + "that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with " + "listener prefix and SASL mechanism name in lower-case. For example, " + "listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler"; public static final String SASL_LOGIN_CLASS = "sasl.login.class"; public static final String SASL_LOGIN_CLASS_DOC = "The fully qualified name of a class that implements the Login interface. " + "For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, " + "listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin"; public static final String SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; public static final String SASL_KERBEROS_SERVICE_NAME_DOC = "The Kerberos principal name that Kafka runs as. 
" + "This can be defined either in Kafka's JAAS config or in Kafka's config."; public static final String SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; public static final String SASL_KERBEROS_KINIT_CMD_DOC = "Kerberos kinit command path."; public static final String DEFAULT_KERBEROS_KINIT_CMD = "/usr/bin/kinit"; public static final String SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR = "sasl.kerberos.ticket.renew.window.factor"; public static final String SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC = "Login thread will sleep until the specified window factor of time from last refresh" + " to ticket's expiry has been reached, at which time it will try to renew the ticket."; public static final double DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR = 0.80; public static final String SASL_KERBEROS_TICKET_RENEW_JITTER = "sasl.kerberos.ticket.renew.jitter"; public static final String SASL_KERBEROS_TICKET_RENEW_JITTER_DOC = "Percentage of random jitter added to the renewal time."; public static final double DEFAULT_KERBEROS_TICKET_RENEW_JITTER = 0.05; public static final String SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN = "sasl.kerberos.min.time.before.relogin"; public static final String SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC = "Login thread sleep time between refresh attempts."; public static final long DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN = 1 * 60 * 1000L; public static final String SASL_LOGIN_REFRESH_WINDOW_FACTOR = "sasl.login.refresh.window.factor"; public static final String SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC = "Login refresh thread will sleep until the specified window factor relative to the" + " credential's lifetime has been reached, at which time it will try to refresh the credential." + " Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used" + " if no value is specified." 
+ OAUTHBEARER_NOTE; public static final double DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR = 0.80; public static final String SASL_LOGIN_REFRESH_WINDOW_JITTER = "sasl.login.refresh.window.jitter"; public static final String SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC = "The maximum amount of random jitter relative to the credential's lifetime" + " that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive;" + " a default value of 0.05 (5%) is used if no value is specified." + OAUTHBEARER_NOTE; public static final double DEFAULT_LOGIN_REFRESH_WINDOW_JITTER = 0.05; public static final String SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS = "sasl.login.refresh.min.period.seconds"; public static final String SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC = "The desired minimum time for the login refresh thread to wait before refreshing a credential," + " in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and " + " sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential." + OAUTHBEARER_NOTE; public static final short DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS = 60; public static final String SASL_LOGIN_REFRESH_BUFFER_SECONDS = "sasl.login.refresh.buffer.seconds"; public static final String SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC = "The amount of buffer time before credential expiration to maintain when refreshing a credential," + " in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain" + " as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified." + " This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential." 
+ OAUTHBEARER_NOTE; public static final short DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS = 300; public static final String SASL_LOGIN_CONNECT_TIMEOUT_MS = "sasl.login.connect.timeout.ms"; public static final String SASL_LOGIN_CONNECT_TIMEOUT_MS_DOC = "The (optional) value in milliseconds for the external authentication provider connection timeout." + OAUTHBEARER_NOTE; public static final String SASL_LOGIN_READ_TIMEOUT_MS = "sasl.login.read.timeout.ms"; public static final String SASL_LOGIN_READ_TIMEOUT_MS_DOC = "The (optional) value in milliseconds for the external authentication provider read timeout." + OAUTHBEARER_NOTE; private static final String LOGIN_EXPONENTIAL_BACKOFF_NOTE = " Login uses an exponential backoff algorithm with an initial wait based on the" + " sasl.login.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the" + " sasl.login.retry.backoff.max.ms setting." + OAUTHBEARER_NOTE; public static final String SASL_LOGIN_RETRY_BACKOFF_MAX_MS = "sasl.login.retry.backoff.max.ms"; public static final long DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS = 10000; public static final String SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC = "The (optional) value in milliseconds for the maximum wait between login attempts to the" + " external authentication provider." + LOGIN_EXPONENTIAL_BACKOFF_NOTE; public static final String SASL_LOGIN_RETRY_BACKOFF_MS = "sasl.login.retry.backoff.ms"; public static final long DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS = 100; public static final String SASL_LOGIN_RETRY_BACKOFF_MS_DOC = "The (optional) value in milliseconds for the initial wait between login attempts to the external" + " authentication provider." 
+ LOGIN_EXPONENTIAL_BACKOFF_NOTE; public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME = "sasl.oauthbearer.scope.claim.name"; public static final String DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME = "scope"; public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC = "The OAuth claim for the scope is often named \"" + DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME + "\", but this (optional)" + " setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different" + " name for that claim."; public static final String SASL_OAUTHBEARER_SUB_CLAIM_NAME = "sasl.oauthbearer.sub.claim.name"; public static final String DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME = "sub"; public static final String SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC = "The OAuth claim for the subject is often named \"" + DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME + "\", but this (optional)" + " setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different" + " name for that claim."; public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC = "The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token" + " endpoint URL to which requests will be made to login based on the configuration in " + SASL_JAAS_CONFIG + ". 
If the URL is file-based, it" + " specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization."; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL = "sasl.oauthbearer.jwks.endpoint.url"; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC = "The OAuth/OIDC provider URL from which the provider's" + " <a href=\"https://datatracker.ietf.org/doc/html/rfc7517#section-5\">JWKS (JSON Web Key Set)</a> can be retrieved. The URL can be HTTP(S)-based or file-based." + " If the URL is HTTP(S)-based, the JWKS data will be retrieved from the OAuth/OIDC provider via the configured URL on broker startup. All then-current" + " keys will be cached on the broker for incoming requests. If an authentication request is received for a JWT that includes a \"kid\" header claim value that" + " isn't yet in the cache, the JWKS endpoint will be queried again on demand. However, the broker polls the URL every sasl.oauthbearer.jwks.endpoint.refresh.ms" + " milliseconds to refresh the cache with any forthcoming keys before any JWT requests that include them are received." + " If the URL is file-based, the broker will load the JWKS file from a configured location on startup. 
In the event that the JWT includes a \"kid\" header" + " value that isn't in the JWKS file, the broker will reject the JWT and authentication will fail."; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS = "sasl.oauthbearer.jwks.endpoint.refresh.ms"; public static final long DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS = 60 * 60 * 1000; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS_DOC = "The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set)" + " cache that contains the keys to verify the signature of the JWT."; private static final String JWKS_EXPONENTIAL_BACKOFF_NOTE = " JWKS retrieval uses an exponential backoff algorithm with an initial wait based on the" + " sasl.oauthbearer.jwks.endpoint.retry.backoff.ms setting and will double in wait length between attempts up to a maximum wait length specified by the" + " sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms setting."; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS = "sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms"; public static final long DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS = 10000; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC = "The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set)" + " from the external authentication provider." + JWKS_EXPONENTIAL_BACKOFF_NOTE; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS = "sasl.oauthbearer.jwks.endpoint.retry.backoff.ms"; public static final long DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS = 100; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC = "The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external" + " authentication provider." 
+ JWKS_EXPONENTIAL_BACKOFF_NOTE; public static final String SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS = "sasl.oauthbearer.clock.skew.seconds"; public static final int DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS = 30; public static final String SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC = "The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and" + " the broker."; public static final String SASL_OAUTHBEARER_EXPECTED_AUDIENCE = "sasl.oauthbearer.expected.audience"; public static final String SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC = "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the" + " expected audiences. The JWT will be inspected for the standard OAuth \"aud\" claim and if this value is set, the broker will match the value from JWT's \"aud\" claim " + " to see if there is an exact match. If there is no match, the broker will reject the JWT and authentication will fail."; public static final String SASL_OAUTHBEARER_EXPECTED_ISSUER = "sasl.oauthbearer.expected.issuer"; public static final String SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC = "The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer. The JWT will" + " be inspected for the standard OAuth \"iss\" claim and if this value is set, the broker will match it exactly against what is in the JWT's \"iss\" claim. 
If there is no" + " match, the broker will reject the JWT and authentication will fail."; public static void addClientSaslSupport(ConfigDef config) { config.define(SaslConfigs.SASL_KERBEROS_SERVICE_NAME, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_KERBEROS_SERVICE_NAME_DOC) .define(SaslConfigs.SASL_KERBEROS_KINIT_CMD, ConfigDef.Type.STRING, SaslConfigs.DEFAULT_KERBEROS_KINIT_CMD, ConfigDef.Importance.LOW, SaslConfigs.SASL_KERBEROS_KINIT_CMD_DOC) .define(SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR, ConfigDef.Type.DOUBLE, SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR, ConfigDef.Importance.LOW, SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC) .define(SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER, ConfigDef.Type.DOUBLE, SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_JITTER, ConfigDef.Importance.LOW, SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER_DOC) .define(SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN, ConfigDef.Type.LONG, SaslConfigs.DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN, ConfigDef.Importance.LOW, SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC) .define(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR, ConfigDef.Type.DOUBLE, SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR, Range.between(0.5, 1.0), ConfigDef.Importance.LOW, SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC) .define(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER, ConfigDef.Type.DOUBLE, SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_JITTER, Range.between(0.0, 0.25), ConfigDef.Importance.LOW, SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC) .define(SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS, ConfigDef.Type.SHORT, SaslConfigs.DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS, Range.between(0, 900), ConfigDef.Importance.LOW, SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC) .define(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS, ConfigDef.Type.SHORT, SaslConfigs.DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS, Range.between(0, 3600), ConfigDef.Importance.LOW, 
SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC) .define(SaslConfigs.SASL_MECHANISM, ConfigDef.Type.STRING, SaslConfigs.DEFAULT_SASL_MECHANISM, ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_MECHANISM_DOC) .define(SaslConfigs.SASL_JAAS_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_JAAS_CONFIG_DOC) .define(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS, ConfigDef.Type.CLASS, null, ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS_DOC) .define(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, ConfigDef.Type.CLASS, null, ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS_DOC) .define(SaslConfigs.SASL_LOGIN_CLASS, ConfigDef.Type.CLASS, null, ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_LOGIN_CLASS_DOC) .define(SaslConfigs.SASL_LOGIN_CONNECT_TIMEOUT_MS, ConfigDef.Type.INT, null, ConfigDef.Importance.LOW, SASL_LOGIN_CONNECT_TIMEOUT_MS_DOC) .define(SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS, ConfigDef.Type.INT, null, ConfigDef.Importance.LOW, SASL_LOGIN_READ_TIMEOUT_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS, ConfigDef.Type.LONG, DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS, ConfigDef.Importance.LOW, SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS, ConfigDef.Type.LONG, DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS, ConfigDef.Importance.LOW, SASL_LOGIN_RETRY_BACKOFF_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, 
ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, ConfigDef.Type.INT, DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, ConfigDef.Type.LIST, null, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER, ConfigDef.Type.STRING, null, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/SecurityConfig.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; /** * Contains the common security config for SSL and SASL */ public class SecurityConfig { public static final String SECURITY_PROVIDERS_CONFIG = "security.providers"; public static final String SECURITY_PROVIDERS_DOC = "A list of configurable creator classes each returning a provider" + " implementing security algorithms. These classes should implement the" + " <code>org.apache.kafka.common.security.auth.SecurityProviderCreator</code> interface."; }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/SslClientAuth.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; /** * Describes whether the server should require or request client authentication. */ public enum SslClientAuth { REQUIRED, REQUESTED, NONE; public static final List<SslClientAuth> VALUES = Collections.unmodifiableList(Arrays.asList(SslClientAuth.values())); public static SslClientAuth forConfig(String key) { if (key == null) { return SslClientAuth.NONE; } String upperCaseKey = key.toUpperCase(Locale.ROOT); for (SslClientAuth auth : VALUES) { if (auth.name().equals(upperCaseKey)) { return auth; } } return null; } @Override public String toString() { return super.toString().toLowerCase(Locale.ROOT); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/SslConfigs.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; import org.apache.kafka.common.utils.Java; import org.apache.kafka.common.utils.Utils; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; import java.util.Set; public class SslConfigs { /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; public static final String SSL_PROTOCOL_DOC = "The SSL protocol used to generate the SSLContext. " + "The default is 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. " + "This value should be fine for most use cases. " + "Allowed values in recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' " + "may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. " + "With the default value for this config and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' if " + "the server does not support 'TLSv1.3'. 
If this config is set to 'TLSv1.2', clients will not use 'TLSv1.3' even " + "if it is one of the values in ssl.enabled.protocols and the server only supports 'TLSv1.3'."; public static final String DEFAULT_SSL_PROTOCOL; public static final String SSL_PROVIDER_CONFIG = "ssl.provider"; public static final String SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM."; public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; public static final String SSL_CIPHER_SUITES_DOC = "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. " + "By default all the available cipher suites are supported."; public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " + "The default is 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With the " + "default value for Java 11, clients and servers will prefer TLSv1.3 if both support it and fallback " + "to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most " + "cases. Also see the config documentation for `ssl.protocol`."; public static final String DEFAULT_SSL_ENABLED_PROTOCOLS; static { if (Java.IS_JAVA11_COMPATIBLE) { DEFAULT_SSL_PROTOCOL = "TLSv1.3"; DEFAULT_SSL_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.3"; } else { DEFAULT_SSL_PROTOCOL = "TLSv1.2"; DEFAULT_SSL_ENABLED_PROTOCOLS = "TLSv1.2"; } } public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " + "This is optional for client. 
The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."; public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS"; public static final String SSL_KEYSTORE_KEY_CONFIG = "ssl.keystore.key"; public static final String SSL_KEYSTORE_KEY_DOC = "Private key in the format specified by 'ssl.keystore.type'. " + "Default SSL engine factory supports only PEM format with PKCS#8 keys. If the key is encrypted, " + "key password must be specified using 'ssl.key.password'"; public static final String SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG = "ssl.keystore.certificate.chain"; public static final String SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC = "Certificate chain in the format specified by 'ssl.keystore.type'. " + "Default SSL engine factory supports only PEM format with a list of X.509 certificates"; public static final String SSL_TRUSTSTORE_CERTIFICATES_CONFIG = "ssl.truststore.certificates"; public static final String SSL_TRUSTSTORE_CERTIFICATES_DOC = "Trusted certificates in the format specified by 'ssl.truststore.type'. " + "Default SSL engine factory supports only PEM format with X.509 certificates."; public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " + "This is optional for client and can be used for two-way authentication for client."; public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. " + "This is optional for client and only needed if 'ssl.keystore.location' is configured. 
" + "Key store password is not supported for PEM format."; public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file or " + "the PEM key specified in 'ssl.keystore.key'."; public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."; public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS"; public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file."; public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. " + "If a password is not set, trust store file configured will still be used, but integrity checking is disabled. " + "Trust store password is not supported for PEM format."; public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; public static final String DEFAULT_SSL_KEYMANGER_ALGORITHM = KeyManagerFactory.getDefaultAlgorithm(); public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. 
" + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; public static final String DEFAULT_SSL_TRUSTMANAGER_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"; public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate. "; public static final String DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM = "https"; public static final String SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG = "ssl.secure.random.implementation"; public static final String SSL_SECURE_RANDOM_IMPLEMENTATION_DOC = "The SecureRandom PRNG implementation to use for SSL cryptography operations. "; public static final String SSL_ENGINE_FACTORY_CLASS_CONFIG = "ssl.engine.factory.class"; public static final String SSL_ENGINE_FACTORY_CLASS_DOC = "The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. 
Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory"; public static void addClientSslSupport(ConfigDef config) { config.define(SslConfigs.SSL_PROTOCOL_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_PROTOCOL, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_PROTOCOL_DOC) .define(SslConfigs.SSL_PROVIDER_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_PROVIDER_DOC) .define(SslConfigs.SSL_CIPHER_SUITES_CONFIG, ConfigDef.Type.LIST, null, ConfigDef.Importance.LOW, SslConfigs.SSL_CIPHER_SUITES_DOC) .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, ConfigDef.Type.LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_KEYSTORE_TYPE_DOC) .define(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_LOCATION_DOC) .define(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_PASSWORD_DOC) .define(SslConfigs.SSL_KEY_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEY_PASSWORD_DOC) .define(SslConfigs.SSL_KEYSTORE_KEY_CONFIG, Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_KEY_DOC) .define(SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC) .define(SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_DOC) .define(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_TRUSTSTORE_TYPE_DOC) .define(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, 
ConfigDef.Type.STRING, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_TRUSTSTORE_LOCATION_DOC) .define(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_TRUSTSTORE_PASSWORD_DOC) .define(SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, ConfigDef.Importance.LOW, SslConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) .define(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, ConfigDef.Importance.LOW, SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) .define(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, ConfigDef.Importance.LOW, SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC) .define(SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.LOW, SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_DOC) .define(SslConfigs.SSL_ENGINE_FACTORY_CLASS_CONFIG, ConfigDef.Type.CLASS, null, ConfigDef.Importance.LOW, SslConfigs.SSL_ENGINE_FACTORY_CLASS_DOC); } public static final Set<String> RECONFIGURABLE_CONFIGS = Utils.mkSet( SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, SslConfigs.SSL_KEY_PASSWORD_CONFIG, SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG, SslConfigs.SSL_KEYSTORE_KEY_CONFIG, SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG); public static final Set<String> NON_RECONFIGURABLE_CONFIGS = Utils.mkSet( BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, SslConfigs.SSL_PROTOCOL_CONFIG, SslConfigs.SSL_PROVIDER_CONFIG, SslConfigs.SSL_CIPHER_SUITES_CONFIG, SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, 
SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG, SslConfigs.SSL_ENGINE_FACTORY_CLASS_CONFIG); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/TopicConfig.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config; /** * <p>Keys that can be used to configure a topic. These keys are useful when creating or reconfiguring a * topic using the AdminClient. * * <p>The intended pattern is for broker configs to include a <code>`log.`</code> prefix. For example, to set the default broker * cleanup policy, one would set <code>log.cleanup.policy</code> instead of <code>cleanup.policy</code>. Unfortunately, there are many cases * where this pattern is not followed. */ // This is a public API, so we should not remove or alter keys without a discussion and a deprecation period. // Eventually this should replace LogConfig.scala. public class TopicConfig { public static final String SEGMENT_BYTES_CONFIG = "segment.bytes"; public static final String SEGMENT_BYTES_DOC = "This configuration controls the segment file size for " + "the log. 
Retention and cleaning is always done a file at a time so a larger segment size means " + "fewer files but less granular control over retention."; public static final String SEGMENT_MS_CONFIG = "segment.ms"; public static final String SEGMENT_MS_DOC = "This configuration controls the period of time after " + "which Kafka will force the log to roll even if the segment file isn't full to ensure that retention " + "can delete or compact old data."; public static final String SEGMENT_JITTER_MS_CONFIG = "segment.jitter.ms"; public static final String SEGMENT_JITTER_MS_DOC = "The maximum random jitter subtracted from the scheduled " + "segment roll time to avoid thundering herds of segment rolling"; public static final String SEGMENT_INDEX_BYTES_CONFIG = "segment.index.bytes"; public static final String SEGMENT_INDEX_BYTES_DOC = "This configuration controls the size of the index that " + "maps offsets to file positions. We preallocate this index file and shrink it only after log " + "rolls. You generally should not need to change this setting."; public static final String FLUSH_MESSAGES_INTERVAL_CONFIG = "flush.messages"; public static final String FLUSH_MESSAGES_INTERVAL_DOC = "This setting allows specifying an interval at " + "which we will force an fsync of data written to the log. For example if this was set to 1 " + "we would fsync after every message; if it were 5 we would fsync after every five messages. " + "In general we recommend you not set this and use replication for durability and allow the " + "operating system's background flush capabilities as it is more efficient. This setting can " + "be overridden on a per-topic basis (see <a href=\"#topicconfigs\">the per-topic configuration section</a>)."; public static final String FLUSH_MS_CONFIG = "flush.ms"; public static final String FLUSH_MS_DOC = "This setting allows specifying a time interval at which we will " + "force an fsync of data written to the log. 
For example if this was set to 1000 " + "we would fsync after 1000 ms had passed. In general we recommend you not set " + "this and use replication for durability and allow the operating system's background " + "flush capabilities as it is more efficient."; public static final String RETENTION_BYTES_CONFIG = "retention.bytes"; public static final String RETENTION_BYTES_DOC = "This configuration controls the maximum size a partition " + "(which consists of log segments) can grow to before we will discard old log segments to free up space if we " + "are using the \"delete\" retention policy. By default there is no size limit only a time limit. " + "Since this limit is enforced at the partition level, multiply it by the number of partitions to compute " + "the topic retention in bytes."; public static final String RETENTION_MS_CONFIG = "retention.ms"; public static final String RETENTION_MS_DOC = "This configuration controls the maximum time we will retain a " + "log before we will discard old log segments to free up space if we are using the " + "\"delete\" retention policy. This represents an SLA on how soon consumers must read " + "their data. If set to -1, no time limit is applied."; public static final String REMOTE_LOG_STORAGE_ENABLE_CONFIG = "remote.storage.enable"; public static final String REMOTE_LOG_STORAGE_ENABLE_DOC = "To enable tier storage for a topic, set `remote.storage.enable` as true. " + "You can not disable this config once it is enabled. It will be provided in future versions."; public static final String LOCAL_LOG_RETENTION_MS_CONFIG = "local.retention.ms"; public static final String LOCAL_LOG_RETENTION_MS_DOC = "The number of milli seconds to keep the local log segment before it gets deleted. " + "Default value is -2, it represents `retention.ms` value is to be used. 
The effective value should always be less than or equal " + "to `retention.ms` value."; public static final String LOCAL_LOG_RETENTION_BYTES_CONFIG = "local.retention.bytes"; public static final String LOCAL_LOG_RETENTION_BYTES_DOC = "The maximum size of local log segments that can grow for a partition before it " + "deletes the old segments. Default value is -2, it represents `retention.bytes` value to be used. The effective value should always be " + "less than or equal to `retention.bytes` value."; public static final String MAX_MESSAGE_BYTES_CONFIG = "max.message.bytes"; public static final String MAX_MESSAGE_BYTES_DOC = "The largest record batch size allowed by Kafka (after compression if compression is enabled). " + "If this is increased and there are consumers older than 0.10.2, the consumers' fetch " + "size must also be increased so that they can fetch record batches this large. " + "In the latest message format version, records are always grouped into batches for efficiency. " + "In previous message format versions, uncompressed records are not grouped into batches and this " + "limit only applies to a single record in that case."; public static final String INDEX_INTERVAL_BYTES_CONFIG = "index.interval.bytes"; public static final String INDEX_INTERVAL_BYTES_DOC = "This setting controls how frequently " + "Kafka adds an index entry to its offset index. The default setting ensures that we index a " + "message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact " + "position in the log but makes the index larger. 
You probably don't need to change this."; public static final String FILE_DELETE_DELAY_MS_CONFIG = "file.delete.delay.ms"; public static final String FILE_DELETE_DELAY_MS_DOC = "The time to wait before deleting a file from the " + "filesystem"; public static final String DELETE_RETENTION_MS_CONFIG = "delete.retention.ms"; public static final String DELETE_RETENTION_MS_DOC = "The amount of time to retain delete tombstone markers " + "for <a href=\"#compaction\">log compacted</a> topics. This setting also gives a bound " + "on the time in which a consumer must complete a read if they begin from offset 0 " + "to ensure that they get a valid snapshot of the final stage (otherwise delete " + "tombstones may be collected before they complete their scan)."; public static final String MIN_COMPACTION_LAG_MS_CONFIG = "min.compaction.lag.ms"; public static final String MIN_COMPACTION_LAG_MS_DOC = "The minimum time a message will remain " + "uncompacted in the log. Only applicable for logs that are being compacted."; public static final String MAX_COMPACTION_LAG_MS_CONFIG = "max.compaction.lag.ms"; public static final String MAX_COMPACTION_LAG_MS_DOC = "The maximum time a message will remain " + "ineligible for compaction in the log. Only applicable for logs that are being compacted."; public static final String MIN_CLEANABLE_DIRTY_RATIO_CONFIG = "min.cleanable.dirty.ratio"; public static final String MIN_CLEANABLE_DIRTY_RATIO_DOC = "This configuration controls how frequently " + "the log compactor will attempt to clean the log (assuming <a href=\"#compaction\">log " + "compaction</a> is enabled). By default we will avoid cleaning a log where more than " + "50% of the log has been compacted. This ratio bounds the maximum space wasted in " + "the log by duplicates (at 50% at most 50% of the log could be duplicates). A " + "higher ratio will mean fewer, more efficient cleanings but will mean more wasted " + "space in the log. 
If the " + MAX_COMPACTION_LAG_MS_CONFIG + " or the " + MIN_COMPACTION_LAG_MS_CONFIG + " configurations are also specified, then the log compactor considers the log to be eligible for compaction " + "as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) " + "records for at least the " + MIN_COMPACTION_LAG_MS_CONFIG + " duration, or (ii) if the log has had " + "dirty (uncompacted) records for at most the " + MAX_COMPACTION_LAG_MS_CONFIG + " period."; public static final String CLEANUP_POLICY_CONFIG = "cleanup.policy"; public static final String CLEANUP_POLICY_COMPACT = "compact"; public static final String CLEANUP_POLICY_DELETE = "delete"; public static final String CLEANUP_POLICY_DOC = "This config designates the retention policy to " + "use on log segments. The \"delete\" policy (which is the default) will discard old segments " + "when their retention time or size limit has been reached. The \"compact\" policy will enable " + "<a href=\"#compaction\">log compaction</a>, which retains the latest value for each key. " + "It is also possible to specify both policies in a comma-separated list (e.g. \"delete,compact\"). " + "In this case, old segments will be discarded per the retention time and size configuration, " + "while retained segments will be compacted."; public static final String UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG = "unclean.leader.election.enable"; public static final String UNCLEAN_LEADER_ELECTION_ENABLE_DOC = "Indicates whether to enable replicas " + "not in the ISR set to be elected as leader as a last resort, even though doing so may result in data " + "loss."; public static final String MIN_IN_SYNC_REPLICAS_CONFIG = "min.insync.replicas"; public static final String MIN_IN_SYNC_REPLICAS_DOC = "When a producer sets acks to \"all\" (or \"-1\"), " + "this configuration specifies the minimum number of replicas that must acknowledge " + "a write for the write to be considered successful. 
If this minimum cannot be met, " + "then the producer will raise an exception (either NotEnoughReplicas or " + "NotEnoughReplicasAfterAppend).<br>When used together, <code>min.insync.replicas</code> and <code>acks</code> " + "allow you to enforce greater durability guarantees. A typical scenario would be to " + "create a topic with a replication factor of 3, set <code>min.insync.replicas</code> to 2, and " + "produce with <code>acks</code> of \"all\". This will ensure that the producer raises an exception " + "if a majority of replicas do not receive a write."; public static final String COMPRESSION_TYPE_CONFIG = "compression.type"; public static final String COMPRESSION_TYPE_DOC = "Specify the final compression type for a given topic. " + "This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally " + "accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the " + "original compression codec set by the producer."; public static final String PREALLOCATE_CONFIG = "preallocate"; public static final String PREALLOCATE_DOC = "True if we should preallocate the file on disk when " + "creating a new log segment."; /** * @deprecated since 3.0, removal planned in 4.0. The default value for this config is appropriate * for most situations. */ @Deprecated public static final String MESSAGE_FORMAT_VERSION_CONFIG = "message.format.version"; /** * @deprecated since 3.0, removal planned in 4.0. The default value for this config is appropriate * for most situations. */ @Deprecated public static final String MESSAGE_FORMAT_VERSION_DOC = "[DEPRECATED] Specify the message format version the broker " + "will use to append messages to the logs. The value of this config is always assumed to be `3.0` if " + "`inter.broker.protocol.version` is 3.0 or higher (the actual config value is ignored). Otherwise, the value should " + "be a valid ApiVersion. Some examples are: 0.10.0, 1.1, 2.8, 3.0. 
By setting a particular message format version, the " + "user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting " + "this value incorrectly will cause consumers with older versions to break as they will receive messages with a format " + "that they don't understand."; public static final String MESSAGE_TIMESTAMP_TYPE_CONFIG = "message.timestamp.type"; public static final String MESSAGE_TIMESTAMP_TYPE_DOC = "Define whether the timestamp in the message is " + "message create time or log append time. The value should be either `CreateTime` or `LogAppendTime`"; public static final String MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG = "message.timestamp.difference.max.ms"; public static final String MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC = "The maximum difference allowed between " + "the timestamp when a broker receives a message and the timestamp specified in the message. If " + "message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp " + "exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime."; public static final String MESSAGE_DOWNCONVERSION_ENABLE_CONFIG = "message.downconversion.enable"; public static final String MESSAGE_DOWNCONVERSION_ENABLE_DOC = "This configuration controls whether " + "down-conversion of message formats is enabled to satisfy consume requests. When set to <code>false</code>, " + "broker will not perform down-conversion for consumers expecting an older message format. The broker responds " + "with <code>UNSUPPORTED_VERSION</code> error for consume requests from such older clients. This configuration" + "does not apply to any message format conversion that might be required for replication to followers."; }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters. */ package org.apache.kafka.common.config;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config.internals; import org.apache.kafka.common.config.SaslConfigs; import java.util.Collections; import java.util.List; /** * Common home for broker-side security configs which need to be accessible from the libraries shared * between the broker and the client. * * Note this is an internal API and subject to change without notice. 
*/ public class BrokerSecurityConfigs { public static final String PRINCIPAL_BUILDER_CLASS_CONFIG = "principal.builder.class"; public static final String SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG = "sasl.kerberos.principal.to.local.rules"; public static final String SSL_CLIENT_AUTH_CONFIG = "ssl.client.auth"; public static final String SASL_ENABLED_MECHANISMS_CONFIG = "sasl.enabled.mechanisms"; public static final String SASL_SERVER_CALLBACK_HANDLER_CLASS = "sasl.server.callback.handler.class"; public static final String SSL_PRINCIPAL_MAPPING_RULES_CONFIG = "ssl.principal.mapping.rules"; public static final String CONNECTIONS_MAX_REAUTH_MS = "connections.max.reauth.ms"; public static final int DEFAULT_SASL_SERVER_MAX_RECEIVE_SIZE = 524288; public static final String SASL_SERVER_MAX_RECEIVE_SIZE_CONFIG = "sasl.server.max.receive.size"; public static final String PRINCIPAL_BUILDER_CLASS_DOC = "The fully qualified name of a class that implements the " + "KafkaPrincipalBuilder interface, which is used to build the KafkaPrincipal object used during " + "authorization. If no principal builder is defined, the default behavior depends " + "on the security protocol in use. For SSL authentication, the principal will be derived using the " + "rules defined by <code>" + SSL_PRINCIPAL_MAPPING_RULES_CONFIG + "</code> applied on the distinguished " + "name from the client certificate if one is provided; otherwise, if client authentication is not required, " + "the principal name will be ANONYMOUS. For SASL authentication, the principal will be derived using the " + "rules defined by <code>" + SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG + "</code> if GSSAPI is in use, " + "and the SASL authentication ID for other mechanisms. For PLAINTEXT, the principal will be ANONYMOUS."; public static final String SSL_PRINCIPAL_MAPPING_RULES_DOC = "A list of rules for mapping from distinguished name" + " from the client certificate to short name. 
The rules are evaluated in order and the first rule that matches" + " a principal name is used to map it to a short name. Any later rules in the list are ignored. By default," + " distinguished name of the X.500 certificate will be the principal. For more details on the format please" + " see <a href=\"#security_authz\"> security authorization and acls</a>. Note that this configuration is ignored" + " if an extension of KafkaPrincipalBuilder is provided by the <code>" + PRINCIPAL_BUILDER_CLASS_CONFIG + "</code>" + " configuration."; public static final String DEFAULT_SSL_PRINCIPAL_MAPPING_RULES = "DEFAULT"; public static final String SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC = "A list of rules for mapping from principal " + "names to short names (typically operating system usernames). The rules are evaluated in order and the " + "first rule that matches a principal name is used to map it to a short name. Any later rules in the list are " + "ignored. By default, principal names of the form <code>{username}/{hostname}@{REALM}</code> are mapped " + "to <code>{username}</code>. For more details on the format please see <a href=\"#security_authz\"> " + "security authorization and acls</a>. Note that this configuration is ignored if an extension of " + "<code>KafkaPrincipalBuilder</code> is provided by the <code>" + PRINCIPAL_BUILDER_CLASS_CONFIG + "</code> configuration."; public static final List<String> DEFAULT_SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES = Collections.singletonList("DEFAULT"); public static final String SSL_CLIENT_AUTH_DOC = "Configures kafka broker to request client authentication." + " The following settings are common: " + " <ul>" + " <li><code>ssl.client.auth=required</code> If set to required client authentication is required." + " <li><code>ssl.client.auth=requested</code> This means client authentication is optional." 
+ " unlike required, if this option is set client can choose not to provide authentication information about itself" + " <li><code>ssl.client.auth=none</code> This means client authentication is not needed." + "</ul>"; public static final String SASL_ENABLED_MECHANISMS_DOC = "The list of SASL mechanisms enabled in the Kafka server. " + "The list may contain any mechanism for which a security provider is available. " + "Only GSSAPI is enabled by default."; public static final List<String> DEFAULT_SASL_ENABLED_MECHANISMS = Collections.singletonList(SaslConfigs.GSSAPI_MECHANISM); public static final String SASL_SERVER_CALLBACK_HANDLER_CLASS_DOC = "The fully qualified name of a SASL server callback handler " + "class that implements the AuthenticateCallbackHandler interface. Server callback handlers must be prefixed with " + "listener prefix and SASL mechanism name in lower-case. For example, " + "listener.name.sasl_ssl.plain.sasl.server.callback.handler.class=com.example.CustomPlainCallbackHandler."; public static final String CONNECTIONS_MAX_REAUTH_MS_DOC = "When explicitly set to a positive number (the default is 0, not a positive number), " + "a session lifetime that will not exceed the configured value will be communicated to v2.2.0 or later clients when they authenticate. " + "The broker will disconnect any such connection that is not re-authenticated within the session lifetime and that is then subsequently " + "used for any purpose other than re-authentication. Configuration names can optionally be prefixed with listener prefix and SASL " + "mechanism name in lower-case. For example, listener.name.sasl_ssl.oauthbearer.connections.max.reauth.ms=3600000"; public static final String SASL_SERVER_MAX_RECEIVE_SIZE_DOC = "The maximum receive size allowed before and during initial SASL authentication." + " Default receive size is 512KB. GSSAPI limits requests to 64K, but we allow upto 512KB by default for custom SASL mechanisms. 
In practice," + " PLAIN, SCRAM and OAUTH mechanisms can use much smaller limits."; }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/internals/QuotaConfigs.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.config.internals; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.security.scram.internals.ScramMechanism; import java.util.Arrays; import java.util.HashSet; import java.util.Set; /** * Define the dynamic quota configs. Note that these are not normal configurations that exist in properties files. They * only exist dynamically in the controller (or ZK, depending on which mode the cluster is running). 
*/ public class QuotaConfigs { public static final String PRODUCER_BYTE_RATE_OVERRIDE_CONFIG = "producer_byte_rate"; public static final String CONSUMER_BYTE_RATE_OVERRIDE_CONFIG = "consumer_byte_rate"; public static final String REQUEST_PERCENTAGE_OVERRIDE_CONFIG = "request_percentage"; public static final String CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG = "controller_mutation_rate"; public static final String IP_CONNECTION_RATE_OVERRIDE_CONFIG = "connection_creation_rate"; public static final String PRODUCER_BYTE_RATE_DOC = "A rate representing the upper bound (bytes/sec) for producer traffic."; public static final String CONSUMER_BYTE_RATE_DOC = "A rate representing the upper bound (bytes/sec) for consumer traffic."; public static final String REQUEST_PERCENTAGE_DOC = "A percentage representing the upper bound of time spent for processing requests."; public static final String CONTROLLER_MUTATION_RATE_DOC = "The rate at which mutations are accepted for the create " + "topics request, the create partitions request and the delete topics request. 
The rate is accumulated by " + "the number of partitions created or deleted."; public static final String IP_CONNECTION_RATE_DOC = "An int representing the upper bound of connections accepted " + "for the specified IP."; public static final int IP_CONNECTION_RATE_DEFAULT = Integer.MAX_VALUE; private final static Set<String> USER_AND_CLIENT_QUOTA_NAMES = new HashSet<>(Arrays.asList( PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, REQUEST_PERCENTAGE_OVERRIDE_CONFIG, CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG )); private static void buildUserClientQuotaConfigDef(ConfigDef configDef) { configDef.define(PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, ConfigDef.Importance.MEDIUM, PRODUCER_BYTE_RATE_DOC); configDef.define(CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, ConfigDef.Importance.MEDIUM, CONSUMER_BYTE_RATE_DOC); configDef.define(REQUEST_PERCENTAGE_OVERRIDE_CONFIG, ConfigDef.Type.DOUBLE, Integer.valueOf(Integer.MAX_VALUE).doubleValue(), ConfigDef.Importance.MEDIUM, REQUEST_PERCENTAGE_DOC); configDef.define(CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.DOUBLE, Integer.valueOf(Integer.MAX_VALUE).doubleValue(), ConfigDef.Importance.MEDIUM, CONTROLLER_MUTATION_RATE_DOC); } public static boolean isClientOrUserConfig(String name) { return USER_AND_CLIENT_QUOTA_NAMES.contains(name); } public static ConfigDef userAndClientQuotaConfigs() { ConfigDef configDef = new ConfigDef(); buildUserClientQuotaConfigDef(configDef); return configDef; } public static ConfigDef scramMechanismsPlusUserAndClientQuotaConfigs() { ConfigDef configDef = new ConfigDef(); ScramMechanism.mechanismNames().forEach(mechanismName -> { configDef.define(mechanismName, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, "User credentials for SCRAM mechanism " + mechanismName); }); buildUserClientQuotaConfigDef(configDef); return configDef; } public static ConfigDef ipConfigs() { ConfigDef configDef = new 
ConfigDef(); configDef.define(IP_CONNECTION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.INT, Integer.MAX_VALUE, ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, IP_CONNECTION_RATE_DOC); return configDef; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/provider/ConfigProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config.provider;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.ConfigChangeCallback;
import org.apache.kafka.common.config.ConfigData;

import java.io.Closeable;
import java.util.Set;

/**
 * A provider of configuration data, which may optionally support subscriptions to configuration changes.
 * Implementations are required to safely support concurrent calls to any of the methods in this interface.
 * Kafka Connect discovers configuration providers using Java's Service Provider mechanism (see {@code java.util.ServiceLoader}).
 * To support this, implementations of this interface should also contain a service provider configuration file in
 * {@code META-INF/services/org.apache.kafka.common.config.provider.ConfigProvider}.
 */
public interface ConfigProvider extends Configurable, Closeable {

    /**
     * Retrieves the data at the given path.
     *
     * @param path the path where the data resides
     * @return the configuration data
     */
    ConfigData get(String path);

    /**
     * Retrieves the data with the given keys at the given path.
     *
     * @param path the path where the data resides
     * @param keys the keys whose values will be retrieved
     * @return the configuration data
     */
    ConfigData get(String path, Set<String> keys);

    /**
     * Subscribes to changes for the given keys at the given path (optional operation).
     *
     * @param path the path where the data resides
     * @param keys the keys whose values will be watched for changes
     * @param callback the callback to invoke upon change
     * @throws UnsupportedOperationException if the subscribe operation is not supported
     */
    default void subscribe(String path, Set<String> keys, ConfigChangeCallback callback) {
        throw new UnsupportedOperationException();
    }

    /**
     * Unsubscribes from changes for the given keys at the given path (optional operation).
     *
     * @param path the path where the data resides
     * @param keys the keys whose values were being watched for changes
     * @param callback the callback to be unsubscribed from changes
     * @throws UnsupportedOperationException if the unsubscribe operation is not supported
     */
    default void unsubscribe(String path, Set<String> keys, ConfigChangeCallback callback) {
        throw new UnsupportedOperationException();
    }

    /**
     * Clears all subscribers (optional operation).
     *
     * @throws UnsupportedOperationException if the unsubscribeAll operation is not supported
     */
    default void unsubscribeAll() {
        throw new UnsupportedOperationException();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config.provider;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.ConfigException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static java.util.Collections.emptyMap;

/**
 * An implementation of {@link ConfigProvider} based on a directory of files.
 * Property keys correspond to the names of the regular (i.e. non-directory)
 * files in a directory given by the path parameter.
 * Property values are taken from the file contents corresponding to each key.
 */
public class DirectoryConfigProvider implements ConfigProvider {

    private static final Logger log = LoggerFactory.getLogger(DirectoryConfigProvider.class);

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public void close() throws IOException { }

    /**
     * Retrieves the data contained in regular files in the directory given by {@code path}.
     * Non-regular files (such as directories) in the given directory are silently ignored.
     * @param path the directory where data files reside.
     * @return the configuration data.
     */
    @Override
    public ConfigData get(String path) {
        return get(path, Files::isRegularFile);
    }

    /**
     * Retrieves the data contained in the regular files named by {@code keys} in the directory given by {@code path}.
     * Non-regular files (such as directories) in the given directory are silently ignored.
     * @param path the directory where data files reside.
     * @param keys the keys whose values will be retrieved.
     * @return the configuration data.
     */
    @Override
    public ConfigData get(String path, Set<String> keys) {
        // A file is included only when it is a regular file AND its simple name is one of the requested keys.
        return get(path, file -> Files.isRegularFile(file) && keys.contains(file.getFileName().toString()));
    }

    /**
     * Reads every file in {@code path} accepted by {@code fileFilter} into a key/value map
     * (file name -> file contents). A null/empty path or a non-directory path yields empty data;
     * the non-directory case is additionally logged at WARN level.
     */
    private static ConfigData get(String path, Predicate<Path> fileFilter) {
        if (path == null || path.isEmpty()) {
            return new ConfigData(emptyMap());
        }
        Path dir = new File(path).toPath();
        if (!Files.isDirectory(dir)) {
            log.warn("The path {} is not a directory", path);
            return new ConfigData(emptyMap());
        }
        // Files.list returns an open stream over the directory entries; it must be closed.
        try (Stream<Path> files = Files.list(dir)) {
            Map<String, String> values = files
                    .filter(fileFilter)
                    .collect(Collectors.toMap(
                            file -> file.getFileName().toString(),
                            DirectoryConfigProvider::read));
            return new ConfigData(values);
        } catch (IOException e) {
            // ConfigException offers no cause-taking constructor, so the cause is logged before throwing.
            log.error("Could not list directory {}", dir, e);
            throw new ConfigException("Could not list directory " + dir);
        }
    }

    /**
     * Returns the full contents of {@code path} decoded as UTF-8.
     * Wraps any I/O failure in a {@link ConfigException} after logging it.
     */
    private static String read(Path path) {
        try {
            byte[] contents = Files.readAllBytes(path);
            return new String(contents, StandardCharsets.UTF_8);
        } catch (IOException e) {
            log.error("Could not read file {} for property {}", path, path.getFileName(), e);
            throw new ConfigException("Could not read file " + path + " for property " + path.getFileName());
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/provider/EnvVarConfigProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config.provider;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.ConfigException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * An implementation of {@link ConfigProvider} based on environment variables.
 * Keys correspond to the names of the environment variables, paths are currently not being used.
 * Using an allowlist pattern {@link EnvVarConfigProvider#ALLOWLIST_PATTERN_CONFIG} that supports regular expressions,
 * it is possible to limit access to specific environment variables. Default allowlist pattern is ".*".
 */
public class EnvVarConfigProvider implements ConfigProvider {

    private static final Logger log = LoggerFactory.getLogger(EnvVarConfigProvider.class);

    public static final String ALLOWLIST_PATTERN_CONFIG = "allowlist.pattern";
    public static final String ALLOWLIST_PATTERN_CONFIG_DOC =
            "A pattern / regular expression that needs to match for environment variables" +
                    " to be used by this config provider.";

    /** Snapshot of all environment variables, captured at construction time. */
    private final Map<String, String> envVarMap;
    /** Subset of {@link #envVarMap} matching the allowlist pattern; populated by {@link #configure(Map)}. */
    private Map<String, String> filteredEnvVarMap;

    public EnvVarConfigProvider() {
        envVarMap = getEnvVars();
    }

    /**
     * Visible-for-testing constructor that uses the supplied map instead of the process environment.
     * @param envVarsAsArgument the environment variables to expose
     */
    public EnvVarConfigProvider(Map<String, String> envVarsAsArgument) {
        envVarMap = envVarsAsArgument;
    }

    /**
     * Applies the optional {@link #ALLOWLIST_PATTERN_CONFIG} regular expression to the captured
     * environment variables. Variables whose names do not fully match the pattern are excluded
     * from all subsequent {@code get} calls. Without the config, all variables are retained.
     */
    @Override
    public void configure(Map<String, ?> configs) {
        Pattern envVarPattern;
        if (configs.containsKey(ALLOWLIST_PATTERN_CONFIG)) {
            envVarPattern = Pattern.compile(
                    String.valueOf(configs.get(ALLOWLIST_PATTERN_CONFIG))
            );
        } else {
            envVarPattern = Pattern.compile(".*");
            // Fixed: message previously printed '(.*)' although the compiled default is ".*".
            log.info("No pattern for environment variables provided. Using default pattern '.*'.");
        }
        filteredEnvVarMap = envVarMap.entrySet().stream()
                .filter(envVar -> envVarPattern.matcher(envVar.getKey()).matches())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)
                );
    }

    @Override
    public void close() throws IOException {
    }

    /**
     * @param path unused
     * @return returns environment variables as configuration
     */
    @Override
    public ConfigData get(String path) {
        return get(path, null);
    }

    /**
     * @param path path, not used for environment variables
     * @param keys the keys whose values will be retrieved.
     * @return the configuration data.
     * @throws ConfigException if a non-empty path is supplied
     */
    @Override
    public ConfigData get(String path, Set<String> keys) {
        if (path != null && !path.isEmpty()) {
            log.error("Path is not supported for EnvVarConfigProvider, invalid value '{}'", path);
            throw new ConfigException("Path is not supported for EnvVarConfigProvider, invalid value '" + path + "'");
        }

        // Robustness fix: if configure() was never invoked (possible when the provider is
        // constructed directly rather than through AbstractConfig), filteredEnvVarMap is null
        // and the original code threw NullPointerException here. Fall back to the unfiltered
        // snapshot, which is what the default ".*" allowlist would produce anyway.
        Map<String, String> envVars = filteredEnvVarMap != null ? filteredEnvVarMap : envVarMap;

        if (keys == null) {
            return new ConfigData(envVars);
        }

        Map<String, String> filteredData = new HashMap<>(envVars);
        filteredData.keySet().retainAll(keys);

        return new ConfigData(filteredData);
    }

    /**
     * Reads the process environment, converting any failure (e.g. a SecurityManager denial)
     * into a {@link ConfigException}.
     */
    private Map<String, String> getEnvVars() {
        try {
            return System.getenv();
        } catch (Exception e) {
            // ConfigException has no cause-taking constructor, so the cause is logged before throwing.
            log.error("Could not read environment variables", e);
            throw new ConfigException("Could not read environment variables");
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/config/provider/FileConfigProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.config.provider;

import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.ConfigException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

/**
 * An implementation of {@link ConfigProvider} that represents a Properties file.
 * All property keys and values are stored as cleartext.
 */
public class FileConfigProvider implements ConfigProvider {

    private static final Logger log = LoggerFactory.getLogger(FileConfigProvider.class);

    @Override
    public void configure(Map<String, ?> configs) {
    }

    /**
     * Retrieves the data at the given Properties file.
     *
     * @param path the file where the data resides
     * @return the configuration data
     * @throws ConfigException if the file cannot be read
     */
    @Override
    public ConfigData get(String path) {
        Map<String, String> data = new HashMap<>();
        if (path == null || path.isEmpty()) {
            return new ConfigData(data);
        }
        Properties properties = loadProperties(path);
        // stringPropertyNames() is equivalent to the legacy keys() enumeration here:
        // properties loaded from a file always have String keys and no defaults are set.
        for (String key : properties.stringPropertyNames()) {
            String value = properties.getProperty(key);
            if (value != null) {
                data.put(key, value);
            }
        }
        return new ConfigData(data);
    }

    /**
     * Retrieves the data with the given keys at the given Properties file.
     *
     * @param path the file where the data resides
     * @param keys the keys whose values will be retrieved
     * @return the configuration data
     * @throws ConfigException if the file cannot be read
     */
    @Override
    public ConfigData get(String path, Set<String> keys) {
        Map<String, String> data = new HashMap<>();
        if (path == null || path.isEmpty()) {
            return new ConfigData(data);
        }
        Properties properties = loadProperties(path);
        for (String key : keys) {
            String value = properties.getProperty(key);
            if (value != null) {
                data.put(key, value);
            }
        }
        return new ConfigData(data);
    }

    /**
     * Loads the properties file at {@code path}, converting I/O failures into
     * {@link ConfigException} after logging them. Extracted so both {@code get}
     * overloads share one copy of the load-and-catch logic.
     */
    private Properties loadProperties(String path) {
        try (Reader reader = reader(path)) {
            Properties properties = new Properties();
            properties.load(reader);
            return properties;
        } catch (IOException e) {
            // ConfigException has no cause-taking constructor, so the cause is logged before throwing.
            log.error("Could not read properties from file {}", path, e);
            throw new ConfigException("Could not read properties from file " + path);
        }
    }

    // visible for testing
    protected Reader reader(String path) throws IOException {
        return Files.newBufferedReader(Paths.get(path));
    }

    @Override
    public void close() {
    }
}