index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/NatsAuthHandler.java | package org.apache.kafka.common.superstream;
import io.nats.client.AuthHandler;
import io.nats.client.NKey;
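// AuthHandler that authenticates to NATS with a user JWT and signs server-issued nonces with the matching NKey seed.
// A minimal usage sketch (assuming you already hold a valid JWT and NKey seed):
// Options options = new Options.Builder()
// .server("nats://localhost:4222")
// .authHandler(new NatsAuthHandler(jwt, nkeySeed))
// .build();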
public class NatsAuthHandler implements AuthHandler {
private final String jwt;
private final NKey nkey;
public NatsAuthHandler(String jwt, String nkeySeed) {
this.jwt = jwt;
this.nkey = NKey.fromSeed(nkeySeed.toCharArray());
}
@Override
public char[] getID() {
return jwt.toCharArray();
}
@Override
public byte[] sign(byte[] nonce) {
try {
return nkey.sign(nonce);
} catch (Exception e) {
// Signing failed: log and return null so the NATS client reports an authentication error.
e.printStackTrace();
return null;
}
}
@Override
public char[] getJWT() {
return jwt.toCharArray();
}
} |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/Superstream.java | package org.apache.kafka.common.superstream;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.gson.JsonParser;
import com.google.gson.JsonSyntaxException;
import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.DescriptorProtos.FileDescriptorSet;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Descriptors.FileDescriptor;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.util.JsonFormat;
import io.nats.client.*;
import io.nats.client.api.ServerInfo;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.net.InetAddress;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import static org.apache.kafka.common.superstream.Consts.*;
public class Superstream {
public Connection brokerConnection;
public JetStream jetstream;
public String superstreamJwt;
public String superstreamNkey;
public byte[] descriptorAsBytes;
public Descriptors.Descriptor descriptor;
public String natsConnectionID;
public String clientHash;
public String accountName;
public int learningFactor = 20;
public int learningFactorCounter = 0;
public boolean learningRequestSent = false;
private static final ObjectMapper objectMapper = new ObjectMapper();
public String ProducerSchemaID = "0";
public String ConsumerSchemaID = "0";
public Map<String, Descriptors.Descriptor> SchemaIDMap = new HashMap<>();
public Map<String, Object> configs;
private Map<String, Object> fullClientConfigs = new HashMap<>();
private Map<String, ?> superstreamConfigs;
public SuperstreamCounters clientCounters = new SuperstreamCounters();
private Subscription updatesSubscription;
private String host;
private String token;
public String type;
public Boolean reductionEnabled;
public Map<String, Set<Integer>> topicPartitions = new ConcurrentHashMap<>();
public ExecutorService executorService = Executors.newFixedThreadPool(3);
private Integer kafkaConnectionID = 0;
public Boolean superstreamReady = false;
private String tags = "";
public Boolean canStart = false;
public Boolean compressionEnabled;
public String compressionType = "zstd";
public Boolean compressionTurnedOffBySuperstream = false;
private String clientIp;
private String clientHost;
private static boolean isStdoutSuppressed = false;
private static boolean isStderrSuppressed = false;
private static PrintStream superstreamPrintStream;
private static PrintStream superstreamErrStream;
private static final PrintStream originalOut = System.out;
private static final PrintStream originalErr = System.err;
private SuperstreamConfigParser configParser = null;
static {
if (Boolean.parseBoolean(System.getenv(SUPERSTREAM_DEBUG_ENV_VAR_ENV_VAR))) {
System.out.println("Superstream library has been loaded.");
}
}
public Superstream(String token, String host, Integer learningFactor, Map<String, Object> configs,
Boolean enableReduction, String type, String tags, Boolean enableCompression) {
this.learningFactor = learningFactor;
this.token = token;
this.host = host;
this.configs = deepCopyMap(configs);
this.reductionEnabled = enableReduction;
this.type = type;
this.tags = tags;
this.compressionEnabled = enableCompression;
superstreamPrintStream = new PrintStream(new ClassOutputStream());
superstreamErrStream = new PrintStream(new ClassErrorStream());
this.configParser = new SuperstreamConfigParser();
}
public Superstream(String token, String host, Integer learningFactor, Map<String, Object> configs,
Boolean enableReduction, String type) {
this(token, host, learningFactor, configs, enableReduction, type, "", false);
}
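// Runs the whole connection/registration flow on a worker thread so client construction never blocks on Superstream.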
public void init() {
executorService.submit(() -> {
try {
initializeNatsConnection(token, host);
if (this.brokerConnection != null) {
registerClient(configs);
waitForStart();
if (!canStart) {
throw new Exception("Could not start superstream");
}
superstreamPrintStream.println("Successfully connected to superstream");
subscribeToUpdates();
superstreamReady = true;
reportClientsUpdate();
sendClientTypeUpdateReq();
}
} catch (Exception e) {
handleError(e.getMessage());
}
});
}
private static void checkStdoutEnvVar() {
if (Boolean.parseBoolean(System.getenv(SUPERSTREAM_DEBUG_ENV_VAR_ENV_VAR))) {
isStdoutSuppressed = false;
isStderrSuppressed = false;
} else {
isStdoutSuppressed = true;
isStderrSuppressed = true;
}
}
public void close() {
try {
if (brokerConnection != null) {
brokerConnection.close();
}
executorService.shutdown();
} catch (Exception e) {
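// best-effort shutdown: ignore errors while closing the NATS connection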
}
}
// private Boolean getBooleanEnv(String key, Boolean defaultValue) {
// String value = System.getenv(key);
// return (value != null) ? Boolean.parseBoolean(value) : defaultValue;
// }
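// Connects to NATS with unlimited reconnect attempts; on reconnect the listener re-announces the new connection ID and re-subscribes to updates.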
private void initializeNatsConnection(String token, String host) {
try {
Options options = new Options.Builder()
.server(host)
.userInfo(superstreamInternalUsername, token)
.maxReconnects(-1)
.connectionTimeout(Duration.ofSeconds(10))
.reconnectWait(Duration.ofSeconds(1))
.connectionListener(new ConnectionListener() {
@Override
public void connectionEvent(Connection conn, Events type) {
if (type == Events.DISCONNECTED) {
brokerConnection = null;
superstreamReady = false;
superstreamPrintStream.println("superstream: disconnected from superstream");
} else if (type == Events.RECONNECTED) {
try {
brokerConnection = conn;
if (brokerConnection != null) {
natsConnectionID = generateNatsConnectionID();
Map<String, Object> reqData = new HashMap<>();
reqData.put("new_nats_connection_id", natsConnectionID);
reqData.put("client_hash", clientHash);
ObjectMapper mapper = new ObjectMapper();
byte[] reqBytes = mapper.writeValueAsBytes(reqData);
brokerConnection.publish(clientReconnectionUpdateSubject, reqBytes);
subscribeToUpdates();
superstreamReady = true;
reportClientsUpdate();
}
} catch (Exception e) {
superstreamPrintStream.println(
"superstream: failed to reconnect: " + e.getMessage());
}
superstreamPrintStream.println("superstream: reconnected to superstream");
}
}
})
.build();
Connection nc = Nats.connect(options);
if (nc == null) {
throw new Exception(String.format("Failed to connect to host: %s", host));
}
JetStream js = nc.jetStream();
if (js == null) {
throw new Exception(String.format("Failed to connect to host: %s", host));
}
brokerConnection = nc;
jetstream = js;
natsConnectionID = generateNatsConnectionID();
} catch (Exception e) {
superstreamPrintStream.println(String.format("superstream: %s", e.getMessage()));
}
}
private String generateNatsConnectionID() {
ServerInfo serverInfo = brokerConnection.getServerInfo();
String connectedServerName = serverInfo.getServerName();
int serverClientID = serverInfo.getClientId();
return connectedServerName + ":" + serverClientID;
}
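// Registers this client with the Superstream engine and stores the client_hash, account_name, and learning_factor returned in the reply.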
public void registerClient(Map<String, ?> configs) {
try {
String kafkaConnID = consumeConnectionID();
if (kafkaConnID != null) {
try {
kafkaConnectionID = Integer.parseInt(kafkaConnID);
} catch (Exception e) {
kafkaConnectionID = 0;
}
}
InetAddress localHost = InetAddress.getLocalHost();
this.clientIp = localHost.getHostAddress();
this.clientHost = localHost.getHostName();
Map<String, Object> configToSend = populateConfigToSend(configs);
Map<String, Object> reqData = new HashMap<>();
reqData.put("nats_connection_id", natsConnectionID);
reqData.put("language", "java");
reqData.put("learning_factor", learningFactor);
reqData.put("version", sdkVersion);
reqData.put("config", configToSend);
reqData.put("reduction_enabled", reductionEnabled);
reqData.put("connection_id", kafkaConnectionID);
reqData.put("tags", tags);
reqData.put("client_ip", clientIp);
reqData.put("client_host", clientHost);
ObjectMapper mapper = new ObjectMapper();
reqData.put("type", this.type);
byte[] reqBytes = mapper.writeValueAsBytes(reqData);
Message reply = brokerConnection.request(clientRegisterSubject, reqBytes, Duration.ofMinutes(5));
if (reply != null) {
@SuppressWarnings("unchecked")
Map<String, Object> replyData = mapper.readValue(reply.getData(), Map.class);
Object clientHashObject = replyData.get("client_hash");
if (clientHashObject != null) {
clientHash = clientHashObject.toString();
} else {
superstreamPrintStream.println("superstream: client_hash is not a valid string: " + clientHashObject);
}
Object accountNameObject = replyData.get("account_name");
if (accountNameObject != null) {
accountName = accountNameObject.toString();
} else {
superstreamPrintStream.println("superstream: account_name is not a valid string: " + accountNameObject);
}
Object learningFactorObject = replyData.get("learning_factor");
if (learningFactorObject instanceof Integer) {
learningFactor = (Integer) learningFactorObject;
} else if (learningFactorObject instanceof String) {
try {
learningFactor = Integer.parseInt((String) learningFactorObject);
} catch (NumberFormatException e) {
superstreamPrintStream.println(
"superstream: learning_factor is not a valid integer: " + learningFactorObject);
}
} else {
superstreamPrintStream.println("superstream: learning_factor is not a valid integer: " + learningFactorObject);
}
} else {
String errMsg = "superstream: registering client: No reply received within the timeout period.";
superstreamPrintStream.println(errMsg);
handleError(errMsg);
}
} catch (Exception e) {
superstreamPrintStream.println(String.format("superstream: %s", e.getMessage()));
}
}
public Map<String, Object> deepCopyMap(Map<String, ?> originalMap) {
Map<String, Object> copiedMap = new HashMap<>();
for (Map.Entry<String, ?> entry : originalMap.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
Object copiedValue;
copiedValue = deepCopyObject(value);
copiedMap.put(key, copiedValue);
}
return copiedMap;
}
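// Serialization-based deep copy: values must implement java.io.Serializable, otherwise a RuntimeException is thrown.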
@SuppressWarnings("unchecked")
private <T> T deepCopyObject(Object object) {
try {
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
ObjectOutputStream out = new ObjectOutputStream(byteOut);
out.writeObject(object);
ByteArrayInputStream byteIn = new ByteArrayInputStream(byteOut.toByteArray());
ObjectInputStream in = new ObjectInputStream(byteIn);
return (T) in.readObject();
} catch (IOException | ClassNotFoundException e) {
throw new RuntimeException("Error during deep copy", e);
}
}
private Map<String, Object> populateConfigToSend(Map<String, ?> configs) {
Map<String, Object> configToSend = new HashMap<>();
if (configs != null && !configs.isEmpty()) {
for (Map.Entry<String, ?> entry : configs.entrySet()) {
if (!superstreamConnectionKey.equalsIgnoreCase(entry.getKey())) {
configToSend.put(entry.getKey(), entry.getValue());
}
}
}
return configToSend;
}
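// Blocks for up to 10 minutes until the engine publishes a start message, which may also carry an optimized client configuration.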
private void waitForStart() {
CountDownLatch latch = new CountDownLatch(1);
Dispatcher dispatcher = brokerConnection.createDispatcher((msg) -> {
try {
ObjectMapper mapper = new ObjectMapper();
Map<String, Object> messageData = mapper.readValue(msg.getData(), Map.class);
if (messageData.containsKey(START_KEY)) {
boolean start = (Boolean) messageData.get(START_KEY);
if (start) {
canStart = true;
if (messageData.containsKey(OPTIMIZED_CONFIGURATION_KEY)) {
Map<String, Object> receivedConfig = (Map<String, Object>) messageData.get(OPTIMIZED_CONFIGURATION_KEY);
this.superstreamConfigs = this.configParser.parse(receivedConfig);
}
latch.countDown();
} else {
String err = (String) messageData.get(ERROR_KEY);
superstreamPrintStream.println("superstream: could not start: " + err);
Thread.currentThread().interrupt();
}
}
} catch (Exception e) {
e.printStackTrace();
}
});
dispatcher.subscribe(String.format(clientStartSubject, clientHash));
try {
if (!latch.await(10, TimeUnit.MINUTES)) {
superstreamPrintStream.println("superstream: unable not connect with superstream for 10 minutes");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
superstreamPrintStream.println("superstream: Could not start superstream: " + e.getMessage());
} finally {
dispatcher.unsubscribe(String.format(clientStartSubject, clientHash));
}
}
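// Reads the latest record from the internal metadata topic to discover the Kafka connection ID; falls back to "0" when it cannot.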
private String consumeConnectionID() {
Properties consumerProps = copyAuthConfig();
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
consumerProps.put(superstreamInnerConsumerKey, "true");
consumerProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1);
String connectionId = null;
KafkaConsumer<String, String> consumer = null;
try {
consumer = new KafkaConsumer<>(consumerProps);
List<PartitionInfo> partitions = consumer.partitionsFor(superstreamMetadataTopic,
Duration.ofMillis(10000));
if (partitions == null || partitions.isEmpty()) {
return "0";
}
TopicPartition topicPartition = new TopicPartition(superstreamMetadataTopic, 0);
consumer.assign(Collections.singletonList(topicPartition));
consumer.seekToEnd(Collections.singletonList(topicPartition));
long endOffset = consumer.position(topicPartition);
if (endOffset > 0) {
consumer.seek(topicPartition, endOffset - 1);
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
if (!records.isEmpty()) {
connectionId = records.iterator().next().value();
}
}
} catch (Exception e) {
if (e.getMessage() != null && e.getMessage().toLowerCase().contains("timeout")) {
try {
Thread.sleep(10000);
if (consumer == null) {
consumer = new KafkaConsumer<>(consumerProps);
}
List<PartitionInfo> partitions = consumer.partitionsFor(superstreamMetadataTopic,
Duration.ofMillis(10000));
if (partitions == null || partitions.isEmpty()) {
return "0";
}
TopicPartition topicPartition = new TopicPartition(superstreamMetadataTopic, 0);
consumer.assign(Collections.singletonList(topicPartition));
consumer.seekToEnd(Collections.singletonList(topicPartition));
long endOffset = consumer.position(topicPartition);
if (endOffset > 0) {
consumer.seek(topicPartition, endOffset - 1);
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
if (!records.isEmpty()) {
connectionId = records.iterator().next().value();
}
}
} catch (Exception e2) {
handleError(String.format("consumeConnectionID retry: %s", e2.getMessage()));
}
}
if (connectionId == null || connectionId.equals("0")) {
handleError(String.format("consumeConnectionID: %s", e.getMessage()));
return "0";
}
} finally {
if (consumer != null) {
consumer.close();
}
}
return connectionId != null ? connectionId : "0";
}
private Properties copyAuthConfig() {
String[] relevantKeys = {
// Authentication-related keys
"security.protocol",
"ssl.truststore.location",
"ssl.truststore.password",
"ssl.keystore.location",
"ssl.keystore.password",
"ssl.key.password",
"ssl.endpoint.identification.algorithm",
"sasl.mechanism",
"sasl.jaas.config",
"sasl.kerberos.service.name",
// Networking-related keys
"bootstrap.servers",
"client.dns.lookup",
"connections.max.idle.ms",
"request.timeout.ms",
"metadata.max.age.ms",
"reconnect.backoff.ms",
"reconnect.backoff.max.ms"
};
Properties relevantProps = new Properties();
for (String key : relevantKeys) {
if (configs.containsKey(key)) {
if (ProducerConfig.BOOTSTRAP_SERVERS_CONFIG.equals(key)) {
Object value = configs.get(key);
if (value instanceof String[]) {
relevantProps.put(key, Arrays.toString((String[]) value));
} else if (value instanceof ArrayList) {
@SuppressWarnings("unchecked")
ArrayList<String> arrayList = (ArrayList<String>) value;
relevantProps.put(key, String.join(", ", arrayList));
} else {
relevantProps.put(key, value);
}
} else {
relevantProps.put(key, String.valueOf(configs.get(key)));
}
}
}
return relevantProps;
}
public void sendClientTypeUpdateReq() {
if (type == "" || type == null) {
return;
}
try {
Map<String, Object> reqData = new HashMap<>();
reqData.put("client_hash", clientHash);
reqData.put("type", type);
ObjectMapper mapper = new ObjectMapper();
byte[] reqBytes = mapper.writeValueAsBytes(reqData);
brokerConnection.publish(clientTypeUpdateSubject, reqBytes);
} catch (Exception e) {
handleError(String.format("sendClientTypeUpdateReq: %s", e.getMessage()));
}
}
private void executeSendClientConfigUpdateReqWithWait() {
new Thread(() -> {
try {
waitForCanStart();
sendClientConfigUpdateReq();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
System.err.println("Thread was interrupted: " + e.getMessage());
} catch (RuntimeException e) {
System.err.println("Error: " + e.getMessage());
}
}).start();
}
private void waitForCanStart() throws InterruptedException {
long remainingTime = MAX_TIME_WAIT_CAN_START;
while (remainingTime > 0) {
if (!this.canStart) {
Thread.sleep(WAIT_INTERVAL_CAN_START);
remainingTime -= WAIT_INTERVAL_CAN_START;
} else {
break;
}
if (remainingTime <= 0) {
superstreamPrintStream.println("superstream could not start within the expected timeout period");
}
}
}
public void waitForSuperstreamConfigs(AbstractConfig config) throws InterruptedException {
String timeoutEnv = System.getenv(SUPERSTREAM_RESPONSE_TIMEOUT_ENV_VAR);
long remainingTime = timeoutEnv != null ? Long.parseLong(timeoutEnv) : TIMEOUT_SUPERSTREAM_CONFIG_DEFAULT;
while (remainingTime > 0) {
if (this.superstreamConfigs != null) {
config.getValues().putAll(this.getSuperstreamConfigs());
break;
}
remainingTime -= WAIT_INTERVAL_SUPERSTREAM_CONFIG;
if (remainingTime > 0) {
Thread.sleep(WAIT_INTERVAL_SUPERSTREAM_CONFIG);
} else {
superstreamPrintStream.println("superstream client configuration was not set within the expected timeout period");
}
}
}
private void sendClientConfigUpdateReq() {
if (this.fullClientConfigs != null && !this.fullClientConfigs.isEmpty()) {
try {
ObjectMapper mapper = new ObjectMapper();
Map<String, Object> reqData = new HashMap<>();
reqData.put("client_hash", clientHash);
convertEntryValueWhenNoSerializer(this.fullClientConfigs, mapper);
reqData.put("config", this.fullClientConfigs);
byte[] reqBytes = mapper.writeValueAsBytes(reqData);
brokerConnection.publish(clientConfigUpdateSubject, reqBytes);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
} catch (Exception e) {
handleError(String.format("sendClientConfigUpdateReq: %s", e.getMessage()));
}
}
}
private void convertEntryValueWhenNoSerializer(Map<String, Object> config, ObjectMapper mapper) {
if (config != null && !config.isEmpty()) {
for (Map.Entry<String, Object> entry : config.entrySet()) {
Object value = entry.getValue();
String key = entry.getKey();
if (key == "sasl.jaas.config"){
entry.setValue("[hidden]");
continue;
}
try {
mapper.writeValueAsBytes(value);
} catch (JsonProcessingException e) {
entry.setValue(value.toString());
}
}
}
}
public void subscribeToUpdates() {
try {
String subject = String.format(superstreamUpdatesSubject, clientHash);
MessageHandler handler = this.updatesHandler();
Dispatcher dispatcher = brokerConnection.createDispatcher(handler);
updatesSubscription = dispatcher.subscribe(subject, handler);
} catch (Exception e) {
e.printStackTrace();
}
}
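// Every 10 minutes, publishes byte-reduction counters (scaled by the observed compression rates) and topic/partition usage to the engine.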
public void reportClientsUpdate() {
ScheduledExecutorService singleExecutorService = Executors.newSingleThreadScheduledExecutor();
singleExecutorService.scheduleAtFixedRate(() -> {
if (brokerConnection != null && superstreamReady) {
long backupReadBytes = clientCounters.getTotalReadBytesReduced();
long backupWriteBytes = clientCounters.getTotalWriteBytesReduced();
Double producerCompressionRate = clientCounters.getProducerCompressionRate();
long calculatedWriteBytes = Math.round(backupWriteBytes * producerCompressionRate);
Double consumerCompressionRate = clientCounters.getConsumerCompressionRate();
long calculatedReadBytes = Math.round(backupReadBytes * consumerCompressionRate);
clientCounters.reset();
try {
Map<String, Object> countersMap = new HashMap<>();
countersMap.put("total_read_bytes_reduced", calculatedReadBytes);
countersMap.put("total_write_bytes_reduced", calculatedWriteBytes);
countersMap.put("connection_id", kafkaConnectionID);
byte[] byteCounters = objectMapper.writeValueAsBytes(countersMap);
brokerConnection.publish(
String.format(superstreamClientsUpdateSubject, "counters", clientHash),
byteCounters);
} catch (Exception e) {
clientCounters.incrementTotalReadBytesReduced(backupReadBytes);
clientCounters.incrementTotalWriteBytesReduced(backupWriteBytes);
handleError("reportClientsUpdate config: " + e.getMessage());
}
try {
Map<String, Object> topicPartitionConfig = new HashMap<>();
if (!topicPartitions.isEmpty()) {
Map<String, Integer[]> topicPartitionsToSend = convertMap(topicPartitions);
switch (this.type) {
case "producer":
topicPartitionConfig.put("producer_topics_partitions", topicPartitionsToSend);
topicPartitionConfig.put("consumer_group_topics_partitions",
new HashMap<String, Integer[]>());
break;
case "consumer":
topicPartitionConfig.put("producer_topics_partitions",
new HashMap<String, Integer[]>());
topicPartitionConfig.put("consumer_group_topics_partitions", topicPartitionsToSend);
break;
}
}
topicPartitionConfig.put("connection_id", kafkaConnectionID);
byte[] byteConfig = objectMapper.writeValueAsBytes(topicPartitionConfig);
brokerConnection.publish(
String.format(superstreamClientsUpdateSubject, "config", clientHash),
byteConfig);
} catch (Exception e) {
handleError("reportClientsUpdate config: " + e.getMessage());
}
}
}, 0, 10, TimeUnit.MINUTES);
}
public static Map<String, Integer[]> convertMap(Map<String, Set<Integer>> topicPartitions) {
Map<String, Integer[]> result = new HashMap<>();
for (Map.Entry<String, Set<Integer>> entry : topicPartitions.entrySet()) {
Integer[] array = entry.getValue().toArray(new Integer[0]);
result.put(entry.getKey(), array);
}
return result;
}
public void sendLearningMessage(byte[] msg) {
try {
brokerConnection.publish(String.format(superstreamLearningSubject, clientHash), msg);
} catch (Exception e) {
handleError("sendLearningMessage: " + e.getMessage());
}
}
public void sendRegisterSchemaReq() {
try {
brokerConnection.publish(String.format(superstreamRegisterSchemaSubject, clientHash), new byte[0]);
learningRequestSent = true;
} catch (Exception e) {
handleError("sendLearningMessage: " + e.getMessage());
}
}
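// Attempts to convert a JSON payload to protobuf using the learned descriptor; on any failure the original bytes are returned unchanged.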
public JsonToProtoResult jsonToProto(byte[] msgBytes) throws Exception {
try {
String jsonString = new String(msgBytes);
if (!isJsonObject(jsonString)) {
jsonString = convertEscapedJsonString(jsonString);
}
if (jsonString == null || jsonString.isEmpty()) {
return new JsonToProtoResult(false, msgBytes);
}
if (jsonString.length() > 2 && jsonString.startsWith("\"{")
&& jsonString.endsWith("}\"")) {
jsonString = jsonString.substring(1, jsonString.length() - 1);
}
DynamicMessage.Builder newMessageBuilder = DynamicMessage.newBuilder(descriptor);
JsonFormat.parser().merge(jsonString, newMessageBuilder);
DynamicMessage message = newMessageBuilder.build();
return new JsonToProtoResult(true, message.toByteArray());
} catch (Exception e) {
return new JsonToProtoResult(false, msgBytes);
}
}
public class JsonToProtoResult {
private final boolean success;
private final byte[] messageBytes;
public JsonToProtoResult(boolean success, byte[] messageBytes) {
this.success = success;
this.messageBytes = messageBytes;
}
public boolean isSuccess() {
return success;
}
public byte[] getMessageBytes() {
return messageBytes;
}
}
private boolean isJsonObject(String jsonString) {
try {
JsonParser.parseString(jsonString).getAsJsonObject();
return true;
} catch (JsonSyntaxException | IllegalStateException e) {
return false;
}
}
private static String convertEscapedJsonString(String escapedJsonString) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode jsonNode = mapper.readTree(escapedJsonString);
return mapper.writeValueAsString(jsonNode).replace("\\\"", "\"").replace("\\\\", "\\");
}
public byte[] protoToJson(byte[] msgBytes, Descriptors.Descriptor desc) throws Exception {
try {
DynamicMessage message = DynamicMessage.parseFrom(desc, msgBytes);
String jsonString = JsonFormat.printer().omittingInsignificantWhitespace().print(message);
return jsonString.getBytes(StandardCharsets.UTF_8);
} catch (Exception e) {
if (e.getMessage().contains("the input ended unexpectedly")) {
return msgBytes;
} else {
throw e;
}
}
}
private MessageHandler updatesHandler() {
return (msg) -> {
try {
@SuppressWarnings("unchecked")
Map<String, Object> update = objectMapper.readValue(msg.getData(), Map.class);
processUpdate(update);
} catch (IOException e) {
handleError("updatesHandler at json.Unmarshal: " + e.getMessage());
}
};
}
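// Applies engine-pushed updates: newly learned schemas, reduction toggles (the env var can force reduction off), and compression toggles.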
private void processUpdate(Map<String, Object> update) {
String type = (String) update.get("type");
try {
String payloadBytesString = (String) update.get("payload");
byte[] payloadBytes = Base64.getDecoder().decode(payloadBytesString);
@SuppressWarnings("unchecked")
Map<String, Object> payload = objectMapper.readValue(payloadBytes, Map.class);
Map<String, String> envVars = System.getenv();
switch (type) {
case "LearnedSchema":
String descriptorBytesString = (String) payload.get("desc");
String masterMsgName = (String) payload.get("master_msg_name");
String fileName = (String) payload.get("file_name");
descriptor = compileMsgDescriptor(descriptorBytesString, masterMsgName, fileName);
String schemaID = (String) payload.get("schema_id");
ProducerSchemaID = schemaID;
break;
case "ToggleReduction":
// if defined as false in env vars - override the value from superstream
String reductionEnabledString = envVars.get(SUPERSTREAM_REDUCTION_ENABLED_ENV_VAR);
if (reductionEnabledString != null) {
Boolean reductionEnabled = Boolean.parseBoolean(reductionEnabledString);
if (!reductionEnabled) {
this.reductionEnabled = false;
break;
}
}
Boolean enableReduction = (Boolean) payload.get("enable_reduction");
if (enableReduction) {
this.reductionEnabled = true;
} else {
this.reductionEnabled = false;
}
break;
case "CompressionUpdate":
Boolean enableCompression = (Boolean) payload.get("enable_compression");
if (enableCompression) {
this.compressionTurnedOffBySuperstream = false;
} else {
this.compressionTurnedOffBySuperstream = true;
}
this.compressionEnabled = enableCompression;
String compType = (String) payload.get("compression_type");
if (compType != null) {
this.compressionType = compType;
}
break;
}
} catch (Exception e) {
handleError(("processUpdate: " + e.getMessage()));
}
}
public void sendGetSchemaRequest(String schemaID) {
try {
Map<String, Object> reqData = new HashMap<>();
reqData.put("schema_id", schemaID);
ObjectMapper mapper = new ObjectMapper();
byte[] reqBytes = mapper.writeValueAsBytes(reqData);
Message msg = brokerConnection.request(String.format(superstreamGetSchemaSubject, clientHash),
reqBytes, Duration.ofSeconds(5));
if (msg == null) {
throw new Exception("Could not get descriptor");
}
@SuppressWarnings("unchecked")
Map<String, Object> respMap = objectMapper.readValue(new String(msg.getData(), StandardCharsets.UTF_8),
Map.class);
if (respMap.containsKey("desc") && respMap.get("desc") instanceof String) {
String descriptorBytesString = (String) respMap.get("desc");
String masterMsgName = (String) respMap.get("master_msg_name");
String fileName = (String) respMap.get("file_name");
Descriptors.Descriptor respDescriptor = compileMsgDescriptor(descriptorBytesString, masterMsgName,
fileName);
if (respDescriptor != null) {
SchemaIDMap.put((String) respMap.get("schema_id"), respDescriptor);
} else {
throw new Exception("Error compiling schema.");
}
} else {
throw new Exception("Response map does not contain expected keys.");
}
} catch (Exception e) {
handleError(String.format("sendGetSchemaRequest: %s", e.getMessage()));
}
}
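// Decodes a base64-encoded FileDescriptorSet and returns the descriptor of masterMsgName defined in fileName, or null on failure.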
private Descriptors.Descriptor compileMsgDescriptor(String descriptorBytesString, String masterMsgName,
String fileName) {
try {
byte[] descriptorAsBytes = Base64.getDecoder().decode(descriptorBytesString);
if (descriptorAsBytes == null) {
throw new Exception("error decoding descriptor bytes");
}
FileDescriptorSet descriptorSet = FileDescriptorSet.parseFrom(descriptorAsBytes);
FileDescriptor fileDescriptor = null;
for (DescriptorProtos.FileDescriptorProto fdp : descriptorSet.getFileList()) {
if (fdp.getName().equals(fileName)) {
fileDescriptor = FileDescriptor.buildFrom(fdp, new FileDescriptor[]{});
break;
}
}
if (fileDescriptor == null) {
throw new Exception("file not found");
}
for (Descriptors.Descriptor md : fileDescriptor.getMessageTypes()) {
if (md.getName().equals(masterMsgName)) {
return md;
}
}
} catch (Exception e) {
handleError(String.format("compileMsgDescriptor: %s", e.getMessage()));
}
return null;
}
public void handleError(String msg) {
if (brokerConnection != null && superstreamReady) {
Map<String, String> envVars = System.getenv();
String tags = envVars.get(SUPERSTREAM_TAGS_ENV_VAR);
if (tags == null) {
tags = "";
}
if (clientHash == "") {
String message = String.format("[sdk: java][version: %s][tags: %s] %s", sdkVersion, tags, msg);
brokerConnection.publish(superstreamErrorSubject, message.getBytes(StandardCharsets.UTF_8));
} else {
String message = String.format("[clientHash: %s][sdk: java][version: %s][tags: %s] %s",
clientHash, sdkVersion, tags, msg);
brokerConnection.publish(superstreamErrorSubject, message.getBytes(StandardCharsets.UTF_8));
}
}
}
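// Entry point for map-based client configs: wires in the Superstream interceptor, reads connection settings from env vars, and attaches a live Superstream instance under superstreamConnectionKey.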
public static Map<String, Object> initSuperstreamConfig(Map<String, Object> configs, String type) {
String isInnerConsumer = (String) configs.get(superstreamInnerConsumerKey);
if (Boolean.parseBoolean(isInnerConsumer)) {
return configs;
}
String interceptorToAdd = getSuperstreamClientInterceptorName(type);
try {
List<String> interceptors = null;
Object existingInterceptors = configs.get(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG);
if (!interceptorToAdd.isEmpty()) {
if (existingInterceptors != null) {
if (existingInterceptors instanceof List) {
interceptors = new ArrayList<>((List<String>) existingInterceptors);
} else if (existingInterceptors instanceof String) {
interceptors = new ArrayList<>();
interceptors.add((String) existingInterceptors);
} else {
interceptors = new ArrayList<>();
}
} else {
interceptors = new ArrayList<>();
}
}
if (!interceptorToAdd.isEmpty()) {
interceptors.add(interceptorToAdd);
configs.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);
}
Map<String, String> envVars = System.getenv();
String superstreamHost = envVars.get(SUPERSTREAM_HOST_ENV_VAR);
if (superstreamHost == null) {
throw new Exception("host is required");
}
configs.put(superstreamHostKey, superstreamHost);
String token = envVars.get(SUPERSTREAM_TOKEN_ENV_VAR);
if (token == null) {
token = superstreamDefaultToken;
}
configs.put(superstreamTokenKey, token);
String learningFactorString = envVars.get(SUPERSTREAM_LEARNING_FACTOR_ENV_VAR);
Integer learningFactor = superstreamDefaultLearningFactor;
if (learningFactorString != null) {
learningFactor = Integer.parseInt(learningFactorString);
}
configs.put(superstreamLearningFactorKey, learningFactor);
boolean reductionEnabled = false;
String reductionEnabledString = envVars.get(SUPERSTREAM_REDUCTION_ENABLED_ENV_VAR);
if (reductionEnabledString != null) {
reductionEnabled = Boolean.parseBoolean(reductionEnabledString);
}
configs.put(superstreamReductionEnabledKey, reductionEnabled);
String tags = envVars.get(SUPERSTREAM_TAGS_ENV_VAR);
if (tags == null) {
tags = "";
}
boolean compressionEnabled = false;
checkStdoutEnvVar();
Superstream superstreamConnection = new Superstream(token, superstreamHost, learningFactor, configs,
reductionEnabled, type, tags, compressionEnabled);
superstreamConnection.init();
configs.put(superstreamConnectionKey, superstreamConnection);
} catch (Exception e) {
String errMsg = String.format("superstream: error initializing superstream: %s", e.getMessage());
System.out.println(errMsg);
handleConfigsWhenErrorInitializeSuperstream(type, configs);
}
return configs;
}
private static void handleConfigsWhenErrorInitializeSuperstream(String type, Map<String, Object> configs) {
switch (type) {
case PRODUCER:
if (configs.containsKey(originalSerializer)) {
configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
configs.get(originalSerializer));
configs.remove(originalSerializer);
}
break;
case CONSUMER:
if (configs.containsKey(originalDeserializer)) {
configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
configs.get(originalDeserializer));
configs.remove(originalDeserializer);
}
break;
}
}
private static String getSuperstreamClientInterceptorName(String type) {
switch (type) {
case "producer":
handleSerializerLogicForPayloadReduction();
return SuperstreamProducerInterceptor.class.getName();
case "consumer":
handleDeserializerLogicForPayloadReduction();
return SuperstreamConsumerInterceptor.class.getName();
default:
return "";
}
}
private static void handleDeserializerLogicForPayloadReduction() {
// TODO: handle deserializer logic for payload reduction, mirroring initSuperstreamProps, e.g.:
// if (configs.containsKey(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)) {
// if (!configs.containsKey(Consts.originalDeserializer)) {
// configs.put(Consts.originalDeserializer,
// configs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
// configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
// SuperstreamDeserializer.class.getName());
// }
// }
}
private static void handleSerializerLogicForPayloadReduction() {
// TODO: handle serializer logic for payload reduction, mirroring initSuperstreamProps, e.g.:
// if (configs.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
// if (!configs.containsKey(Consts.originalSerializer)) {
// configs.put(Consts.originalSerializer,
// configs.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
// configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
// SuperstreamSerializer.class.getName());
// }
// }
}
public static Properties initSuperstreamProps(Properties properties, String type) {
String interceptors = (String) properties.get(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG);
switch (type) {
case "producer":
if (interceptors != null && !interceptors.isEmpty()) {
interceptors += "," + SuperstreamProducerInterceptor.class.getName();
} else {
interceptors = SuperstreamProducerInterceptor.class.getName();
}
if (properties.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
if (!properties.containsKey(originalSerializer)) {
properties.put(originalSerializer,
properties.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
SuperstreamSerializer.class.getName());
}
}
break;
case "consumer":
if (interceptors != null && !interceptors.isEmpty()) {
interceptors += "," + SuperstreamConsumerInterceptor.class.getName();
} else {
interceptors = SuperstreamConsumerInterceptor.class.getName();
}
if (properties.containsKey(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)) {
if (!properties.containsKey(originalDeserializer)) {
properties.put(originalDeserializer,
properties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
SuperstreamDeserializer.class.getName());
}
}
break;
}
if (interceptors != null) {
properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);
}
try {
Map<String, String> envVars = System.getenv();
String superstreamHost = envVars.get(SUPERSTREAM_HOST_ENV_VAR);
if (superstreamHost == null) {
throw new Exception("host is required");
}
properties.put(superstreamHostKey, superstreamHost);
String token = envVars.get(SUPERSTREAM_TOKEN_ENV_VAR);
if (token == null) {
token = superstreamDefaultToken;
}
properties.put(superstreamTokenKey, token);
String learningFactorString = envVars.get(SUPERSTREAM_LEARNING_FACTOR_ENV_VAR);
Integer learningFactor = superstreamDefaultLearningFactor;
if (learningFactorString != null) {
learningFactor = Integer.parseInt(learningFactorString);
}
properties.put(superstreamLearningFactorKey, learningFactor);
Boolean reductionEnabled = false;
String reductionEnabledString = envVars.get(SUPERSTREAM_REDUCTION_ENABLED_ENV_VAR);
if (reductionEnabledString != null) {
reductionEnabled = Boolean.parseBoolean(reductionEnabledString);
}
properties.put(superstreamReductionEnabledKey, reductionEnabled);
String tags = envVars.get(SUPERSTREAM_TAGS_ENV_VAR);
if (tags != null) {
properties.put(superstreamTagsKey, tags);
}
Map<String, Object> configs = propertiesToMap(properties);
Superstream superstreamConnection = new Superstream(token, superstreamHost, learningFactor, configs,
reductionEnabled, type);
superstreamConnection.init();
properties.put(superstreamConnectionKey, superstreamConnection);
} catch (Exception e) {
String errMsg = String.format("superstream: error initializing superstream: %s", e.getMessage());
System.out.println(errMsg);
}
return properties;
}
public static Map<String, Object> propertiesToMap(Properties properties) {
return properties.entrySet().stream()
.collect(Collectors.toMap(
e -> String.valueOf(e.getKey()),
e -> e.getValue()));
}
public void updateTopicPartitions(String topic, Integer partition) {
Set<Integer> partitions = topicPartitions.computeIfAbsent(topic, k -> new HashSet<>());
partitions.add(partition);
}
public void setFullClientConfigs(Map<String, ?> configsUpdate) {
for (Map.Entry<String, ?> entry : configsUpdate.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
Object copiedValue;
if (this.configs.containsKey(key)) {
copiedValue = deepCopyObject(this.configs.get(key));
} else {
copiedValue = deepCopyObject(value);
}
this.fullClientConfigs.put(key, copiedValue);
}
executeSendClientConfigUpdateReqWithWait();
}
public PrintStream getSuperstreamPrintStream() {
return superstreamPrintStream;
}
public Map<String, ?> getSuperstreamConfigs() {
return superstreamConfigs;
}
private static class ClassOutputStream extends OutputStream {
@Override
public void write(int b) {
if (!isStdoutSuppressed) {
originalOut.write(b);
}
}
@Override
public void write(byte[] b, int off, int len) {
if (!isStdoutSuppressed) {
originalOut.write(b, off, len);
}
}
}
private static class ClassErrorStream extends OutputStream {
@Override
public void write(int b) {
if (!isStderrSuppressed) {
originalErr.write(b);
}
}
@Override
public void write(byte[] b, int off, int len) {
if (!isStderrSuppressed) {
originalErr.write(b, off, len);
}
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/SuperstreamConfigParser.java | package org.apache.kafka.common.superstream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class SuperstreamConfigParser {
private Map<String, Boolean> longCastMap = null;
private Map<String, Boolean> shortCastMap = null;
private Map<String, Boolean> classCastMap = null;
public SuperstreamConfigParser() {
this.longCastMap = this.getLongCastMap();
this.shortCastMap = this.getShortCastMap();
this.classCastMap = this.getClassCastMap();
}
private Map<String, Boolean> getLongCastMap() {
Map<String, Boolean> m = new HashMap<>();
m.put("buffer.memory", true);
m.put("linger.ms", true);
m.put("connections.max.idle.ms", true);
m.put("max.block.ms", true);
m.put("metadata.max.age.ms", true);
m.put("metadata.max.idle.ms", true);
m.put("metrics.sample.window.ms", true);
m.put("partitioner.availability.timeout.ms", true);
m.put("reconnect.backoff.max.ms", true);
m.put("reconnect.backoff.ms", true);
m.put("retry.backoff.ms", true);
m.put("sasl.kerberos.min.time.before.relogin", true);
m.put("sasl.login.retry.backoff.max.ms", true);
m.put("sasl.login.retry.backoff.ms", true);
m.put("sasl.oauthbearer.jwks.endpoint.refresh.ms", true);
return m;
}
private Map<String, Boolean> getShortCastMap() {
Map<String, Boolean> m = new HashMap<>();
m.put("sasl.login.refresh.min.period.seconds", true);
m.put("sasl.login.refresh.buffer.seconds", true);
return m;
}
private Map<String, Boolean> getClassCastMap() {
Map<String, Boolean> m = new HashMap<>();
m.put("alter.config.policy.class.name", true);
m.put("create.topic.policy.class.name", true);
m.put("partitioner.class", true);
m.put("sasl.client.callback.handler.class", true);
m.put("sasl.login.callback.handler.class", true);
m.put("sasl.login.class", true);
m.put("ssl.engine.factory.class", true);
m.put("key.serializer", true);
m.put("value.serializer", true);
m.put("key.deserializer", true);
m.put("value.deserializer", true);
return m;
}
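// Casts the engine's string values to the types Kafka's config validation expects (server list, Long, Short, Class); "acks=all" is normalized to "-1".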
public Map<String, ?> parse(Map<String, Object> receivedConfig) throws ClassNotFoundException {
for (Map.Entry<String, Object> entry : receivedConfig.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
if (key.equals("bootstrap.servers")) {
List<String> serversList = Arrays.asList(value.toString().split(","));
receivedConfig.put(key, serversList);
continue;
}
if (longCastMap.containsKey(key)) {
Long castedObj = Long.valueOf(value.toString());
receivedConfig.put(key, castedObj);
continue;
}
if (shortCastMap.containsKey(key)) {
Short castedObj = Short.valueOf(value.toString());
receivedConfig.put(key, castedObj);
continue;
}
if (classCastMap.containsKey(key)) {
Class<?> castedObj = Class.forName(value.toString());
receivedConfig.put(key, castedObj);
continue;
}
if (key.equals("acks") && value.equals("all")) {
receivedConfig.put(key, "-1");
}
}
return receivedConfig;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/SuperstreamConsumerInterceptor.java | package org.apache.kafka.common.superstream;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
public class SuperstreamConsumerInterceptor<K, V> implements ConsumerInterceptor<K, V> {
Superstream superstreamConnection;
public SuperstreamConsumerInterceptor() {
}
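// Tracks topic/partition usage and counts key + value + header bytes of each consumed record for the periodic client report.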
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
if (this.superstreamConnection != null) {
if (!records.isEmpty()) {
for (ConsumerRecord<K, V> record : records) {
this.superstreamConnection.updateTopicPartitions(record.topic(), record.partition());
int serializedTotalSize = record.serializedValueSize() + record.serializedKeySize();
for (Header header : record.headers()) {
serializedTotalSize += header.key().getBytes().length + header.value().length;
}
if (serializedTotalSize > 0) {
this.superstreamConnection.clientCounters.incrementTotalReadBytesReduced(serializedTotalSize);
}
this.superstreamConnection.clientCounters.incrementTotalReadBytes(serializedTotalSize);
}
}
}
return records;
}
@Override
public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
}
@Override
public void close() {
}
@Override
public void configure(Map<String, ?> configs) {
Superstream superstreamConn = (Superstream) configs.get(Consts.superstreamConnectionKey);
if (superstreamConn != null) {
this.superstreamConnection = superstreamConn;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/SuperstreamCounters.java | package org.apache.kafka.common.superstream;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.Metric;
import java.util.Map;
public class SuperstreamCounters {
@JsonProperty("total_read_bytes_reduced")
public AtomicLong TotalReadBytesReduced = new AtomicLong(0);
@JsonProperty("total_write_bytes_reduced")
public AtomicLong TotalWriteBytesReduced = new AtomicLong(0);
public AtomicLong TotalReadBytes = new AtomicLong(0);
public Metrics metrics;
public SuperstreamCounters() {
}
public void reset() {
TotalReadBytesReduced.set(0);
TotalWriteBytesReduced.set(0);
}
public void incrementTotalReadBytesReduced(long bytes) {
TotalReadBytesReduced.addAndGet(bytes);
}
public void incrementTotalWriteBytesReduced(long bytes) {
TotalWriteBytesReduced.addAndGet(bytes);
}
public void incrementTotalReadBytes(long bytes) {
TotalReadBytes.addAndGet(bytes);
}
public long getTotalReadBytesReduced() {
return TotalReadBytesReduced.get();
}
public long getTotalWriteBytesReduced() {
return TotalWriteBytesReduced.get();
}
public long getTotalReadBytes() {
return TotalReadBytes.get();
}
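// Kafka's compression-rate-avg metric is compressed/uncompressed size, so 1 - rate is the fraction of bytes saved; out-of-range values yield 0.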
public Double getProducerCompressionRate() {
Double rate = getProducerCompressionMetric();
if (rate == null || rate.isNaN() || rate <= 0.0 || rate >= 1.0) {
return 0.0;
}
return 1 - rate;
}
public Double getConsumerCompressionRate() {
Double totalBytesCompressedConsumed = getConsumerBytesConsumedMetric();
long totalRead = getTotalReadBytes();
if (totalBytesCompressedConsumed == null || totalBytesCompressedConsumed.isNaN() || totalBytesCompressedConsumed <= 0.0 || totalRead <= 0) {
return 0.0;
}
if (totalBytesCompressedConsumed > totalRead) {
return 0.0;
}
return (1 - (totalBytesCompressedConsumed / totalRead));
}
public void setMetrics(Metrics metrics) {
this.metrics = metrics;
}
public Double getProducerCompressionMetric() {
if (metrics != null) {
for (Map.Entry<MetricName, ? extends Metric> entry : metrics.metrics().entrySet()) {
String name = entry.getKey().name();
if (name.equals("compression-rate-avg")) {
return (Double) entry.getValue().metricValue();
}
}
}
return 0.0;
}
public Double getConsumerBytesConsumedMetric() {
if (metrics != null) {
for (Map.Entry<MetricName, ? extends Metric> entry : metrics.metrics().entrySet()) {
String name = entry.getKey().name();
if (name.equals("bytes-consumed-total")) {
return (Double) entry.getValue().metricValue();
}
}
}
return 0.0;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/SuperstreamDeserializer.java | package org.apache.kafka.common.superstream;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.common.serialization.Deserializer;
import com.google.protobuf.Descriptors;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
public class SuperstreamDeserializer<T> implements Deserializer<T> {
private Deserializer<T> originalDeserializer;
private Superstream superstreamConnection;
public SuperstreamDeserializer() {
}
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
try {
Object originalDeserializerObj = configs.get(Consts.originalDeserializer);
if (originalDeserializerObj == null) {
throw new Exception("original deserializer is required");
}
Class<?> originalDeserializerClass;
if (originalDeserializerObj instanceof String) {
originalDeserializerClass = Class.forName((String) originalDeserializerObj);
} else if (originalDeserializerObj instanceof Class) {
originalDeserializerClass = (Class<?>) originalDeserializerObj;
} else {
throw new Exception("Invalid type for original deserializer");
}
@SuppressWarnings("unchecked")
Deserializer<T> originalDeserializerT = (Deserializer<T>) originalDeserializerClass.getDeclaredConstructor()
.newInstance();
this.originalDeserializer = originalDeserializerT;
this.originalDeserializer.configure(configs, isKey);
Superstream superstreamConn = (Superstream) configs.get(Consts.superstreamConnectionKey);
if (superstreamConn == null) {
System.out.println("Failed to connect to Superstream");
} else {
this.superstreamConnection = superstreamConn;
}
} catch (Exception e) {
String errMsg = String.format("superstream: error initializing superstream: %s", e.getMessage());
if (superstreamConnection != null) {
superstreamConnection.handleError(errMsg);
}
System.out.println(errMsg);
}
}
@Override
public T deserialize(String topic, byte[] data) {
if (originalDeserializer == null) {
return null;
}
T deserializedData = originalDeserializer.deserialize(topic, data);
return deserializedData;
}
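// Records tagged with a "superstream_schema" header carry protobuf payloads: wait for the connection to be ready, fetch the schema if needed, and convert back to JSON before delegating to the original deserializer.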
@Override
public T deserialize(String topic, Headers headers, byte[] data) {
if (originalDeserializer == null) {
return null;
}
String schemaId = null;
Header header = headers.lastHeader("superstream_schema");
if (header != null) {
schemaId = new String(header.value(), StandardCharsets.UTF_8);
}
byte[] dataToDeserialize = data;
if (dataToDeserialize == null) {
// delegate null payloads directly to the original deserializer
return this.originalDeserializer.deserialize(topic, headers, null);
}
// if (this.superstreamConnection != null) {
// this.superstreamConnection.clientCounters.incrementTotalBytesAfterReduction(data.length);
// }
if (schemaId != null) {
if (!this.superstreamConnection.superstreamReady) {
int totalWaitTime = 60;
int checkInterval = 5;
try {
for (int i = 0; i < totalWaitTime; i += checkInterval) {
if (this.superstreamConnection.superstreamReady) {
break;
}
Thread.sleep(checkInterval * 1000);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
if (!this.superstreamConnection.superstreamReady) {
System.out.println(
"superstream: could not connect to superstream; unable to consume a message that was modified by superstream");
return null;
}
Descriptors.Descriptor desc = superstreamConnection.SchemaIDMap.get(schemaId);
if (desc == null) {
superstreamConnection.sendGetSchemaRequest(schemaId);
desc = superstreamConnection.SchemaIDMap.get(schemaId);
if (desc == null) {
superstreamConnection.handleError("error getting schema with id: " + schemaId);
System.out.println("superstream: shcema not found");
return null;
}
}
try {
byte[] superstreamDeserialized = superstreamConnection.protoToJson(data, desc);
dataToDeserialize = superstreamDeserialized;
// superstreamConnection.clientCounters
// .incrementTotalBytesBeforeReduction(supertstreamDeserialized.length);
// superstreamConnection.clientCounters.incrementTotalMessagesSuccessfullyConsumed();
} catch (Exception e) {
superstreamConnection.handleError(String.format("error deserializing data: %s", e.getMessage()));
return null;
}
} else {
if (superstreamConnection != null) {
// superstreamConnection.clientCounters.incrementTotalBytesBeforeReduction(data.length);
// superstreamConnection.clientCounters.incrementTotalMessagesFailedConsume();
}
}
T deserializedData = this.originalDeserializer.deserialize(topic, headers, dataToDeserialize);
return deserializedData;
}
@Override
public void close() {
if (originalDeserializer != null) {
originalDeserializer.close();
}
if (superstreamConnection != null) {
superstreamConnection.close();
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/SuperstreamProducerInterceptor.java | package org.apache.kafka.common.superstream;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
public class SuperstreamProducerInterceptor<K, V> implements ProducerInterceptor<K, V> {
Superstream superstreamConnection;
public SuperstreamProducerInterceptor() {
}
@Override
public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) {
if (this.superstreamConnection != null) {
if (record != null) {
int headersSize = 0;
for (Header header : record.headers()) {
headersSize += header.key().getBytes().length + header.value().length;
}
if (headersSize > 0) {
this.superstreamConnection.clientCounters.incrementTotalWriteBytesReduced(headersSize);
}
this.superstreamConnection.updateTopicPartitions(record.topic(), record.partition());
}
}
return record;
}
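// On successful acknowledgement, adds the record's serialized key + value size to the write counter that reportClientsUpdate later scales by the compression rate.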
@Override
public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
if (this.superstreamConnection != null && metadata != null) {
if (exception == null) {
int serializedTotalSize = metadata.serializedValueSize() + metadata.serializedKeySize();
if (serializedTotalSize > 0) {
this.superstreamConnection.clientCounters.incrementTotalWriteBytesReduced(serializedTotalSize);
}
}
}
}
@Override
public void close() {
}
@Override
public void configure(Map<String, ?> configs) {
Superstream superstreamConn = (Superstream) configs.get(Consts.superstreamConnectionKey);
if (superstreamConn != null) {
this.superstreamConnection = superstreamConn;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/superstream/SuperstreamSerializer.java | package org.apache.kafka.common.superstream;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.superstream.Superstream.JsonToProtoResult;
public class SuperstreamSerializer<T> implements Serializer<T> {
private Serializer<T> originalSerializer;
private Superstream superstreamConnection;
private volatile String compressionType = "none";
private boolean producerCompressionEnabled = false;
public SuperstreamSerializer() {
}
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
try {
Object originalSerializerObj = configs.get(Consts.originalSerializer);
if (originalSerializerObj == null) {
throw new Exception("Original serializer is required");
}
Class<?> originalSerializerClass;
if (originalSerializerObj instanceof String) {
originalSerializerClass = Class.forName((String) originalSerializerObj);
} else if (originalSerializerObj instanceof Class) {
originalSerializerClass = (Class<?>) originalSerializerObj;
} else {
throw new Exception("Invalid type for original serializer");
}
@SuppressWarnings("unchecked")
Serializer<T> originalSerializerT = (Serializer<T>) originalSerializerClass.getDeclaredConstructor()
.newInstance();
this.originalSerializer = originalSerializerT;
this.originalSerializer.configure(configs, isKey);
Superstream superstreamConn = (Superstream) configs.get(Consts.superstreamConnectionKey);
if (superstreamConn == null) {
System.out.println("Failed to connect to Superstream");
} else {
this.superstreamConnection = superstreamConn;
}
String configuredCompressionType = (String) configs.get(ProducerConfig.COMPRESSION_TYPE_CONFIG);
this.producerCompressionEnabled = configuredCompressionType != null
&& !configuredCompressionType.equals("none");
if (this.superstreamConnection != null) {
// this.superstreamConnection.setCompressionUpdateCallback(this::onCompressionUpdate);
this.compressionType = this.superstreamConnection.compressionEnabled ? "zstd" : "none";
}
this.compressionType = this.producerCompressionEnabled ? configuredCompressionType : "none";
} catch (Exception e) {
String errMsg = String.format("Superstream: Error initializing serializer: %s", e.getMessage());
if (superstreamConnection != null) {
superstreamConnection.handleError(errMsg);
}
System.out.println(errMsg);
}
}
private void onCompressionUpdate(boolean enabled, String type) {
if (!this.producerCompressionEnabled) {
this.compressionType = enabled ? type : "none";
}
}
@Override
    public byte[] serialize(String topic, T data) {
        if (originalSerializer == null) {
            return null;
        }
        return originalSerializer.serialize(topic, data);
    }
@Override
public byte[] serialize(String topic, Headers headers, T data) {
if (originalSerializer == null) {
return null;
}
        byte[] serializedData = this.originalSerializer.serialize(topic, headers, data);
        if (serializedData == null) {
            return null;
        }
        byte[] serializedResult = serializedData;
if (superstreamConnection != null && superstreamConnection.superstreamReady) {
// superstreamConnection.clientCounters.incrementTotalBytesBeforeReduction(serializedData.length);
if (superstreamConnection.reductionEnabled && superstreamConnection.descriptor != null) {
try {
JsonToProtoResult jsonToProtoResult = superstreamConnection.jsonToProto(serializedData);
if (jsonToProtoResult.isSuccess()) {
serializedResult = jsonToProtoResult.getMessageBytes();
// superstreamConnection.clientCounters.incrementTotalMessagesSuccessfullyProduce();
Header header = new RecordHeader("superstream_schema",
superstreamConnection.ProducerSchemaID.getBytes(StandardCharsets.UTF_8));
headers.add(header);
}
} catch (Exception e) {
superstreamConnection.handleError(String.format("error serializing data: %s", e.getMessage()));
// superstreamConnection.clientCounters.incrementTotalMessagesFailedProduce();
}
} else if (superstreamConnection.reductionEnabled) {
if (superstreamConnection.learningFactorCounter <= superstreamConnection.learningFactor) {
superstreamConnection.sendLearningMessage(serializedData);
superstreamConnection.learningFactorCounter++;
} else if (!superstreamConnection.learningRequestSent) {
superstreamConnection.sendRegisterSchemaReq();
}
}
if (superstreamConnection.compressionEnabled && !producerCompressionEnabled) {
headers.add(new RecordHeader("superstream-compression", "on".getBytes(StandardCharsets.UTF_8)));
}
}
return serializedResult;
}
@Override
public void close() {
if (this.originalSerializer != null) {
originalSerializer.close();
}
if (superstreamConnection != null) {
superstreamConnection.close();
}
}
}
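// Editor's note: a minimal wiring sketch, not part of the original file. It assumes the
// Consts keys used above and a Superstream connection obtained elsewhere; treat it as an
// illustration of the expected configuration shape, not the library's documented API.
class SuperstreamSerializerUsageSketch {
    static <T> SuperstreamSerializer<T> build(Superstream connection, Class<? extends Serializer<T>> inner) {
        Map<String, Object> configs = new java.util.HashMap<>();
        configs.put(Consts.originalSerializer, inner);               // delegate serializer (class or class name)
        configs.put(Consts.superstreamConnectionKey, connection);    // established Superstream connection
        configs.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "none"); // let Superstream drive compression
        SuperstreamSerializer<T> serializer = new SuperstreamSerializer<>();
        serializer.configure(configs, false);                        // false: configuring a value serializer
        return serializer;
    }
}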
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/AbstractIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* A base class that simplifies implementing an iterator
* @param <T> The type of thing we are iterating over
*/
public abstract class AbstractIterator<T> implements Iterator<T> {
private enum State {
READY, NOT_READY, DONE, FAILED
}
private State state = State.NOT_READY;
private T next;
@Override
public boolean hasNext() {
switch (state) {
case FAILED:
throw new IllegalStateException("Iterator is in failed state");
case DONE:
return false;
case READY:
return true;
default:
return maybeComputeNext();
}
}
@Override
public T next() {
if (!hasNext())
throw new NoSuchElementException();
state = State.NOT_READY;
if (next == null)
throw new IllegalStateException("Expected item but none found.");
return next;
}
@Override
public void remove() {
throw new UnsupportedOperationException("Removal not supported");
}
public T peek() {
if (!hasNext())
throw new NoSuchElementException();
return next;
}
protected T allDone() {
state = State.DONE;
return null;
}
protected abstract T makeNext();
    private boolean maybeComputeNext() {
state = State.FAILED;
next = makeNext();
if (state == State.DONE) {
return false;
} else {
state = State.READY;
return true;
}
}
}
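// Editor's note: a minimal usage sketch, not part of the original file. It shows the
// intended subclassing contract: makeNext() either returns the next element or calls
// allDone() to signal exhaustion.
class RangeIteratorSketch extends AbstractIterator<Integer> {
    private final int end;
    private int current;
    RangeIteratorSketch(int start, int end) {
        this.current = start;
        this.end = end;
    }
    @Override
    protected Integer makeNext() {
        if (current >= end)
            return allDone(); // marks the iterator DONE and returns null
        return current++;
    }
}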
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/AppInfoParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.InputStream;
import java.lang.management.ManagementFactory;
import java.util.Properties;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AppInfoParser {
private static final Logger log = LoggerFactory.getLogger(AppInfoParser.class);
private static final String VERSION;
private static final String COMMIT_ID;
protected static final String DEFAULT_VALUE = "unknown";
static {
Properties props = new Properties();
try (InputStream resourceStream = AppInfoParser.class.getResourceAsStream("/kafka/kafka-version.properties")) {
props.load(resourceStream);
} catch (Exception e) {
log.warn("Error while loading kafka-version.properties: {}", e.getMessage());
}
VERSION = props.getProperty("version", DEFAULT_VALUE).trim();
COMMIT_ID = props.getProperty("commitId", DEFAULT_VALUE).trim();
}
public static String getVersion() {
return VERSION;
}
public static String getCommitId() {
return COMMIT_ID;
}
public static synchronized void registerAppInfo(String prefix, String id, Metrics metrics, long nowMs) {
try {
ObjectName name = new ObjectName(prefix + ":type=app-info,id=" + Sanitizer.jmxSanitize(id));
AppInfo mBean = new AppInfo(nowMs);
ManagementFactory.getPlatformMBeanServer().registerMBean(mBean, name);
registerMetrics(metrics, mBean); // prefix will be added later by JmxReporter
} catch (JMException e) {
log.warn("Error registering AppInfo mbean", e);
}
}
public static synchronized void unregisterAppInfo(String prefix, String id, Metrics metrics) {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
try {
ObjectName name = new ObjectName(prefix + ":type=app-info,id=" + Sanitizer.jmxSanitize(id));
if (server.isRegistered(name))
server.unregisterMBean(name);
unregisterMetrics(metrics);
} catch (JMException e) {
log.warn("Error unregistering AppInfo mbean", e);
} finally {
log.info("App info {} for {} unregistered", prefix, id);
}
}
private static MetricName metricName(Metrics metrics, String name) {
return metrics.metricName(name, "app-info", "Metric indicating " + name);
}
private static void registerMetrics(Metrics metrics, AppInfo appInfo) {
if (metrics != null) {
metrics.addMetric(metricName(metrics, "version"), new ImmutableValue<>(appInfo.getVersion()));
metrics.addMetric(metricName(metrics, "commit-id"), new ImmutableValue<>(appInfo.getCommitId()));
metrics.addMetric(metricName(metrics, "start-time-ms"), new ImmutableValue<>(appInfo.getStartTimeMs()));
}
}
private static void unregisterMetrics(Metrics metrics) {
if (metrics != null) {
metrics.removeMetric(metricName(metrics, "version"));
metrics.removeMetric(metricName(metrics, "commit-id"));
metrics.removeMetric(metricName(metrics, "start-time-ms"));
}
}
public interface AppInfoMBean {
String getVersion();
String getCommitId();
Long getStartTimeMs();
}
public static class AppInfo implements AppInfoMBean {
private final Long startTimeMs;
public AppInfo(long startTimeMs) {
this.startTimeMs = startTimeMs;
log.info("Kafka version: {}", AppInfoParser.getVersion());
log.info("Kafka commitId: {}", AppInfoParser.getCommitId());
log.info("Kafka startTimeMs: {}", startTimeMs);
}
@Override
public String getVersion() {
return AppInfoParser.getVersion();
}
@Override
public String getCommitId() {
return AppInfoParser.getCommitId();
}
@Override
public Long getStartTimeMs() {
return startTimeMs;
}
}
static class ImmutableValue<T> implements Gauge<T> {
private final T value;
public ImmutableValue(T value) {
this.value = value;
}
@Override
public T value(MetricConfig config, long now) {
return value;
}
}
}
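// Editor's note: a minimal usage sketch, not part of the original file. After
// registerAppInfo runs, the version and commit id are exposed as a JMX MBean; this
// hypothetical helper reads the version back from the platform MBean server.
class AppInfoJmxReadSketch {
    static String readVersion(String prefix, String id) throws JMException {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName(prefix + ":type=app-info,id=" + Sanitizer.jmxSanitize(id));
        return (String) server.getAttribute(name, "Version"); // attribute derived from AppInfoMBean.getVersion()
    }
}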
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/BufferSupplier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;
/**
* Simple non-threadsafe interface for caching byte buffers. This is suitable for simple cases like ensuring that
* a given KafkaConsumer reuses the same decompression buffer when iterating over fetched records. For small record
* batches, allocating a potentially large buffer (64 KB for LZ4) will dominate the cost of decompressing and
* iterating over the records in the batch.
*/
public abstract class BufferSupplier implements AutoCloseable {
public static final BufferSupplier NO_CACHING = new BufferSupplier() {
@Override
public ByteBuffer get(int capacity) {
return ByteBuffer.allocate(capacity);
}
@Override
public void release(ByteBuffer buffer) {}
@Override
public void close() {}
};
public static BufferSupplier create() {
return new DefaultSupplier();
}
/**
* Supply a buffer with the required capacity. This may return a cached buffer or allocate a new instance.
*/
public abstract ByteBuffer get(int capacity);
/**
* Return the provided buffer to be reused by a subsequent call to `get`.
*/
public abstract void release(ByteBuffer buffer);
/**
* Release all resources associated with this supplier.
*/
public abstract void close();
private static class DefaultSupplier extends BufferSupplier {
// We currently use a single block size, so optimise for that case
private final Map<Integer, Deque<ByteBuffer>> bufferMap = new HashMap<>(1);
@Override
public ByteBuffer get(int size) {
Deque<ByteBuffer> bufferQueue = bufferMap.get(size);
if (bufferQueue == null || bufferQueue.isEmpty())
return ByteBuffer.allocate(size);
else
return bufferQueue.pollFirst();
}
@Override
public void release(ByteBuffer buffer) {
buffer.clear();
Deque<ByteBuffer> bufferQueue = bufferMap.get(buffer.capacity());
if (bufferQueue == null) {
// We currently keep a single buffer in flight, so optimise for that case
bufferQueue = new ArrayDeque<>(1);
bufferMap.put(buffer.capacity(), bufferQueue);
}
bufferQueue.addLast(buffer);
}
@Override
public void close() {
bufferMap.clear();
}
}
/**
* Simple buffer supplier for single-threaded usage. It caches a single buffer, which grows
* monotonically as needed to fulfill the allocation request.
*/
public static class GrowableBufferSupplier extends BufferSupplier {
private ByteBuffer cachedBuffer;
@Override
public ByteBuffer get(int minCapacity) {
if (cachedBuffer != null && cachedBuffer.capacity() >= minCapacity) {
ByteBuffer res = cachedBuffer;
cachedBuffer = null;
return res;
} else {
cachedBuffer = null;
return ByteBuffer.allocate(minCapacity);
}
}
@Override
public void release(ByteBuffer buffer) {
buffer.clear();
cachedBuffer = buffer;
}
@Override
public void close() {
cachedBuffer = null;
}
}
}
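// Editor's note: a minimal usage sketch, not part of the original file. It shows the
// get/release cycle that lets a decompression path reuse one buffer instead of
// reallocating a large buffer per record batch.
class BufferSupplierUsageSketch {
    static void example() {
        BufferSupplier supplier = BufferSupplier.create();
        ByteBuffer first = supplier.get(64 * 1024);  // allocated fresh: the cache is empty
        supplier.release(first);                     // cleared and cached for reuse
        ByteBuffer second = supplier.get(64 * 1024); // same instance handed back from the cache
        assert first == second;
        supplier.close();
    }
}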
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ByteBufferInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.InputStream;
import java.nio.ByteBuffer;
/**
 * An input stream backed by a ByteBuffer.
*/
public final class ByteBufferInputStream extends InputStream {
private final ByteBuffer buffer;
public ByteBufferInputStream(ByteBuffer buffer) {
this.buffer = buffer;
}
public int read() {
if (!buffer.hasRemaining()) {
return -1;
}
return buffer.get() & 0xFF;
}
public int read(byte[] bytes, int off, int len) {
if (len == 0) {
return 0;
}
if (!buffer.hasRemaining()) {
return -1;
}
len = Math.min(len, buffer.remaining());
buffer.get(bytes, off, len);
return len;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ByteBufferOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
* A ByteBuffer-backed OutputStream that expands the internal ByteBuffer as required. Given this, the caller should
* always access the underlying ByteBuffer via the {@link #buffer()} method until all writes are completed.
*
* This class is typically used for 2 purposes:
*
* 1. Write to a ByteBuffer when there is a chance that we may need to expand it in order to fit all the desired data
* 2. Write to a ByteBuffer via methods that expect an OutputStream interface
*
 * Hard-to-track bugs can happen when this class is used for the second reason and unexpected buffer expansion happens.
* So, it's best to assume that buffer expansion can always happen. An improvement would be to create a separate class
* that throws an error if buffer expansion is required to avoid the issue altogether.
*/
public class ByteBufferOutputStream extends OutputStream {
private static final float REALLOCATION_FACTOR = 1.1f;
private final int initialCapacity;
private final int initialPosition;
private ByteBuffer buffer;
/**
* Creates an instance of this class that will write to the received `buffer` up to its `limit`. If necessary to
* satisfy `write` or `position` calls, larger buffers will be allocated so the {@link #buffer()} method may return
* a different buffer than the received `buffer` parameter.
*
* Prefer one of the constructors that allocate the internal buffer for clearer semantics.
*/
public ByteBufferOutputStream(ByteBuffer buffer) {
this.buffer = buffer;
this.initialPosition = buffer.position();
this.initialCapacity = buffer.capacity();
}
public ByteBufferOutputStream(int initialCapacity) {
this(initialCapacity, false);
}
public ByteBufferOutputStream(int initialCapacity, boolean directBuffer) {
this(directBuffer ? ByteBuffer.allocateDirect(initialCapacity) : ByteBuffer.allocate(initialCapacity));
}
public void write(int b) {
ensureRemaining(1);
buffer.put((byte) b);
}
public void write(byte[] bytes, int off, int len) {
ensureRemaining(len);
buffer.put(bytes, off, len);
}
public void write(ByteBuffer sourceBuffer) {
ensureRemaining(sourceBuffer.remaining());
buffer.put(sourceBuffer);
}
public ByteBuffer buffer() {
return buffer;
}
public int position() {
return buffer.position();
}
public int remaining() {
return buffer.remaining();
}
public int limit() {
return buffer.limit();
}
public void position(int position) {
ensureRemaining(position - buffer.position());
buffer.position(position);
}
/**
* The capacity of the first internal ByteBuffer used by this class. This is useful in cases where a pooled
* ByteBuffer was passed via the constructor and it needs to be returned to the pool.
*/
public int initialCapacity() {
return initialCapacity;
}
/**
* Ensure there is enough space to write some number of bytes, expanding the underlying buffer if necessary.
* This can be used to avoid incremental expansions through calls to {@link #write(int)} when you know how
* many total bytes are needed.
*
* @param remainingBytesRequired The number of bytes required
*/
public void ensureRemaining(int remainingBytesRequired) {
if (remainingBytesRequired > buffer.remaining())
expandBuffer(remainingBytesRequired);
}
private void expandBuffer(int remainingRequired) {
int expandSize = Math.max((int) (buffer.limit() * REALLOCATION_FACTOR), buffer.position() + remainingRequired);
ByteBuffer temp = ByteBuffer.allocate(expandSize);
int limit = limit();
buffer.flip();
temp.put(buffer);
buffer.limit(limit);
// reset the old buffer's position so that the partial data in the new buffer cannot be mistakenly consumed
// we should ideally only do this for the original buffer, but the additional complexity doesn't seem worth it
buffer.position(initialPosition);
buffer = temp;
}
}
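// Editor's note: a minimal usage sketch, not part of the original file. It illustrates
// the caveat in the class Javadoc: after writes that may expand the stream, read the
// result through buffer(), never through the ByteBuffer originally passed in.
class ByteBufferOutputStreamUsageSketch {
    static ByteBuffer example() {
        ByteBufferOutputStream out = new ByteBufferOutputStream(4); // deliberately small
        out.write(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, 0, 8);       // forces an expansion
        ByteBuffer result = out.buffer();                           // the *current* backing buffer
        result.flip();                                              // prepare for reading
        return result;
    }
}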
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ByteBufferUnmapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.IOException;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import static java.lang.invoke.MethodHandles.constant;
import static java.lang.invoke.MethodHandles.dropArguments;
import static java.lang.invoke.MethodHandles.filterReturnValue;
import static java.lang.invoke.MethodHandles.guardWithTest;
import static java.lang.invoke.MethodHandles.lookup;
import static java.lang.invoke.MethodType.methodType;
/**
* Provides a mechanism to unmap mapped and direct byte buffers.
*
* The implementation was inspired by the one in Lucene's MMapDirectory.
*/
public final class ByteBufferUnmapper {
// null if unmap is not supported
private static final MethodHandle UNMAP;
// null if unmap is supported
private static final RuntimeException UNMAP_NOT_SUPPORTED_EXCEPTION;
static {
Object unmap = null;
RuntimeException exception = null;
try {
unmap = lookupUnmapMethodHandle();
} catch (RuntimeException e) {
exception = e;
}
if (unmap != null) {
UNMAP = (MethodHandle) unmap;
UNMAP_NOT_SUPPORTED_EXCEPTION = null;
} else {
UNMAP = null;
UNMAP_NOT_SUPPORTED_EXCEPTION = exception;
}
}
private ByteBufferUnmapper() {}
/**
* Unmap the provided mapped or direct byte buffer.
*
* This buffer cannot be referenced after this call, so it's highly recommended that any fields referencing it
* should be set to null.
*
* @throws IllegalArgumentException if buffer is not mapped or direct.
*/
public static void unmap(String resourceDescription, ByteBuffer buffer) throws IOException {
if (!buffer.isDirect())
throw new IllegalArgumentException("Unmapping only works with direct buffers");
if (UNMAP == null)
throw UNMAP_NOT_SUPPORTED_EXCEPTION;
try {
UNMAP.invokeExact(buffer);
} catch (Throwable throwable) {
throw new IOException("Unable to unmap the mapped buffer: " + resourceDescription, throwable);
}
}
private static MethodHandle lookupUnmapMethodHandle() {
final MethodHandles.Lookup lookup = lookup();
try {
if (Java.IS_JAVA9_COMPATIBLE)
return unmapJava9(lookup);
else
return unmapJava7Or8(lookup);
} catch (ReflectiveOperationException | RuntimeException e1) {
throw new UnsupportedOperationException("Unmapping is not supported on this platform, because internal " +
"Java APIs are not compatible with this Kafka version", e1);
}
}
private static MethodHandle unmapJava7Or8(MethodHandles.Lookup lookup) throws ReflectiveOperationException {
/* "Compile" a MethodHandle that is roughly equivalent to the following lambda:
*
* (ByteBuffer buffer) -> {
* sun.misc.Cleaner cleaner = ((java.nio.DirectByteBuffer) byteBuffer).cleaner();
* if (nonNull(cleaner))
* cleaner.clean();
* else
* noop(cleaner); // the noop is needed because MethodHandles#guardWithTest always needs both if and else
* }
*/
Class<?> directBufferClass = Class.forName("java.nio.DirectByteBuffer");
Method m = directBufferClass.getMethod("cleaner");
m.setAccessible(true);
MethodHandle directBufferCleanerMethod = lookup.unreflect(m);
Class<?> cleanerClass = directBufferCleanerMethod.type().returnType();
MethodHandle cleanMethod = lookup.findVirtual(cleanerClass, "clean", methodType(void.class));
MethodHandle nonNullTest = lookup.findStatic(ByteBufferUnmapper.class, "nonNull",
methodType(boolean.class, Object.class)).asType(methodType(boolean.class, cleanerClass));
MethodHandle noop = dropArguments(constant(Void.class, null).asType(methodType(void.class)), 0, cleanerClass);
MethodHandle unmapper = filterReturnValue(directBufferCleanerMethod, guardWithTest(nonNullTest, cleanMethod, noop))
.asType(methodType(void.class, ByteBuffer.class));
return unmapper;
}
private static MethodHandle unmapJava9(MethodHandles.Lookup lookup) throws ReflectiveOperationException {
Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
MethodHandle unmapper = lookup.findVirtual(unsafeClass, "invokeCleaner",
methodType(void.class, ByteBuffer.class));
Field f = unsafeClass.getDeclaredField("theUnsafe");
f.setAccessible(true);
Object theUnsafe = f.get(null);
return unmapper.bindTo(theUnsafe);
}
private static boolean nonNull(Object o) {
return o != null;
}
}
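// Editor's note: a minimal usage sketch, not part of the original file. It maps a file
// (assumed non-empty), reads from it, and unmaps eagerly instead of waiting for garbage
// collection; the mapped buffer must not be touched after unmap returns.
class ByteBufferUnmapperUsageSketch {
    static byte firstByte(java.nio.file.Path file) throws IOException {
        try (java.nio.channels.FileChannel channel =
                 java.nio.channels.FileChannel.open(file, java.nio.file.StandardOpenOption.READ)) {
            java.nio.MappedByteBuffer mapped =
                channel.map(java.nio.channels.FileChannel.MapMode.READ_ONLY, 0, channel.size());
            byte b = mapped.get(0);
            ByteBufferUnmapper.unmap(file.toString(), mapped); // release the mapping now
            return b;
        }
    }
}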
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ByteUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
 * This class exposes low-level methods for reading/writing from byte streams or buffers.
*/
public final class ByteUtils {
public static final ByteBuffer EMPTY_BUF = ByteBuffer.wrap(new byte[0]);
private ByteUtils() {}
/**
* Read an unsigned integer from the current position in the buffer, incrementing the position by 4 bytes
*
* @param buffer The buffer to read from
* @return The integer read, as a long to avoid signedness
*/
public static long readUnsignedInt(ByteBuffer buffer) {
return buffer.getInt() & 0xffffffffL;
}
/**
* Read an unsigned integer from the given position without modifying the buffers position
*
* @param buffer the buffer to read from
* @param index the index from which to read the integer
* @return The integer read, as a long to avoid signedness
*/
public static long readUnsignedInt(ByteBuffer buffer, int index) {
return buffer.getInt(index) & 0xffffffffL;
}
/**
* Read an unsigned integer stored in little-endian format from the {@link InputStream}.
*
* @param in The stream to read from
* @return The integer read (MUST BE TREATED WITH SPECIAL CARE TO AVOID SIGNEDNESS)
*/
public static int readUnsignedIntLE(InputStream in) throws IOException {
return in.read()
| (in.read() << 8)
| (in.read() << 16)
| (in.read() << 24);
}
/**
* Read an unsigned integer stored in little-endian format from a byte array
* at a given offset.
*
* @param buffer The byte array to read from
* @param offset The position in buffer to read from
* @return The integer read (MUST BE TREATED WITH SPECIAL CARE TO AVOID SIGNEDNESS)
*/
public static int readUnsignedIntLE(byte[] buffer, int offset) {
return (buffer[offset] << 0 & 0xff)
| ((buffer[offset + 1] & 0xff) << 8)
| ((buffer[offset + 2] & 0xff) << 16)
| ((buffer[offset + 3] & 0xff) << 24);
}
/**
* Read a big-endian integer from a byte array
*/
public static int readIntBE(byte[] buffer, int offset) {
return ((buffer[offset] & 0xFF) << 24)
| ((buffer[offset + 1] & 0xFF) << 16)
| ((buffer[offset + 2] & 0xFF) << 8)
| (buffer[offset + 3] & 0xFF);
}
/**
* Write the given long value as a 4 byte unsigned integer. Overflow is ignored.
*
* @param buffer The buffer to write to
* @param index The position in the buffer at which to begin writing
* @param value The value to write
*/
public static void writeUnsignedInt(ByteBuffer buffer, int index, long value) {
buffer.putInt(index, (int) (value & 0xffffffffL));
}
/**
* Write the given long value as a 4 byte unsigned integer. Overflow is ignored.
*
* @param buffer The buffer to write to
* @param value The value to write
*/
public static void writeUnsignedInt(ByteBuffer buffer, long value) {
buffer.putInt((int) (value & 0xffffffffL));
}
/**
* Write an unsigned integer in little-endian format to the {@link OutputStream}.
*
* @param out The stream to write to
* @param value The value to write
*/
public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException {
out.write(value);
out.write(value >>> 8);
out.write(value >>> 16);
out.write(value >>> 24);
}
/**
* Write an unsigned integer in little-endian format to a byte array
* at a given offset.
*
* @param buffer The byte array to write to
* @param offset The position in buffer to write to
* @param value The value to write
*/
public static void writeUnsignedIntLE(byte[] buffer, int offset, int value) {
buffer[offset] = (byte) value;
buffer[offset + 1] = (byte) (value >>> 8);
buffer[offset + 2] = (byte) (value >>> 16);
buffer[offset + 3] = (byte) (value >>> 24);
}
/**
* Read an integer stored in variable-length format using unsigned decoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>.
*
* @param buffer The buffer to read from
* @return The integer read
*
* @throws IllegalArgumentException if variable-length value does not terminate after 5 bytes have been read
*/
public static int readUnsignedVarint(ByteBuffer buffer) {
int value = 0;
int i = 0;
int b;
while (((b = buffer.get()) & 0x80) != 0) {
value |= (b & 0x7f) << i;
i += 7;
if (i > 28)
throw illegalVarintException(value);
}
value |= b << i;
return value;
}
/**
* Read an integer stored in variable-length format using unsigned decoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>.
*
* @param in The input to read from
* @return The integer read
*
* @throws IllegalArgumentException if variable-length value does not terminate after 5 bytes have been read
* @throws IOException if {@link DataInput} throws {@link IOException}
*/
public static int readUnsignedVarint(DataInput in) throws IOException {
int value = 0;
int i = 0;
int b;
while (((b = in.readByte()) & 0x80) != 0) {
value |= (b & 0x7f) << i;
i += 7;
if (i > 28)
throw illegalVarintException(value);
}
value |= b << i;
return value;
}
/**
* Read an integer stored in variable-length format using zig-zag decoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>.
*
* @param buffer The buffer to read from
* @return The integer read
*
* @throws IllegalArgumentException if variable-length value does not terminate after 5 bytes have been read
*/
public static int readVarint(ByteBuffer buffer) {
int value = readUnsignedVarint(buffer);
return (value >>> 1) ^ -(value & 1);
}
/**
* Read an integer stored in variable-length format using zig-zag decoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>.
*
* @param in The input to read from
* @return The integer read
*
* @throws IllegalArgumentException if variable-length value does not terminate after 5 bytes have been read
* @throws IOException if {@link DataInput} throws {@link IOException}
*/
public static int readVarint(DataInput in) throws IOException {
int value = readUnsignedVarint(in);
return (value >>> 1) ^ -(value & 1);
}
/**
* Read a long stored in variable-length format using zig-zag decoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>.
*
* @param in The input to read from
* @return The long value read
*
* @throws IllegalArgumentException if variable-length value does not terminate after 10 bytes have been read
* @throws IOException if {@link DataInput} throws {@link IOException}
*/
public static long readVarlong(DataInput in) throws IOException {
long value = 0L;
int i = 0;
long b;
while (((b = in.readByte()) & 0x80) != 0) {
value |= (b & 0x7f) << i;
i += 7;
if (i > 63)
throw illegalVarlongException(value);
}
value |= b << i;
return (value >>> 1) ^ -(value & 1);
}
/**
* Read a long stored in variable-length format using zig-zag decoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>.
*
* @param buffer The buffer to read from
* @return The long value read
*
* @throws IllegalArgumentException if variable-length value does not terminate after 10 bytes have been read
*/
public static long readVarlong(ByteBuffer buffer) {
long value = 0L;
int i = 0;
long b;
while (((b = buffer.get()) & 0x80) != 0) {
value |= (b & 0x7f) << i;
i += 7;
if (i > 63)
throw illegalVarlongException(value);
}
value |= b << i;
return (value >>> 1) ^ -(value & 1);
}
/**
* Read a double-precision 64-bit format IEEE 754 value.
*
* @param in The input to read from
* @return The double value read
*/
public static double readDouble(DataInput in) throws IOException {
return in.readDouble();
}
/**
* Read a double-precision 64-bit format IEEE 754 value.
*
* @param buffer The buffer to read from
* @return The long value read
*/
public static double readDouble(ByteBuffer buffer) {
return buffer.getDouble();
}
/**
* Write the given integer following the variable-length unsigned encoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>
* into the buffer.
*
* @param value The value to write
* @param buffer The output to write to
*/
public static void writeUnsignedVarint(int value, ByteBuffer buffer) {
while ((value & 0xffffff80) != 0L) {
byte b = (byte) ((value & 0x7f) | 0x80);
buffer.put(b);
value >>>= 7;
}
buffer.put((byte) value);
}
/**
* Write the given integer following the variable-length unsigned encoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>
* into the buffer.
*
* @param value The value to write
* @param out The output to write to
*/
public static void writeUnsignedVarint(int value, DataOutput out) throws IOException {
while ((value & 0xffffff80) != 0L) {
byte b = (byte) ((value & 0x7f) | 0x80);
out.writeByte(b);
value >>>= 7;
}
out.writeByte((byte) value);
}
/**
* Write the given integer following the variable-length zig-zag encoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>
* into the output.
*
* @param value The value to write
* @param out The output to write to
*/
public static void writeVarint(int value, DataOutput out) throws IOException {
writeUnsignedVarint((value << 1) ^ (value >> 31), out);
}
/**
* Write the given integer following the variable-length zig-zag encoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>
* into the buffer.
*
* @param value The value to write
* @param buffer The output to write to
*/
public static void writeVarint(int value, ByteBuffer buffer) {
writeUnsignedVarint((value << 1) ^ (value >> 31), buffer);
}
/**
* Write the given integer following the variable-length zig-zag encoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>
* into the output.
*
* @param value The value to write
* @param out The output to write to
*/
public static void writeVarlong(long value, DataOutput out) throws IOException {
long v = (value << 1) ^ (value >> 63);
while ((v & 0xffffffffffffff80L) != 0L) {
out.writeByte(((int) v & 0x7f) | 0x80);
v >>>= 7;
}
out.writeByte((byte) v);
}
/**
* Write the given integer following the variable-length zig-zag encoding from
* <a href="http://code.google.com/apis/protocolbuffers/docs/encoding.html"> Google Protocol Buffers</a>
* into the buffer.
*
* @param value The value to write
* @param buffer The buffer to write to
*/
public static void writeVarlong(long value, ByteBuffer buffer) {
long v = (value << 1) ^ (value >> 63);
while ((v & 0xffffffffffffff80L) != 0L) {
byte b = (byte) ((v & 0x7f) | 0x80);
buffer.put(b);
v >>>= 7;
}
buffer.put((byte) v);
}
/**
* Write the given double following the double-precision 64-bit format IEEE 754 value into the output.
*
* @param value The value to write
* @param out The output to write to
*/
public static void writeDouble(double value, DataOutput out) throws IOException {
out.writeDouble(value);
}
/**
* Write the given double following the double-precision 64-bit format IEEE 754 value into the buffer.
*
* @param value The value to write
* @param buffer The buffer to write to
*/
public static void writeDouble(double value, ByteBuffer buffer) {
buffer.putDouble(value);
}
/**
* Number of bytes needed to encode an integer in unsigned variable-length format.
*
* @param value The signed value
*
* @see #writeUnsignedVarint(int, DataOutput)
*/
public static int sizeOfUnsignedVarint(int value) {
// Protocol buffers varint encoding is variable length, with a minimum of 1 byte
// (for zero). The values themselves are not important. What's important here is
// any leading zero bits are dropped from output. We can use this leading zero
// count w/ fast intrinsic to calc the output length directly.
// Test cases verify this matches the output for loop logic exactly.
// return (38 - leadingZeros) / 7 + leadingZeros / 32;
// The above formula provides the implementation, but the Java encoding is suboptimal
// when we have a narrow range of integers, so we can do better manually
int leadingZeros = Integer.numberOfLeadingZeros(value);
int leadingZerosBelow38DividedBy7 = ((38 - leadingZeros) * 0b10010010010010011) >>> 19;
return leadingZerosBelow38DividedBy7 + (leadingZeros >>> 5);
}
/**
* Number of bytes needed to encode an integer in variable-length format.
*
* @param value The signed value
*/
public static int sizeOfVarint(int value) {
return sizeOfUnsignedVarint((value << 1) ^ (value >> 31));
}
/**
* Number of bytes needed to encode a long in variable-length format.
*
* @param value The signed value
* @see #sizeOfUnsignedVarint(int)
*/
public static int sizeOfVarlong(long value) {
long v = (value << 1) ^ (value >> 63);
// For implementation notes @see #sizeOfUnsignedVarint(int)
// Similar logic is applied to allow for 64bit input -> 1-9byte output.
// return (70 - leadingZeros) / 7 + leadingZeros / 64;
int leadingZeros = Long.numberOfLeadingZeros(v);
int leadingZerosBelow70DividedBy7 = ((70 - leadingZeros) * 0b10010010010010011) >>> 19;
return leadingZerosBelow70DividedBy7 + (leadingZeros >>> 6);
}
private static IllegalArgumentException illegalVarintException(int value) {
throw new IllegalArgumentException("Varint is too long, the most significant bit in the 5th byte is set, " +
"converted value: " + Integer.toHexString(value));
}
private static IllegalArgumentException illegalVarlongException(long value) {
throw new IllegalArgumentException("Varlong is too long, most significant bit in the 10th byte is set, " +
"converted value: " + Long.toHexString(value));
}
}
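// Editor's note: a minimal round-trip sketch, not part of the original file. Zig-zag
// encoding maps small negative values to small unsigned ones, so -1 costs a single byte
// rather than five.
class VarintRoundTripSketch {
    static void example() {
        ByteBuffer buffer = ByteBuffer.allocate(ByteUtils.sizeOfVarint(-1)); // 1 byte: zig-zag(-1) == 1
        ByteUtils.writeVarint(-1, buffer);
        buffer.flip();
        assert ByteUtils.readVarint(buffer) == -1;
    }
}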
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Bytes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Comparator;
/**
* Utility class that handles immutable byte arrays.
*/
public class Bytes implements Comparable<Bytes> {
public static final byte[] EMPTY = new byte[0];
private static final char[] HEX_CHARS_UPPER = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
private final byte[] bytes;
// cache the hash code for the string, default to 0
private int hashCode;
public static Bytes wrap(byte[] bytes) {
if (bytes == null)
return null;
return new Bytes(bytes);
}
/**
* Create a Bytes using the byte array.
*
* @param bytes This array becomes the backing storage for the object.
*/
public Bytes(byte[] bytes) {
this.bytes = bytes;
// initialize hash code to 0
hashCode = 0;
}
/**
* Get the data from the Bytes.
* @return The underlying byte array
*/
public byte[] get() {
return this.bytes;
}
/**
* The hashcode is cached except for the case where it is computed as 0, in which
* case we compute the hashcode on every call.
*
* @return the hashcode
*/
@Override
public int hashCode() {
if (hashCode == 0) {
hashCode = Arrays.hashCode(bytes);
}
return hashCode;
}
@Override
public boolean equals(Object other) {
if (this == other)
return true;
if (other == null)
return false;
// we intentionally use the function to compute hashcode here
if (this.hashCode() != other.hashCode())
return false;
if (other instanceof Bytes)
return Arrays.equals(this.bytes, ((Bytes) other).get());
return false;
}
@Override
public int compareTo(Bytes that) {
return BYTES_LEXICO_COMPARATOR.compare(this.bytes, that.bytes);
}
@Override
public String toString() {
return Bytes.toString(bytes, 0, bytes.length);
}
/**
* Write a printable representation of a byte array. Non-printable
 * characters are hex escaped in the format \\x%02X, e.g.
* \x00 \x05 etc.
*
 * This function is adapted from org.apache.hadoop.hbase.util.Bytes.
*
* @param b array to write out
* @param off offset to start at
* @param len length to write
* @return string output
*/
private static String toString(final byte[] b, int off, int len) {
StringBuilder result = new StringBuilder();
if (b == null)
return result.toString();
// just in case we are passed a 'len' that is > buffer length...
if (off >= b.length)
return result.toString();
if (off + len > b.length)
len = b.length - off;
for (int i = off; i < off + len; ++i) {
int ch = b[i] & 0xFF;
if (ch >= ' ' && ch <= '~' && ch != '\\') {
result.append((char) ch);
} else {
result.append("\\x");
result.append(HEX_CHARS_UPPER[ch / 0x10]);
result.append(HEX_CHARS_UPPER[ch % 0x10]);
}
}
return result.toString();
}
/**
* Increment the underlying byte array by adding 1. Throws an IndexOutOfBoundsException if incrementing would cause
* the underlying input byte array to overflow.
*
* @param input - The byte array to increment
* @return A new copy of the incremented byte array.
*/
public static Bytes increment(Bytes input) throws IndexOutOfBoundsException {
byte[] inputArr = input.get();
byte[] ret = new byte[inputArr.length];
int carry = 1;
for (int i = inputArr.length - 1; i >= 0; i--) {
if (inputArr[i] == (byte) 0xFF && carry == 1) {
ret[i] = (byte) 0x00;
} else {
ret[i] = (byte) (inputArr[i] + carry);
carry = 0;
}
}
if (carry == 0) {
return wrap(ret);
} else {
throw new IndexOutOfBoundsException();
}
}
/**
 * A byte array comparator based on lexicographic ordering.
*/
public final static ByteArrayComparator BYTES_LEXICO_COMPARATOR = new LexicographicByteArrayComparator();
public interface ByteArrayComparator extends Comparator<byte[]>, Serializable {
int compare(final byte[] buffer1, int offset1, int length1,
final byte[] buffer2, int offset2, int length2);
}
private static class LexicographicByteArrayComparator implements ByteArrayComparator {
@Override
public int compare(byte[] buffer1, byte[] buffer2) {
return compare(buffer1, 0, buffer1.length, buffer2, 0, buffer2.length);
}
public int compare(final byte[] buffer1, int offset1, int length1,
final byte[] buffer2, int offset2, int length2) {
// short circuit equal case
if (buffer1 == buffer2 &&
offset1 == offset2 &&
length1 == length2) {
return 0;
}
// similar to Arrays.compare() but considers offset and length
int end1 = offset1 + length1;
int end2 = offset2 + length2;
for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
int a = buffer1[i] & 0xff;
int b = buffer2[j] & 0xff;
if (a != b) {
return a - b;
}
}
return length1 - length2;
}
}
}
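// Editor's note: a minimal usage sketch, not part of the original file. It shows the
// carry behavior of increment(): 0x00FF rolls over to 0x0100, while an all-0xFF input
// throws because there is no room for the carry.
class BytesIncrementSketch {
    static void example() {
        Bytes next = Bytes.increment(Bytes.wrap(new byte[] {0x00, (byte) 0xFF}));
        assert Arrays.equals(next.get(), new byte[] {0x01, 0x00});
    }
}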
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Checksums.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.nio.ByteBuffer;
import java.util.zip.Checksum;
/**
* Utility methods for `Checksum` instances.
*
* Implementation note: we can add methods to our implementations of CRC32 and CRC32C, but we cannot do the same for
* the Java implementations (we prefer the Java 9 implementation of CRC32C if available). A utility class is the
* simplest way to add methods that are useful for all Checksum implementations.
*
* NOTE: This class is intended for INTERNAL usage only within Kafka.
*/
public final class Checksums {
private static final MethodHandle BYTE_BUFFER_UPDATE;
static {
MethodHandle byteBufferUpdate = null;
if (Java.IS_JAVA9_COMPATIBLE) {
try {
byteBufferUpdate = MethodHandles.publicLookup().findVirtual(Checksum.class, "update",
MethodType.methodType(void.class, ByteBuffer.class));
} catch (Throwable t) {
handleUpdateThrowable(t);
}
}
BYTE_BUFFER_UPDATE = byteBufferUpdate;
}
private Checksums() {
}
/**
* Uses {@link Checksum#update} on {@code buffer}'s content, without modifying its position and limit.<br>
* This is semantically equivalent to {@link #update(Checksum, ByteBuffer, int, int)} with {@code offset = 0}.
*/
public static void update(Checksum checksum, ByteBuffer buffer, int length) {
update(checksum, buffer, 0, length);
}
/**
* Uses {@link Checksum#update} on {@code buffer}'s content, starting from the given {@code offset}
* by the provided {@code length}, without modifying its position and limit.
*/
public static void update(Checksum checksum, ByteBuffer buffer, int offset, int length) {
if (buffer.hasArray()) {
checksum.update(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, length);
} else if (BYTE_BUFFER_UPDATE != null && buffer.isDirect()) {
final int oldPosition = buffer.position();
final int oldLimit = buffer.limit();
try {
// save a slice to be used to save an allocation in the hot-path
final int start = oldPosition + offset;
buffer.limit(start + length);
buffer.position(start);
BYTE_BUFFER_UPDATE.invokeExact(checksum, buffer);
} catch (Throwable t) {
handleUpdateThrowable(t);
} finally {
// reset buffer's offsets
buffer.limit(oldLimit);
buffer.position(oldPosition);
}
} else {
// slow-path
int start = buffer.position() + offset;
for (int i = start; i < start + length; i++) {
checksum.update(buffer.get(i));
}
}
}
private static void handleUpdateThrowable(Throwable t) {
if (t instanceof RuntimeException) {
throw (RuntimeException) t;
}
if (t instanceof Error) {
throw (Error) t;
}
throw new IllegalStateException(t);
}
public static void updateInt(Checksum checksum, int input) {
checksum.update((byte) (input >> 24));
checksum.update((byte) (input >> 16));
checksum.update((byte) (input >> 8));
checksum.update((byte) input /* >> 0 */);
}
public static void updateLong(Checksum checksum, long input) {
checksum.update((byte) (input >> 56));
checksum.update((byte) (input >> 48));
checksum.update((byte) (input >> 40));
checksum.update((byte) (input >> 32));
checksum.update((byte) (input >> 24));
checksum.update((byte) (input >> 16));
checksum.update((byte) (input >> 8));
checksum.update((byte) input /* >> 0 */);
}
}
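// Editor's note: a minimal usage sketch, not part of the original file. It checksums a
// region of a ByteBuffer without disturbing the buffer's position, which is the point of
// the helpers above.
class ChecksumsUsageSketch {
    static long example() {
        ByteBuffer buffer = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
        java.util.zip.CRC32 crc = new java.util.zip.CRC32();
        Checksums.update(crc, buffer, 0, buffer.remaining()); // position is left unchanged
        return crc.getValue();
    }
}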
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ChildFirstClassLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.apache.kafka.common.KafkaException;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Locale;
import java.util.NoSuchElementException;
/**
* A class loader that looks for classes and resources in a specified class path first, before delegating to its parent
* class loader.
*/
public class ChildFirstClassLoader extends URLClassLoader {
static {
ClassLoader.registerAsParallelCapable();
}
/**
* @param classPath Class path string
* @param parent The parent classloader. If the required class / resource cannot be found in the given classPath,
* this classloader will be used to find the class / resource.
*/
public ChildFirstClassLoader(String classPath, ClassLoader parent) {
super(classpathToURLs(classPath), parent);
}
    private static URL[] classpathToURLs(String classPath) {
ArrayList<URL> urls = new ArrayList<>();
for (String path : classPath.split(File.pathSeparator)) {
if (path == null || path.trim().isEmpty())
continue;
File file = new File(path);
try {
if (path.endsWith("/*")) {
File parent = new File(new File(file.getCanonicalPath()).getParent());
if (parent.isDirectory()) {
File[] files = parent.listFiles((dir, name) -> {
String lower = name.toLowerCase(Locale.ROOT);
return lower.endsWith(".jar") || lower.endsWith(".zip");
});
if (files != null) {
for (File jarFile : files) {
urls.add(jarFile.getCanonicalFile().toURI().toURL());
}
}
}
} else if (file.exists()) {
urls.add(file.getCanonicalFile().toURI().toURL());
}
} catch (IOException e) {
throw new KafkaException(e);
}
}
return urls.toArray(new URL[0]);
}
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
synchronized (getClassLoadingLock(name)) {
Class<?> c = findLoadedClass(name);
if (c == null) {
try {
c = findClass(name);
} catch (ClassNotFoundException e) {
// Try parent
c = super.loadClass(name, false);
}
}
if (resolve)
resolveClass(c);
return c;
}
}
@Override
public URL getResource(String name) {
URL url = findResource(name);
if (url == null) {
// try parent
url = super.getResource(name);
}
return url;
}
@Override
public Enumeration<URL> getResources(String name) throws IOException {
Enumeration<URL> urls1 = findResources(name);
Enumeration<URL> urls2 = getParent() != null ? getParent().getResources(name) : null;
return new Enumeration<URL>() {
@Override
public boolean hasMoreElements() {
return (urls1 != null && urls1.hasMoreElements()) || (urls2 != null && urls2.hasMoreElements());
}
@Override
public URL nextElement() {
if (urls1 != null && urls1.hasMoreElements())
return urls1.nextElement();
if (urls2 != null && urls2.hasMoreElements())
return urls2.nextElement();
throw new NoSuchElementException();
}
};
}
}
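// Editor's note: a minimal usage sketch, not part of the original file. Classes found on
// the supplied class path shadow the parent's copies, which is useful for isolating
// plugin dependencies; the path and class name below are hypothetical.
class ChildFirstClassLoaderUsageSketch {
    static Class<?> example() throws ClassNotFoundException {
        ClassLoader parent = ChildFirstClassLoaderUsageSketch.class.getClassLoader();
        ChildFirstClassLoader loader = new ChildFirstClassLoader("/opt/plugins/*", parent);
        return loader.loadClass("com.example.PluginMain"); // resolved child-first, then parent
    }
}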
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/CircularIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Objects;
/**
* An iterator that cycles through the {@code Iterator} of a {@code Collection}
* indefinitely. Useful for tasks such as round-robin load balancing. This class
* does not provide thread-safe access. This {@code Iterator} supports
* {@code null} elements in the underlying {@code Collection}. This
* {@code Iterator} does not support any modification to the underlying
* {@code Collection} after it has been wrapped by this class. Changing the
* underlying {@code Collection} may cause a
* {@link ConcurrentModificationException} or some other undefined behavior.
*/
public class CircularIterator<T> implements Iterator<T> {
private final Iterable<T> iterable;
private Iterator<T> iterator;
private T nextValue;
/**
* Create a new instance of a CircularIterator. The ordering of this
* Iterator will be dictated by the Iterator returned by Collection itself.
*
* @param col The collection to iterate indefinitely
*
* @throws NullPointerException if col is {@code null}
* @throws IllegalArgumentException if col is empty.
*/
public CircularIterator(final Collection<T> col) {
this.iterable = Objects.requireNonNull(col);
this.iterator = col.iterator();
if (col.isEmpty()) {
throw new IllegalArgumentException("CircularIterator can only be used on non-empty lists");
}
this.nextValue = advance();
}
/**
* Returns true since the iteration will forever cycle through the provided
* {@code Collection}.
*
* @return Always true
*/
@Override
public boolean hasNext() {
return true;
}
@Override
public T next() {
final T next = nextValue;
nextValue = advance();
return next;
}
/**
* Return the next value in the {@code Iterator}, restarting the
* {@code Iterator} if necessary.
*
* @return The next value in the iterator
*/
private T advance() {
if (!iterator.hasNext()) {
iterator = iterable.iterator();
}
return iterator.next();
}
/**
* Peek at the next value in the Iterator. Calling this method multiple
* times will return the same element without advancing this Iterator. The
* value returned by this method will be the next item returned by
* {@code next()}.
*
* @return The next value in this {@code Iterator}
*/
public T peek() {
return nextValue;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
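// Editor's note: a minimal usage sketch, not part of the original file. It demonstrates
// the round-robin behavior the Javadoc describes: the iterator wraps around instead of
// ever being exhausted.
class CircularIteratorUsageSketch {
    static void example() {
        CircularIterator<String> it = new CircularIterator<>(java.util.Arrays.asList("a", "b"));
        assert it.next().equals("a");
        assert it.next().equals("b");
        assert it.next().equals("a"); // wrapped around to the start
        assert it.peek().equals("b"); // peek does not advance
    }
}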
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/CloseableIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.Closeable;
import java.util.Iterator;
/**
* Iterators that need to be closed in order to release resources should implement this interface.
*
* Warning: before implementing this interface, consider if there are better options. The chance of misuse is
* a bit high since people are used to iterating without closing.
*/
public interface CloseableIterator<T> extends Iterator<T>, Closeable {
void close();
static <R> CloseableIterator<R> wrap(Iterator<R> inner) {
return new CloseableIterator<R>() {
@Override
public void close() {}
@Override
public boolean hasNext() {
return inner.hasNext();
}
@Override
public R next() {
return inner.next();
}
@Override
public void remove() {
inner.remove();
}
};
}
}
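// Illustrative usage sketch, not part of the original Kafka source: wrap()
// adapts a plain iterator so it can be passed where a CloseableIterator is
// required; the wrapper's close() is a no-op. The class name
// CloseableIteratorExample is hypothetical.
class CloseableIteratorExample {
    public static void main(String[] args) {
        Iterator<Integer> plain = java.util.Arrays.asList(1, 2, 3).iterator();
        // close() declares no checked exception, so try-with-resources works directly.
        try (CloseableIterator<Integer> it = CloseableIterator.wrap(plain)) {
            while (it.hasNext())
                System.out.println(it.next());
        } // close() runs here; a resource-backed implementation would release resources
    }
}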
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/CollectionUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.apache.kafka.common.TopicPartition;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Collection;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.stream.Collectors;
public final class CollectionUtils {
private CollectionUtils() {}
/**
* Given two maps (A, B), returns all the key-value pairs in A whose keys are not contained in B
*/
public static <K, V> Map<K, V> subtractMap(Map<? extends K, ? extends V> minuend, Map<? extends K, ? extends V> subtrahend) {
return minuend.entrySet().stream()
.filter(entry -> !subtrahend.containsKey(entry.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
/**
* group data by topic
*
* @param data Data to be partitioned
* @param <T> Partition data type
* @return partitioned data
*/
public static <T> Map<String, Map<Integer, T>> groupPartitionDataByTopic(Map<TopicPartition, ? extends T> data) {
Map<String, Map<Integer, T>> dataByTopic = new HashMap<>();
for (Map.Entry<TopicPartition, ? extends T> entry : data.entrySet()) {
String topic = entry.getKey().topic();
int partition = entry.getKey().partition();
Map<Integer, T> topicData = dataByTopic.computeIfAbsent(topic, t -> new HashMap<>());
topicData.put(partition, entry.getValue());
}
return dataByTopic;
}
/**
* Group a list of partitions by the topic name.
*
* @param partitions The partitions to collect
* @return partitions per topic
*/
public static Map<String, List<Integer>> groupPartitionsByTopic(Collection<TopicPartition> partitions) {
return groupPartitionsByTopic(
partitions,
topic -> new ArrayList<>(),
List::add
);
}
/**
* Group a collection of partitions by topic
*
* @return The map used to group the partitions
*/
public static <T> Map<String, T> groupPartitionsByTopic(
Collection<TopicPartition> partitions,
Function<String, T> buildGroup,
BiConsumer<T, Integer> addToGroup
) {
Map<String, T> dataByTopic = new HashMap<>();
for (TopicPartition tp : partitions) {
String topic = tp.topic();
T topicData = dataByTopic.computeIfAbsent(topic, buildGroup);
addToGroup.accept(topicData, tp.partition());
}
return dataByTopic;
}
}
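// Illustrative usage sketch, not part of the original Kafka source:
// demonstrates grouping partitions by topic and subtracting one map from
// another. The class name CollectionUtilsExample and the topic names are
// hypothetical.
class CollectionUtilsExample {
    public static void main(String[] args) {
        List<TopicPartition> partitions = new ArrayList<>();
        partitions.add(new TopicPartition("orders", 0));
        partitions.add(new TopicPartition("orders", 1));
        partitions.add(new TopicPartition("payments", 0));
        // e.g. {orders=[0, 1], payments=[0]} (topic order is unspecified)
        System.out.println(CollectionUtils.groupPartitionsByTopic(partitions));
        Map<String, Integer> minuend = new HashMap<>();
        minuend.put("x", 1);
        minuend.put("y", 2);
        Map<String, Integer> subtrahend = new HashMap<>();
        subtrahend.put("y", 9);
        // {x=1}: entries whose keys appear in the subtrahend are dropped
        System.out.println(CollectionUtils.subtractMap(minuend, subtrahend));
    }
}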
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ConfigUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.ConfigKey;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class ConfigUtils {
private static final Logger log = LoggerFactory.getLogger(ConfigUtils.class);
/**
* Translates deprecated configurations into their non-deprecated equivalents
*
* This is a convenience method for {@link ConfigUtils#translateDeprecatedConfigs(Map, Map)}
* until we can use Java 9+ {@code Map.of(..)} and {@code Set.of(...)}
*
* @param configs the input configuration
* @param aliasGroups An array of arrays of synonyms. Each synonym array begins with the non-deprecated synonym
* For example, new String[][] { { a, b }, { c, d, e} }
* would declare b as a deprecated synonym for a,
* and d and e as deprecated synonyms for c.
* The ordering of synonyms determines the order of precedence
* (e.g. the first synonym takes precedence over the second one)
* @return a new configuration map with deprecated keys translated to their non-deprecated equivalents
*/
public static <T> Map<String, T> translateDeprecatedConfigs(Map<String, T> configs, String[][] aliasGroups) {
return translateDeprecatedConfigs(configs, Stream.of(aliasGroups)
.collect(Collectors.toMap(x -> x[0], x -> Stream.of(x).skip(1).collect(Collectors.toList()))));
}
/**
* Translates deprecated configurations into their non-deprecated equivalents
*
* @param configs the input configuration
* @param aliasGroups A map of config to synonyms. Each key is the non-deprecated synonym
* For example, Map.of(a, List.of(b), c, List.of(d, e))
* would declare b as a deprecated synonym for a,
* and d and e as deprecated synonyms for c.
* The ordering of synonyms determines the order of precedence
* (e.g. the first synonym takes precedence over the second one)
* @return a new configuration map with deprecated keys translated to their non-deprecated equivalents
*/
public static <T> Map<String, T> translateDeprecatedConfigs(Map<String, T> configs,
Map<String, List<String>> aliasGroups) {
Set<String> aliasSet = Stream.concat(
aliasGroups.keySet().stream(),
aliasGroups.values().stream().flatMap(Collection::stream))
.collect(Collectors.toSet());
// pass through all configurations without aliases
Map<String, T> newConfigs = configs.entrySet().stream()
.filter(e -> !aliasSet.contains(e.getKey()))
// filter out null values
.filter(e -> Objects.nonNull(e.getValue()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
aliasGroups.forEach((target, aliases) -> {
List<String> deprecated = aliases.stream()
.filter(configs::containsKey)
.collect(Collectors.toList());
if (deprecated.isEmpty()) {
// No deprecated key(s) found.
if (configs.containsKey(target)) {
newConfigs.put(target, configs.get(target));
}
return;
}
String aliasString = String.join(", ", deprecated);
if (configs.containsKey(target)) {
// Ignore the deprecated key(s) because the actual key was set.
log.error(target + " was configured, as well as the deprecated alias(es) " +
aliasString + ". Using the value of " + target);
newConfigs.put(target, configs.get(target));
} else if (deprecated.size() > 1) {
log.error("The configuration keys " + aliasString + " are deprecated and may be " +
"removed in the future. Additionally, this configuration is ambigous because " +
"these configuration keys are all aliases for " + target + ". Please update " +
"your configuration to have only " + target + " set.");
newConfigs.put(target, configs.get(deprecated.get(0)));
} else {
log.warn("Configuration key " + deprecated.get(0) + " is deprecated and may be removed " +
"in the future. Please update your configuration to use " + target + " instead.");
newConfigs.put(target, configs.get(deprecated.get(0)));
}
});
return newConfigs;
}
public static String configMapToRedactedString(Map<String, Object> map, ConfigDef configDef) {
StringBuilder bld = new StringBuilder("{");
List<String> keys = new ArrayList<>(map.keySet());
Collections.sort(keys);
String prefix = "";
for (String key : keys) {
bld.append(prefix).append(key).append("=");
ConfigKey configKey = configDef.configKeys().get(key);
if (configKey == null || configKey.type().isSensitive()) {
bld.append("(redacted)");
} else {
Object value = map.get(key);
if (value == null) {
bld.append("null");
} else if (configKey.type() == Type.STRING) {
bld.append("\"").append(value).append("\"");
} else {
bld.append(value);
}
}
prefix = ", ";
}
bld.append("}");
return bld.toString();
}
}
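// Illustrative usage sketch, not part of the original Kafka source: shows a
// deprecated key being translated to its current name. The key names
// "timeout.ms" and "old.timeout.ms" are hypothetical.
class ConfigUtilsExample {
    public static void main(String[] args) {
        Map<String, String> configs = new java.util.HashMap<>();
        configs.put("old.timeout.ms", "5000");
        // Each alias group starts with the non-deprecated key, followed by its aliases.
        Map<String, String> translated = ConfigUtils.translateDeprecatedConfigs(
            configs, new String[][] { { "timeout.ms", "old.timeout.ms" } });
        // {timeout.ms=5000}: the value moved to the non-deprecated key (a warning is logged)
        System.out.println(translated);
    }
}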
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/CopyOnWriteMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
/**
* A simple read-optimized map implementation that synchronizes only writes and does a full copy on each modification
*/
public class CopyOnWriteMap<K, V> implements ConcurrentMap<K, V> {
private volatile Map<K, V> map;
public CopyOnWriteMap() {
this.map = Collections.emptyMap();
}
public CopyOnWriteMap(Map<K, V> map) {
this.map = Collections.unmodifiableMap(map);
}
@Override
public boolean containsKey(Object k) {
return map.containsKey(k);
}
@Override
public boolean containsValue(Object v) {
return map.containsValue(v);
}
@Override
public Set<java.util.Map.Entry<K, V>> entrySet() {
return map.entrySet();
}
@Override
public V get(Object k) {
return map.get(k);
}
@Override
public boolean isEmpty() {
return map.isEmpty();
}
@Override
public Set<K> keySet() {
return map.keySet();
}
@Override
public int size() {
return map.size();
}
@Override
public Collection<V> values() {
return map.values();
}
@Override
public synchronized void clear() {
this.map = Collections.emptyMap();
}
@Override
public synchronized V put(K k, V v) {
Map<K, V> copy = new HashMap<>(this.map);
V prev = copy.put(k, v);
this.map = Collections.unmodifiableMap(copy);
return prev;
}
@Override
public synchronized void putAll(Map<? extends K, ? extends V> entries) {
Map<K, V> copy = new HashMap<>(this.map);
copy.putAll(entries);
this.map = Collections.unmodifiableMap(copy);
}
@Override
public synchronized V remove(Object key) {
Map<K, V> copy = new HashMap<>(this.map);
V prev = copy.remove(key);
this.map = Collections.unmodifiableMap(copy);
return prev;
}
@Override
public synchronized V putIfAbsent(K k, V v) {
if (!containsKey(k))
return put(k, v);
else
return get(k);
}
@Override
public synchronized boolean remove(Object k, Object v) {
if (containsKey(k) && get(k).equals(v)) {
remove(k);
return true;
} else {
return false;
}
}
@Override
public synchronized boolean replace(K k, V original, V replacement) {
if (containsKey(k) && get(k).equals(original)) {
put(k, replacement);
return true;
} else {
return false;
}
}
@Override
public synchronized V replace(K k, V v) {
if (containsKey(k)) {
return put(k, v);
} else {
return null;
}
}
}
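// Illustrative usage sketch, not part of the original Kafka source: reads go
// against an immutable snapshot without locking, while each write
// synchronizes, copies the map, and swaps in the new snapshot. The class
// name CopyOnWriteMapExample is hypothetical.
class CopyOnWriteMapExample {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> offsets = new CopyOnWriteMap<>();
        offsets.put("topic-0", 42);          // full copy, then volatile swap
        offsets.putIfAbsent("topic-0", 99);  // no-op: key already present
        System.out.println(offsets.get("topic-0")); // 42, read without locking
    }
}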
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Crc32C.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.nio.ByteBuffer;
import java.util.zip.Checksum;
/**
* A class that can be used to compute the CRC32C (Castagnoli) of a ByteBuffer or array of bytes.
*
* We use java.util.zip.CRC32C (introduced in Java 9) if it is available and fall back to PureJavaCrc32C otherwise.
* java.util.zip.CRC32C is significantly faster on reasonably modern CPUs as it uses the CRC32 instruction introduced
* in SSE4.2.
*
* NOTE: This class is intended for INTERNAL usage only within Kafka.
*/
public final class Crc32C {
private static final ChecksumFactory CHECKSUM_FACTORY;
static {
if (Java.IS_JAVA9_COMPATIBLE)
CHECKSUM_FACTORY = new Java9ChecksumFactory();
else
CHECKSUM_FACTORY = new PureJavaChecksumFactory();
}
private Crc32C() {}
/**
* Compute the CRC32C (Castagnoli) of the segment of the byte array given by the specified size and offset
*
* @param bytes The bytes to checksum
* @param offset the offset at which to begin the checksum computation
* @param size the number of bytes to checksum
* @return The CRC32C
*/
public static long compute(byte[] bytes, int offset, int size) {
Checksum crc = create();
crc.update(bytes, offset, size);
return crc.getValue();
}
/**
* Compute the CRC32C (Castagnoli) of a byte buffer from a given offset (relative to the buffer's current position)
*
* @param buffer The buffer with the underlying data
* @param offset The offset relative to the current position
* @param size The number of bytes beginning from the offset to include
* @return The CRC32C
*/
public static long compute(ByteBuffer buffer, int offset, int size) {
Checksum crc = create();
Checksums.update(crc, buffer, offset, size);
return crc.getValue();
}
public static Checksum create() {
return CHECKSUM_FACTORY.create();
}
private interface ChecksumFactory {
Checksum create();
}
private static class Java9ChecksumFactory implements ChecksumFactory {
private static final MethodHandle CONSTRUCTOR;
static {
try {
Class<?> cls = Class.forName("java.util.zip.CRC32C");
CONSTRUCTOR = MethodHandles.publicLookup().findConstructor(cls, MethodType.methodType(void.class));
} catch (ReflectiveOperationException e) {
// Should never happen
throw new RuntimeException(e);
}
}
@Override
public Checksum create() {
try {
return (Checksum) CONSTRUCTOR.invoke();
} catch (Throwable throwable) {
// Should never happen
throw new RuntimeException(throwable);
}
}
}
private static class PureJavaChecksumFactory implements ChecksumFactory {
@Override
public Checksum create() {
return new PureJavaCrc32C();
}
}
}
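// Illustrative usage sketch, not part of the original Kafka source: the
// one-shot compute() and the streaming Checksum API yield the same value.
// The class name Crc32CExample is hypothetical.
class Crc32CExample {
    public static void main(String[] args) {
        byte[] payload = "kafka".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        long direct = Crc32C.compute(payload, 0, payload.length);
        Checksum streaming = Crc32C.create(); // java.util.zip.CRC32C on Java 9+
        streaming.update(payload, 0, payload.length);
        System.out.println(direct == streaming.getValue()); // true
    }
}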
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Exit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
/**
* Internal class that should be used instead of `System.exit()` and `Runtime.getRuntime().halt()` so that tests can
* easily change the behaviour.
*/
public class Exit {
@FunctionalInterface
public interface Procedure {
void execute(int statusCode, String message);
}
@FunctionalInterface
public interface ShutdownHookAdder {
void addShutdownHook(String name, Runnable runnable);
}
private static final Procedure DEFAULT_HALT_PROCEDURE = (statusCode, message) -> Runtime.getRuntime().halt(statusCode);
private static final Procedure DEFAULT_EXIT_PROCEDURE = (statusCode, message) -> System.exit(statusCode);
private static final ShutdownHookAdder DEFAULT_SHUTDOWN_HOOK_ADDER = (name, runnable) -> {
if (name != null)
Runtime.getRuntime().addShutdownHook(KafkaThread.nonDaemon(name, runnable));
else
Runtime.getRuntime().addShutdownHook(new Thread(runnable));
};
private volatile static Procedure exitProcedure = DEFAULT_EXIT_PROCEDURE;
private volatile static Procedure haltProcedure = DEFAULT_HALT_PROCEDURE;
private volatile static ShutdownHookAdder shutdownHookAdder = DEFAULT_SHUTDOWN_HOOK_ADDER;
public static void exit(int statusCode) {
exit(statusCode, null);
}
public static void exit(int statusCode, String message) {
exitProcedure.execute(statusCode, message);
}
public static void halt(int statusCode) {
halt(statusCode, null);
}
public static void halt(int statusCode, String message) {
haltProcedure.execute(statusCode, message);
}
public static void addShutdownHook(String name, Runnable runnable) {
shutdownHookAdder.addShutdownHook(name, runnable);
}
public static void setExitProcedure(Procedure procedure) {
exitProcedure = procedure;
}
public static void setHaltProcedure(Procedure procedure) {
haltProcedure = procedure;
}
public static void setShutdownHookAdder(ShutdownHookAdder shutdownHookAdder) {
Exit.shutdownHookAdder = shutdownHookAdder;
}
public static void resetExitProcedure() {
exitProcedure = DEFAULT_EXIT_PROCEDURE;
}
public static void resetHaltProcedure() {
haltProcedure = DEFAULT_HALT_PROCEDURE;
}
public static void resetShutdownHookAdder() {
shutdownHookAdder = DEFAULT_SHUTDOWN_HOOK_ADDER;
}
}
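// Illustrative usage sketch, not part of the original Kafka source: a test
// can install a recording procedure so that code calling Exit.exit() does
// not actually terminate the JVM. The class name ExitExample is hypothetical.
class ExitExample {
    public static void main(String[] args) {
        Exit.setExitProcedure((statusCode, message) ->
            System.out.println("exit requested: " + statusCode + " / " + message));
        try {
            Exit.exit(1, "simulated fatal error"); // intercepted; the JVM keeps running
        } finally {
            Exit.resetExitProcedure(); // restore the real System.exit() behaviour
        }
    }
}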
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ExponentialBackoff.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.concurrent.ThreadLocalRandom;
/**
* A utility class for keeping the parameters and providing the value of exponential
* retry backoff, exponential reconnect backoff, exponential timeout, etc.
* <p>
* The formula is:
* <pre>Backoff(attempts) = random(1 - jitter, 1 + jitter) * initialInterval * multiplier ^ attempts</pre>
* If {@code initialInterval} is greater than or equal to {@code maxInterval}, a constant backoff of
* {@code initialInterval} will be provided.
* <p>
* This class is thread-safe.
*/
public class ExponentialBackoff {
private final int multiplier;
private final double expMax;
private final long initialInterval;
private final double jitter;
public ExponentialBackoff(long initialInterval, int multiplier, long maxInterval, double jitter) {
this.initialInterval = initialInterval;
this.multiplier = multiplier;
this.jitter = jitter;
this.expMax = maxInterval > initialInterval ?
Math.log(maxInterval / (double) Math.max(initialInterval, 1)) / Math.log(multiplier) : 0;
}
public long backoff(long attempts) {
if (expMax == 0) {
return initialInterval;
}
double exp = Math.min(attempts, this.expMax);
double term = initialInterval * Math.pow(multiplier, exp);
double randomFactor = jitter < Double.MIN_NORMAL ? 1.0 :
ThreadLocalRandom.current().nextDouble(1 - jitter, 1 + jitter);
return (long) (randomFactor * term);
}
}
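// Illustrative usage sketch, not part of the original Kafka source: backoff
// grows as initialInterval * multiplier^attempts, capped near maxInterval,
// with +/-20% jitter. The parameter values below are hypothetical.
class ExponentialBackoffExample {
    public static void main(String[] args) {
        ExponentialBackoff backoff = new ExponentialBackoff(
            100L,    // initialInterval (ms)
            2,       // multiplier
            10_000L, // maxInterval (ms)
            0.2);    // jitter
        for (long attempts = 0; attempts < 8; attempts++) {
            // Roughly 100, 200, 400, ... ms, saturating near 10 s, each +/-20%.
            System.out.println("attempt " + attempts + ": " + backoff.backoff(attempts) + " ms");
        }
    }
}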
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/FixedOrderMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* An ordered map (LinkedHashMap) implementation for which the order is immutable.
* To accomplish this, all methods of removing mappings are disabled (they are marked
* deprecated and throw an exception).
*
* This class is final to prevent subclasses from violating the desired property.
*
* @param <K> The key type
* @param <V> The value type
*/
public final class FixedOrderMap<K, V> extends LinkedHashMap<K, V> {
private static final long serialVersionUID = -6504110858733236170L;
@Deprecated
@Override
protected boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
return false;
}
@Deprecated
@Override
public V remove(final Object key) {
throw new UnsupportedOperationException("Removing from registeredStores is not allowed");
}
@Deprecated
@Override
public boolean remove(final Object key, final Object value) {
throw new UnsupportedOperationException("Removing from registeredStores is not allowed");
}
@Override
public FixedOrderMap<K, V> clone() {
throw new UnsupportedOperationException();
}
}
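// Illustrative usage sketch, not part of the original Kafka source:
// insertion order is preserved and all removal paths throw. The class name
// FixedOrderMapExample and the keys are hypothetical.
class FixedOrderMapExample {
    public static void main(String[] args) {
        FixedOrderMap<String, Integer> stores = new FixedOrderMap<>();
        stores.put("store-a", 1);
        stores.put("store-b", 2);
        System.out.println(stores.keySet()); // [store-a, store-b], insertion order
        try {
            stores.remove("store-a"); // deprecated and disabled
        } catch (UnsupportedOperationException e) {
            System.out.println("removal rejected: " + e.getMessage());
        }
    }
}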
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/FlattenedIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.Iterator;
import java.util.function.Function;
/**
* Provides a flattened iterator over the inner elements of an outer iterator.
*/
public final class FlattenedIterator<O, I> extends AbstractIterator<I> {
private final Iterator<O> outerIterator;
private final Function<O, Iterator<I>> innerIteratorFunction;
private Iterator<I> innerIterator;
public FlattenedIterator(Iterator<O> outerIterator, Function<O, Iterator<I>> innerIteratorFunction) {
this.outerIterator = outerIterator;
this.innerIteratorFunction = innerIteratorFunction;
}
@Override
public I makeNext() {
while (innerIterator == null || !innerIterator.hasNext()) {
if (outerIterator.hasNext())
innerIterator = innerIteratorFunction.apply(outerIterator.next());
else
return allDone();
}
return innerIterator.next();
}
}
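// Illustrative usage sketch, not part of the original Kafka source: flattens
// an iterator over lists into a single iterator over their elements, skipping
// empty inner collections. The class name FlattenedIteratorExample is
// hypothetical.
class FlattenedIteratorExample {
    public static void main(String[] args) {
        java.util.List<java.util.List<String>> nested = java.util.Arrays.asList(
            java.util.Arrays.asList("a", "b"),
            java.util.Collections.<String>emptyList(),
            java.util.Arrays.asList("c"));
        FlattenedIterator<java.util.List<String>, String> flat =
            new FlattenedIterator<>(nested.iterator(), java.util.List::iterator);
        while (flat.hasNext())
            System.out.print(flat.next() + " "); // a b c
    }
}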
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ImplicitLinkedHashCollection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.AbstractCollection;
import java.util.AbstractSequentialList;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.NoSuchElementException;
import java.util.Set;
/**
* A memory-efficient hash set which tracks the order of insertion of elements.
*
* Like java.util.LinkedHashSet, this collection maintains a linked list of elements.
* However, rather than using a separate linked list, this collection embeds the next
* and previous fields into the elements themselves. This reduces memory consumption,
* because it means that we only have to store one Java object per element, rather
* than multiple.
*
* The next and previous fields are stored as array indices rather than pointers.
* This ensures that the fields only take 32 bits, even when pointers are 64 bits.
* It also makes the garbage collector's job easier, because it reduces the number of
* pointers that it must chase.
*
* This class uses linear probing. Unlike HashMap (but like Hashtable), we don't force
* the size to be a power of 2. This saves memory.
*
* This set does not allow null elements. It does not have internal synchronization.
*/
public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection.Element> extends AbstractCollection<E> {
/**
* The interface which elements of this collection must implement. The prev,
* setPrev, next, and setNext functions handle manipulating the implicit linked
* list which these elements reside in inside the collection.
* elementKeysAreEqual() is the function which this collection uses to compare
* elements.
*/
public interface Element {
int prev();
void setPrev(int prev);
int next();
void setNext(int next);
default boolean elementKeysAreEqual(Object other) {
return equals(other);
}
}
/**
* A special index value used to indicate that the next or previous field is
* the head.
*/
private static final int HEAD_INDEX = -1;
/**
* A special index value used for next and previous indices which have not
* been initialized.
*/
public static final int INVALID_INDEX = -2;
/**
* The minimum new capacity for a non-empty implicit hash set.
*/
private static final int MIN_NONEMPTY_CAPACITY = 5;
/**
* A static empty array used to avoid object allocations when the capacity is zero.
*/
private static final Element[] EMPTY_ELEMENTS = new Element[0];
private static class HeadElement implements Element {
static final HeadElement EMPTY = new HeadElement();
private int prev = HEAD_INDEX;
private int next = HEAD_INDEX;
@Override
public int prev() {
return prev;
}
@Override
public void setPrev(int prev) {
this.prev = prev;
}
@Override
public int next() {
return next;
}
@Override
public void setNext(int next) {
this.next = next;
}
}
private static Element indexToElement(Element head, Element[] elements, int index) {
if (index == HEAD_INDEX) {
return head;
}
return elements[index];
}
private static void addToListTail(Element head, Element[] elements, int elementIdx) {
int oldTailIdx = head.prev();
Element element = indexToElement(head, elements, elementIdx);
Element oldTail = indexToElement(head, elements, oldTailIdx);
head.setPrev(elementIdx);
oldTail.setNext(elementIdx);
element.setPrev(oldTailIdx);
element.setNext(HEAD_INDEX);
}
private static void removeFromList(Element head, Element[] elements, int elementIdx) {
Element element = indexToElement(head, elements, elementIdx);
elements[elementIdx] = null;
int prevIdx = element.prev();
int nextIdx = element.next();
Element prev = indexToElement(head, elements, prevIdx);
Element next = indexToElement(head, elements, nextIdx);
prev.setNext(nextIdx);
next.setPrev(prevIdx);
element.setNext(INVALID_INDEX);
element.setPrev(INVALID_INDEX);
}
private class ImplicitLinkedHashCollectionIterator implements ListIterator<E> {
private int index = 0;
private Element cur;
private Element lastReturned;
ImplicitLinkedHashCollectionIterator(int index) {
this.cur = indexToElement(head, elements, head.next());
for (int i = 0; i < index; ++i) {
next();
}
this.lastReturned = null;
}
@Override
public boolean hasNext() {
return cur != head;
}
@Override
public boolean hasPrevious() {
return indexToElement(head, elements, cur.prev()) != head;
}
@Override
public E next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
@SuppressWarnings("unchecked")
E returnValue = (E) cur;
lastReturned = cur;
cur = indexToElement(head, elements, cur.next());
++index;
return returnValue;
}
@Override
public E previous() {
Element prev = indexToElement(head, elements, cur.prev());
if (prev == head) {
throw new NoSuchElementException();
}
cur = prev;
--index;
lastReturned = cur;
@SuppressWarnings("unchecked")
E returnValue = (E) cur;
return returnValue;
}
@Override
public int nextIndex() {
return index;
}
@Override
public int previousIndex() {
return index - 1;
}
@Override
public void remove() {
if (lastReturned == null) {
throw new IllegalStateException();
}
Element nextElement = indexToElement(head, elements, lastReturned.next());
ImplicitLinkedHashCollection.this.removeElementAtSlot(nextElement.prev());
if (lastReturned == cur) {
// If the element we are removing was cur, set cur to cur->next.
cur = nextElement;
} else {
// If the element we are removing comes before cur, decrement the index,
// since there are now fewer entries before cur.
--index;
}
lastReturned = null;
}
@Override
public void set(E e) {
throw new UnsupportedOperationException();
}
@Override
public void add(E e) {
throw new UnsupportedOperationException();
}
}
private class ImplicitLinkedHashCollectionListView extends AbstractSequentialList<E> {
@Override
public ListIterator<E> listIterator(int index) {
if (index < 0 || index > size) {
throw new IndexOutOfBoundsException();
}
return ImplicitLinkedHashCollection.this.listIterator(index);
}
@Override
public int size() {
return size;
}
}
private class ImplicitLinkedHashCollectionSetView extends AbstractSet<E> {
@Override
public Iterator<E> iterator() {
return ImplicitLinkedHashCollection.this.iterator();
}
@Override
public int size() {
return size;
}
@Override
public boolean add(E newElement) {
return ImplicitLinkedHashCollection.this.add(newElement);
}
@Override
public boolean remove(Object key) {
return ImplicitLinkedHashCollection.this.remove(key);
}
@Override
public boolean contains(Object key) {
return ImplicitLinkedHashCollection.this.contains(key);
}
@Override
public void clear() {
ImplicitLinkedHashCollection.this.clear();
}
}
private Element head;
Element[] elements;
private int size;
/**
* Returns an iterator that will yield every element in the set.
* The elements will be returned in the order that they were inserted in.
*
* Do not modify the set while you are iterating over it (except by calling
* remove on the iterator itself, of course.)
*/
@Override
final public Iterator<E> iterator() {
return listIterator(0);
}
private ListIterator<E> listIterator(int index) {
return new ImplicitLinkedHashCollectionIterator(index);
}
final int slot(Element[] curElements, Object e) {
return (e.hashCode() & 0x7fffffff) % curElements.length;
}
/**
* Find an element matching an example element.
*
* Using the element's hash code, we can look up the slot where it belongs.
* However, it may not have ended up in exactly this slot, due to a collision.
* Therefore, we must search forward in the array until we hit a null, before
* concluding that the element is not present.
*
* @param key The element to match.
* @return The match index, or INVALID_INDEX if no match was found.
*/
private int findIndexOfEqualElement(Object key) {
if (key == null || size == 0) {
return INVALID_INDEX;
}
int slot = slot(elements, key);
for (int seen = 0; seen < elements.length; seen++) {
Element element = elements[slot];
if (element == null) {
return INVALID_INDEX;
}
if (element.elementKeysAreEqual(key)) {
return slot;
}
slot = (slot + 1) % elements.length;
}
return INVALID_INDEX;
}
/**
* An element e in the collection such that e.elementKeysAreEqual(key) and
* e.hashCode() == key.hashCode().
*
* @param key The element to match.
* @return The matching element, or null if there were none.
*/
final public E find(E key) {
int index = findIndexOfEqualElement(key);
if (index == INVALID_INDEX) {
return null;
}
@SuppressWarnings("unchecked")
E result = (E) elements[index];
return result;
}
/**
* Returns the number of elements in the set.
*/
@Override
final public int size() {
return size;
}
/**
* Returns true if there is at least one element e in the collection such
* that key.elementKeysAreEqual(e) and key.hashCode() == e.hashCode().
*
* @param key The object to try to match.
*/
@Override
final public boolean contains(Object key) {
return findIndexOfEqualElement(key) != INVALID_INDEX;
}
private static int calculateCapacity(int expectedNumElements) {
// Avoid using even-sized capacities, to get better key distribution.
int newCapacity = (2 * expectedNumElements) + 1;
// Don't use a capacity that is too small.
return Math.max(newCapacity, MIN_NONEMPTY_CAPACITY);
}
/**
* Add a new element to the collection.
*
* @param newElement The new element.
*
* @return True if the element was added to the collection;
* false if it was not, because there was an existing equal element.
*/
@Override
final public boolean add(E newElement) {
if (newElement == null) {
return false;
}
if (newElement.prev() != INVALID_INDEX || newElement.next() != INVALID_INDEX) {
return false;
}
if ((size + 1) >= elements.length / 2) {
changeCapacity(calculateCapacity(elements.length));
}
int slot = addInternal(newElement, elements);
if (slot >= 0) {
addToListTail(head, elements, slot);
size++;
return true;
}
return false;
}
final public void mustAdd(E newElement) {
if (!add(newElement)) {
throw new RuntimeException("Unable to add " + newElement);
}
}
/**
* Adds a new element to the appropriate place in the elements array.
*
* @param newElement The new element to add.
* @param addElements The elements array.
* @return The index at which the element was inserted, or INVALID_INDEX
* if the element could not be inserted.
*/
int addInternal(Element newElement, Element[] addElements) {
int slot = slot(addElements, newElement);
for (int seen = 0; seen < addElements.length; seen++) {
Element element = addElements[slot];
if (element == null) {
addElements[slot] = newElement;
return slot;
}
if (element.elementKeysAreEqual(newElement)) {
return INVALID_INDEX;
}
slot = (slot + 1) % addElements.length;
}
throw new RuntimeException("Not enough hash table slots to add a new element.");
}
private void changeCapacity(int newCapacity) {
Element[] newElements = new Element[newCapacity];
HeadElement newHead = new HeadElement();
int oldSize = size;
for (Iterator<E> iter = iterator(); iter.hasNext(); ) {
Element element = iter.next();
iter.remove();
int newSlot = addInternal(element, newElements);
addToListTail(newHead, newElements, newSlot);
}
this.elements = newElements;
this.head = newHead;
this.size = oldSize;
}
/**
* Remove the first element e such that key.elementKeysAreEqual(e)
* and key.hashCode == e.hashCode.
*
* @param key The object to try to match.
* @return True if an element was removed; false otherwise.
*/
@Override
final public boolean remove(Object key) {
int slot = findElementToRemove(key);
if (slot == INVALID_INDEX) {
return false;
}
removeElementAtSlot(slot);
return true;
}
int findElementToRemove(Object key) {
return findIndexOfEqualElement(key);
}
/**
* Remove an element in a particular slot.
*
* @param slot The slot of the element to remove.
*
* @return True if an element was removed; false otherwise.
*/
private boolean removeElementAtSlot(int slot) {
size--;
removeFromList(head, elements, slot);
slot = (slot + 1) % elements.length;
// Find the next empty slot
int endSlot = slot;
for (int seen = 0; seen < elements.length; seen++) {
Element element = elements[endSlot];
if (element == null) {
break;
}
endSlot = (endSlot + 1) % elements.length;
}
// We must preserve the denseness invariant. The denseness invariant says that
// any element is either in the slot indicated by its hash code, or a slot which
// is not separated from that slot by any nulls.
// Reseat all elements in between the deleted element and the next empty slot.
while (slot != endSlot) {
reseat(slot);
slot = (slot + 1) % elements.length;
}
return true;
}
private void reseat(int prevSlot) {
Element element = elements[prevSlot];
int newSlot = slot(elements, element);
for (int seen = 0; seen < elements.length; seen++) {
Element e = elements[newSlot];
if ((e == null) || (e == element)) {
break;
}
newSlot = (newSlot + 1) % elements.length;
}
if (newSlot == prevSlot) {
return;
}
Element prev = indexToElement(head, elements, element.prev());
prev.setNext(newSlot);
Element next = indexToElement(head, elements, element.next());
next.setPrev(newSlot);
elements[prevSlot] = null;
elements[newSlot] = element;
}
/**
* Create a new ImplicitLinkedHashCollection.
*/
public ImplicitLinkedHashCollection() {
this(0);
}
/**
* Create a new ImplicitLinkedHashCollection.
*
* @param expectedNumElements The number of elements we expect to have in this set.
* This is used to optimize by setting the capacity ahead
* of time rather than growing incrementally.
*/
public ImplicitLinkedHashCollection(int expectedNumElements) {
clear(expectedNumElements);
}
/**
* Create a new ImplicitLinkedHashCollection.
*
* @param iter We will add all the elements accessible through this iterator
* to the set.
*/
public ImplicitLinkedHashCollection(Iterator<E> iter) {
clear(0);
while (iter.hasNext()) {
mustAdd(iter.next());
}
}
/**
* Removes all of the elements from this set.
*/
@Override
final public void clear() {
clear(elements.length);
}
/**
* Moves an element which is already in the collection so that it comes last
* in iteration order.
*/
final public void moveToEnd(E element) {
if (element.prev() == INVALID_INDEX || element.next() == INVALID_INDEX) {
throw new RuntimeException("Element " + element + " is not in the collection.");
}
Element prevElement = indexToElement(head, elements, element.prev());
Element nextElement = indexToElement(head, elements, element.next());
int slot = prevElement.next();
prevElement.setNext(element.next());
nextElement.setPrev(element.prev());
addToListTail(head, elements, slot);
}
/**
* Removes all of the elements from this set, and resets the set capacity
* based on the provided expected number of elements.
*/
final public void clear(int expectedNumElements) {
if (expectedNumElements == 0) {
// Optimize away object allocations for empty sets.
this.head = HeadElement.EMPTY;
this.elements = EMPTY_ELEMENTS;
this.size = 0;
} else {
this.head = new HeadElement();
this.elements = new Element[calculateCapacity(expectedNumElements)];
this.size = 0;
}
}
/**
* Compares the specified object with this collection for equality. Two
* {@code ImplicitLinkedHashCollection} objects are equal if they contain the
* same elements (as determined by the element's {@code equals} method), and
* those elements were inserted in the same order. Because
* {@code ImplicitLinkedHashCollectionIterator} iterates over the elements
* in insertion order, it is sufficient to call {@code valuesList.equals}.
*
* Note that {@link ImplicitLinkedHashMultiCollection} does not override
* {@code equals} and uses this method as well. This means that two
* {@code ImplicitLinkedHashMultiCollection} objects will be considered equal even
* if they each contain two elements A and B such that A.equals(B) but A != B and
* A and B have switched insertion positions between the two collections. This
* is an acceptable definition of equality, because the collections are still
* equal in terms of the order and value of each element.
*
* @param o object to be compared for equality with this collection
* @return true if the specified object is equal to this collection
*/
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof ImplicitLinkedHashCollection))
return false;
ImplicitLinkedHashCollection<?> ilhs = (ImplicitLinkedHashCollection<?>) o;
return this.valuesList().equals(ilhs.valuesList());
}
/**
* Returns the hash code value for this collection. Because
* {@code ImplicitLinkedHashCollection.equals} compares the {@code valuesList}
* of two {@code ImplicitLinkedHashCollection} objects to determine equality,
* this method uses the {@code valuesList} to compute the hash code value as well.
*
* @return the hash code value for this collection
*/
@Override
public int hashCode() {
return this.valuesList().hashCode();
}
// Visible for testing
final int numSlots() {
return elements.length;
}
/**
* Returns a {@link List} view of the elements contained in the collection,
* ordered by order of insertion into the collection. The list is backed by the
* collection, so changes to the collection are reflected in the list and
* vice-versa. The list supports element removal, which removes the corresponding
* element from the collection, but does not support the {@code add} or
* {@code set} operations.
*
* The list is implemented as a circular linked list, so all index-based
* operations, such as {@code List.get}, run in O(n) time.
*
* @return a list view of the elements contained in this collection
*/
public List<E> valuesList() {
return new ImplicitLinkedHashCollectionListView();
}
/**
* Returns a {@link Set} view of the elements contained in the collection. The
* set is backed by the collection, so changes to the collection are reflected in
* the set, and vice versa. The set supports element removal and addition, which
* removes from or adds to the collection, respectively.
*
* @return a set view of the elements contained in this collection
*/
public Set<E> valuesSet() {
return new ImplicitLinkedHashCollectionSetView();
}
public void sort(Comparator<E> comparator) {
ArrayList<E> array = new ArrayList<>(size);
Iterator<E> iterator = iterator();
while (iterator.hasNext()) {
E e = iterator.next();
iterator.remove();
array.add(e);
}
array.sort(comparator);
for (E e : array) {
add(e);
}
}
}
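// Illustrative usage sketch, not part of the original Kafka source: elements
// carry their own prev/next slot indices by implementing Element, and both
// indices must start out as INVALID_INDEX. The class names StringElement and
// ImplicitLinkedHashCollectionExample are hypothetical.
class ImplicitLinkedHashCollectionExample {
    static final class StringElement implements ImplicitLinkedHashCollection.Element {
        private final String value;
        private int prev = ImplicitLinkedHashCollection.INVALID_INDEX;
        private int next = ImplicitLinkedHashCollection.INVALID_INDEX;
        StringElement(String value) {
            this.value = value;
        }
        @Override
        public int prev() {
            return prev;
        }
        @Override
        public void setPrev(int prev) {
            this.prev = prev;
        }
        @Override
        public int next() {
            return next;
        }
        @Override
        public void setNext(int next) {
            this.next = next;
        }
        @Override
        public boolean equals(Object o) {
            return o instanceof StringElement && ((StringElement) o).value.equals(value);
        }
        @Override
        public int hashCode() {
            return value.hashCode();
        }
        @Override
        public String toString() {
            return value;
        }
    }
    public static void main(String[] args) {
        ImplicitLinkedHashCollection<StringElement> coll = new ImplicitLinkedHashCollection<>();
        coll.add(new StringElement("a"));
        coll.add(new StringElement("b"));
        boolean added = coll.add(new StringElement("a")); // rejected: an equal element exists
        System.out.println(added);             // false
        System.out.println(coll.valuesList()); // [a, b], insertion order
    }
}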
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ImplicitLinkedHashMultiCollection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
* A memory-efficient hash multiset which tracks the order of insertion of elements.
* See org.apache.kafka.common.utils.ImplicitLinkedHashCollection for implementation details.
*
* This class is a multi-set because it allows multiple elements to be inserted that
* have equivalent keys.
*
* We use reference equality when adding elements to the set. A new element A can
* be added if there is no existing element B such that A == B. If an element B
* exists such that A.elementKeysAreEqual(B), A will still be added.
*
* When deleting an element A from the set, we will try to delete the element B such
* that A == B. If no such element can be found, we will try to delete an element B
* such that A.elementKeysAreEqual(B).
*
* contains() and find() are unchanged from the base class; they will look for an element
* based on object equality via elementKeysAreEqual, not reference equality.
*
* This multiset does not allow null elements. It does not have internal synchronization.
*/
public class ImplicitLinkedHashMultiCollection<E extends ImplicitLinkedHashCollection.Element>
extends ImplicitLinkedHashCollection<E> {
public ImplicitLinkedHashMultiCollection() {
super(0);
}
public ImplicitLinkedHashMultiCollection(int expectedNumElements) {
super(expectedNumElements);
}
public ImplicitLinkedHashMultiCollection(Iterator<E> iter) {
super(iter);
}
/**
* Adds a new element to the appropriate place in the elements array.
*
* @param newElement The new element to add.
* @param addElements The elements array.
* @return The index at which the element was inserted, or INVALID_INDEX
* if the element could not be inserted.
*/
@Override
int addInternal(Element newElement, Element[] addElements) {
int slot = slot(addElements, newElement);
for (int seen = 0; seen < addElements.length; seen++) {
Element element = addElements[slot];
if (element == null) {
addElements[slot] = newElement;
return slot;
}
if (element == newElement) {
return INVALID_INDEX;
}
slot = (slot + 1) % addElements.length;
}
throw new RuntimeException("Not enough hash table slots to add a new element.");
}
/**
* Find an element matching an example element.
*
* @param key The element to match.
*
* @return The match index, or INVALID_INDEX if no match was found.
*/
@Override
int findElementToRemove(Object key) {
if (key == null || size() == 0) {
return INVALID_INDEX;
}
int slot = slot(elements, key);
int bestSlot = INVALID_INDEX;
for (int seen = 0; seen < elements.length; seen++) {
Element element = elements[slot];
if (element == null) {
return bestSlot;
}
if (key == element) {
return slot;
} else if (element.elementKeysAreEqual(key)) {
bestSlot = slot;
}
slot = (slot + 1) % elements.length;
}
return INVALID_INDEX;
}
/**
* Returns all of the elements e in the collection such that
* key.elementKeysAreEqual(e) and key.hashCode() == e.hashCode().
*
* @param key The element to match.
*
* @return All of the matching elements.
*/
final public List<E> findAll(E key) {
if (key == null || size() == 0) {
return Collections.emptyList();
}
ArrayList<E> results = new ArrayList<>();
int slot = slot(elements, key);
for (int seen = 0; seen < elements.length; seen++) {
Element element = elements[slot];
if (element == null) {
break;
}
if (key.elementKeysAreEqual(element)) {
@SuppressWarnings("unchecked")
E result = (E) elements[slot];
results.add(result);
}
slot = (slot + 1) % elements.length;
}
return results;
}
}
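// Illustrative usage sketch, not part of the original Kafka source: unlike
// the base collection, two distinct objects with equal keys can both be
// inserted, and findAll() returns every match. Reuses the hypothetical
// StringElement from the ImplicitLinkedHashCollectionExample sketch above.
class ImplicitLinkedHashMultiCollectionExample {
    public static void main(String[] args) {
        ImplicitLinkedHashCollectionExample.StringElement first =
            new ImplicitLinkedHashCollectionExample.StringElement("a");
        ImplicitLinkedHashCollectionExample.StringElement second =
            new ImplicitLinkedHashCollectionExample.StringElement("a");
        ImplicitLinkedHashMultiCollection<ImplicitLinkedHashCollectionExample.StringElement> coll =
            new ImplicitLinkedHashMultiCollection<>();
        coll.add(first);  // added
        coll.add(second); // also added: insertion is gated on reference equality
        System.out.println(coll.findAll(first).size()); // 2, both match by key
    }
}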
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Java.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.StringTokenizer;
public final class Java {
private Java() { }
private static final Version VERSION = parseVersion(System.getProperty("java.specification.version"));
// Package private for testing
static Version parseVersion(String versionString) {
final StringTokenizer st = new StringTokenizer(versionString, ".");
int majorVersion = Integer.parseInt(st.nextToken());
int minorVersion;
if (st.hasMoreTokens())
minorVersion = Integer.parseInt(st.nextToken());
else
minorVersion = 0;
return new Version(majorVersion, minorVersion);
}
// Having these as static final provides the best opportunity for compiler optimization
public static final boolean IS_JAVA9_COMPATIBLE = VERSION.isJava9Compatible();
public static final boolean IS_JAVA11_COMPATIBLE = VERSION.isJava11Compatible();
public static boolean isIbmJdk() {
return System.getProperty("java.vendor").contains("IBM");
}
public static boolean isIbmJdkSemeru() {
return isIbmJdk() && System.getProperty("java.runtime.name", "").contains("Semeru");
}
// Package private for testing
static class Version {
public final int majorVersion;
public final int minorVersion;
private Version(int majorVersion, int minorVersion) {
this.majorVersion = majorVersion;
this.minorVersion = minorVersion;
}
@Override
public String toString() {
return "Version(majorVersion=" + majorVersion +
", minorVersion=" + minorVersion + ")";
}
// Package private for testing
boolean isJava9Compatible() {
return majorVersion >= 9;
}
boolean isJava11Compatible() {
return majorVersion >= 11;
}
}
}
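// Illustrative usage sketch, not part of the original Kafka source: the
// precomputed flags let callers branch on the runtime's Java version without
// re-parsing system properties. The class name JavaVersionExample is
// hypothetical.
class JavaVersionExample {
    public static void main(String[] args) {
        if (Java.IS_JAVA9_COMPATIBLE)
            System.out.println("Java 9+: java.util.zip.CRC32C is available");
        else
            System.out.println("Java 8: falling back to PureJavaCrc32C");
        System.out.println("IBM JDK: " + Java.isIbmJdk());
    }
}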
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/KafkaThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A wrapper for Thread that sets things up nicely
*/
public class KafkaThread extends Thread {
private final Logger log = LoggerFactory.getLogger(getClass());
public static KafkaThread daemon(final String name, Runnable runnable) {
return new KafkaThread(name, runnable, true);
}
public static KafkaThread nonDaemon(final String name, Runnable runnable) {
return new KafkaThread(name, runnable, false);
}
public KafkaThread(final String name, boolean daemon) {
super(name);
configureThread(name, daemon);
}
public KafkaThread(final String name, Runnable runnable, boolean daemon) {
super(runnable, name);
configureThread(name, daemon);
}
private void configureThread(final String name, boolean daemon) {
setDaemon(daemon);
setUncaughtExceptionHandler((t, e) -> log.error("Uncaught exception in thread '{}':", name, e));
}
}
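// Illustrative usage sketch, not part of the original Kafka source: creates
// a named daemon thread whose uncaught exceptions are logged instead of
// being silently dropped. The thread name "metrics-reporter" is hypothetical.
class KafkaThreadExample {
    public static void main(String[] args) throws InterruptedException {
        KafkaThread t = KafkaThread.daemon("metrics-reporter", () -> {
            throw new IllegalStateException("boom"); // logged by the handler, not swallowed
        });
        t.start();
        t.join();
    }
}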
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/LogContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.helpers.FormattingTuple;
import org.slf4j.helpers.MessageFormatter;
import org.slf4j.spi.LocationAwareLogger;
/**
* This class provides a way to instrument loggers with a common context which can be used to
* automatically enrich log messages. For example, in the KafkaConsumer, it is often useful to know
* the groupId of the consumer, so this can be added to a context object which can then be passed to
* all of the dependent components in order to build new loggers. This removes the need to manually
* add the groupId to each message.
*/
public class LogContext {
private final String logPrefix;
public LogContext(String logPrefix) {
this.logPrefix = logPrefix == null ? "" : logPrefix;
}
public LogContext() {
this("");
}
public Logger logger(Class<?> clazz) {
Logger logger = LoggerFactory.getLogger(clazz);
if (logger instanceof LocationAwareLogger) {
return new LocationAwareKafkaLogger(logPrefix, (LocationAwareLogger) logger);
} else {
return new LocationIgnorantKafkaLogger(logPrefix, logger);
}
}
public String logPrefix() {
return logPrefix;
}
private static abstract class AbstractKafkaLogger implements Logger {
private final String prefix;
protected AbstractKafkaLogger(final String prefix) {
this.prefix = prefix;
}
protected String addPrefix(final String message) {
return prefix + message;
}
}
private static class LocationAwareKafkaLogger extends AbstractKafkaLogger {
private final LocationAwareLogger logger;
private final String fqcn;
LocationAwareKafkaLogger(String logPrefix, LocationAwareLogger logger) {
super(logPrefix);
this.logger = logger;
this.fqcn = LocationAwareKafkaLogger.class.getName();
}
@Override
public String getName() {
return logger.getName();
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isTraceEnabled(Marker marker) {
return logger.isTraceEnabled(marker);
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
@Override
public boolean isDebugEnabled(Marker marker) {
return logger.isDebugEnabled(marker);
}
@Override
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
@Override
public boolean isInfoEnabled(Marker marker) {
return logger.isInfoEnabled(marker);
}
@Override
public boolean isWarnEnabled() {
return logger.isWarnEnabled();
}
@Override
public boolean isWarnEnabled(Marker marker) {
return logger.isWarnEnabled(marker);
}
@Override
public boolean isErrorEnabled() {
return logger.isErrorEnabled();
}
@Override
public boolean isErrorEnabled(Marker marker) {
return logger.isErrorEnabled(marker);
}
@Override
public void trace(String message) {
if (logger.isTraceEnabled()) {
writeLog(null, LocationAwareLogger.TRACE_INT, message, null, null);
}
}
@Override
public void trace(String format, Object arg) {
if (logger.isTraceEnabled()) {
writeLog(null, LocationAwareLogger.TRACE_INT, format, new Object[]{arg}, null);
}
}
@Override
public void trace(String format, Object arg1, Object arg2) {
if (logger.isTraceEnabled()) {
writeLog(null, LocationAwareLogger.TRACE_INT, format, new Object[]{arg1, arg2}, null);
}
}
@Override
public void trace(String format, Object... args) {
if (logger.isTraceEnabled()) {
writeLog(null, LocationAwareLogger.TRACE_INT, format, args, null);
}
}
@Override
public void trace(String msg, Throwable t) {
if (logger.isTraceEnabled()) {
writeLog(null, LocationAwareLogger.TRACE_INT, msg, null, t);
}
}
@Override
public void trace(Marker marker, String msg) {
if (logger.isTraceEnabled()) {
writeLog(marker, LocationAwareLogger.TRACE_INT, msg, null, null);
}
}
@Override
public void trace(Marker marker, String format, Object arg) {
if (logger.isTraceEnabled()) {
writeLog(marker, LocationAwareLogger.TRACE_INT, format, new Object[]{arg}, null);
}
}
@Override
public void trace(Marker marker, String format, Object arg1, Object arg2) {
if (logger.isTraceEnabled()) {
writeLog(marker, LocationAwareLogger.TRACE_INT, format, new Object[]{arg1, arg2}, null);
}
}
@Override
public void trace(Marker marker, String format, Object... argArray) {
if (logger.isTraceEnabled()) {
writeLog(marker, LocationAwareLogger.TRACE_INT, format, argArray, null);
}
}
@Override
public void trace(Marker marker, String msg, Throwable t) {
if (logger.isTraceEnabled()) {
writeLog(marker, LocationAwareLogger.TRACE_INT, msg, null, t);
}
}
@Override
public void debug(String message) {
if (logger.isDebugEnabled()) {
writeLog(null, LocationAwareLogger.DEBUG_INT, message, null, null);
}
}
@Override
public void debug(String format, Object arg) {
if (logger.isDebugEnabled()) {
writeLog(null, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg}, null);
}
}
@Override
public void debug(String format, Object arg1, Object arg2) {
if (logger.isDebugEnabled()) {
writeLog(null, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg1, arg2}, null);
}
}
@Override
public void debug(String format, Object... args) {
if (logger.isDebugEnabled()) {
writeLog(null, LocationAwareLogger.DEBUG_INT, format, args, null);
}
}
@Override
public void debug(String msg, Throwable t) {
if (logger.isDebugEnabled()) {
writeLog(null, LocationAwareLogger.DEBUG_INT, msg, null, t);
}
}
@Override
public void debug(Marker marker, String msg) {
if (logger.isDebugEnabled()) {
writeLog(marker, LocationAwareLogger.DEBUG_INT, msg, null, null);
}
}
@Override
public void debug(Marker marker, String format, Object arg) {
if (logger.isDebugEnabled()) {
writeLog(marker, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg}, null);
}
}
@Override
public void debug(Marker marker, String format, Object arg1, Object arg2) {
if (logger.isDebugEnabled()) {
writeLog(marker, LocationAwareLogger.DEBUG_INT, format, new Object[]{arg1, arg2}, null);
}
}
@Override
public void debug(Marker marker, String format, Object... arguments) {
if (logger.isDebugEnabled()) {
writeLog(marker, LocationAwareLogger.DEBUG_INT, format, arguments, null);
}
}
@Override
public void debug(Marker marker, String msg, Throwable t) {
if (logger.isDebugEnabled()) {
writeLog(marker, LocationAwareLogger.DEBUG_INT, msg, null, t);
}
}
@Override
public void warn(String message) {
writeLog(null, LocationAwareLogger.WARN_INT, message, null, null);
}
@Override
public void warn(String format, Object arg) {
writeLog(null, LocationAwareLogger.WARN_INT, format, new Object[]{arg}, null);
}
@Override
public void warn(String message, Object arg1, Object arg2) {
writeLog(null, LocationAwareLogger.WARN_INT, message, new Object[]{arg1, arg2}, null);
}
@Override
public void warn(String format, Object... args) {
writeLog(null, LocationAwareLogger.WARN_INT, format, args, null);
}
@Override
public void warn(String msg, Throwable t) {
writeLog(null, LocationAwareLogger.WARN_INT, msg, null, t);
}
@Override
public void warn(Marker marker, String msg) {
writeLog(marker, LocationAwareLogger.WARN_INT, msg, null, null);
}
@Override
public void warn(Marker marker, String format, Object arg) {
writeLog(marker, LocationAwareLogger.WARN_INT, format, new Object[]{arg}, null);
}
@Override
public void warn(Marker marker, String format, Object arg1, Object arg2) {
writeLog(marker, LocationAwareLogger.WARN_INT, format, new Object[]{arg1, arg2}, null);
}
@Override
public void warn(Marker marker, String format, Object... arguments) {
writeLog(marker, LocationAwareLogger.WARN_INT, format, arguments, null);
}
@Override
public void warn(Marker marker, String msg, Throwable t) {
writeLog(marker, LocationAwareLogger.WARN_INT, msg, null, t);
}
@Override
public void error(String message) {
writeLog(null, LocationAwareLogger.ERROR_INT, message, null, null);
}
@Override
public void error(String format, Object arg) {
writeLog(null, LocationAwareLogger.ERROR_INT, format, new Object[]{arg}, null);
}
@Override
public void error(String format, Object arg1, Object arg2) {
writeLog(null, LocationAwareLogger.ERROR_INT, format, new Object[]{arg1, arg2}, null);
}
@Override
public void error(String format, Object... args) {
writeLog(null, LocationAwareLogger.ERROR_INT, format, args, null);
}
@Override
public void error(String msg, Throwable t) {
writeLog(null, LocationAwareLogger.ERROR_INT, msg, null, t);
}
@Override
public void error(Marker marker, String msg) {
writeLog(marker, LocationAwareLogger.ERROR_INT, msg, null, null);
}
@Override
public void error(Marker marker, String format, Object arg) {
writeLog(marker, LocationAwareLogger.ERROR_INT, format, new Object[]{arg}, null);
}
@Override
public void error(Marker marker, String format, Object arg1, Object arg2) {
writeLog(marker, LocationAwareLogger.ERROR_INT, format, new Object[]{arg1, arg2}, null);
}
@Override
public void error(Marker marker, String format, Object... arguments) {
writeLog(marker, LocationAwareLogger.ERROR_INT, format, arguments, null);
}
@Override
public void error(Marker marker, String msg, Throwable t) {
writeLog(marker, LocationAwareLogger.ERROR_INT, msg, null, t);
}
@Override
public void info(String msg) {
writeLog(null, LocationAwareLogger.INFO_INT, msg, null, null);
}
@Override
public void info(String format, Object arg) {
writeLog(null, LocationAwareLogger.INFO_INT, format, new Object[]{arg}, null);
}
@Override
public void info(String format, Object arg1, Object arg2) {
writeLog(null, LocationAwareLogger.INFO_INT, format, new Object[]{arg1, arg2}, null);
}
@Override
public void info(String format, Object... args) {
writeLog(null, LocationAwareLogger.INFO_INT, format, args, null);
}
@Override
public void info(String msg, Throwable t) {
writeLog(null, LocationAwareLogger.INFO_INT, msg, null, t);
}
@Override
public void info(Marker marker, String msg) {
writeLog(marker, LocationAwareLogger.INFO_INT, msg, null, null);
}
@Override
public void info(Marker marker, String format, Object arg) {
writeLog(marker, LocationAwareLogger.INFO_INT, format, new Object[]{arg}, null);
}
@Override
public void info(Marker marker, String format, Object arg1, Object arg2) {
writeLog(marker, LocationAwareLogger.INFO_INT, format, new Object[]{arg1, arg2}, null);
}
@Override
public void info(Marker marker, String format, Object... arguments) {
writeLog(marker, LocationAwareLogger.INFO_INT, format, arguments, null);
}
@Override
public void info(Marker marker, String msg, Throwable t) {
writeLog(marker, LocationAwareLogger.INFO_INT, msg, null, t);
}
private void writeLog(Marker marker, int level, String format, Object[] args, Throwable exception) {
String message = format;
if (args != null && args.length > 0) {
FormattingTuple formatted = MessageFormatter.arrayFormat(format, args);
if (exception == null && formatted.getThrowable() != null) {
exception = formatted.getThrowable();
}
message = formatted.getMessage();
}
logger.log(marker, fqcn, level, addPrefix(message), null, exception);
}
}
private static class LocationIgnorantKafkaLogger extends AbstractKafkaLogger {
private final Logger logger;
LocationIgnorantKafkaLogger(String logPrefix, Logger logger) {
super(logPrefix);
this.logger = logger;
}
@Override
public String getName() {
return logger.getName();
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isTraceEnabled(Marker marker) {
return logger.isTraceEnabled(marker);
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
@Override
public boolean isDebugEnabled(Marker marker) {
return logger.isDebugEnabled(marker);
}
@Override
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
@Override
public boolean isInfoEnabled(Marker marker) {
return logger.isInfoEnabled(marker);
}
@Override
public boolean isWarnEnabled() {
return logger.isWarnEnabled();
}
@Override
public boolean isWarnEnabled(Marker marker) {
return logger.isWarnEnabled(marker);
}
@Override
public boolean isErrorEnabled() {
return logger.isErrorEnabled();
}
@Override
public boolean isErrorEnabled(Marker marker) {
return logger.isErrorEnabled(marker);
}
@Override
public void trace(String message) {
if (logger.isTraceEnabled()) {
logger.trace(addPrefix(message));
}
}
@Override
public void trace(String message, Object arg) {
if (logger.isTraceEnabled()) {
logger.trace(addPrefix(message), arg);
}
}
@Override
public void trace(String message, Object arg1, Object arg2) {
if (logger.isTraceEnabled()) {
logger.trace(addPrefix(message), arg1, arg2);
}
}
@Override
public void trace(String message, Object... args) {
if (logger.isTraceEnabled()) {
logger.trace(addPrefix(message), args);
}
}
@Override
public void trace(String msg, Throwable t) {
if (logger.isTraceEnabled()) {
logger.trace(addPrefix(msg), t);
}
}
@Override
public void trace(Marker marker, String msg) {
if (logger.isTraceEnabled()) {
logger.trace(marker, addPrefix(msg));
}
}
@Override
public void trace(Marker marker, String format, Object arg) {
if (logger.isTraceEnabled()) {
logger.trace(marker, addPrefix(format), arg);
}
}
@Override
public void trace(Marker marker, String format, Object arg1, Object arg2) {
if (logger.isTraceEnabled()) {
logger.trace(marker, addPrefix(format), arg1, arg2);
}
}
@Override
public void trace(Marker marker, String format, Object... argArray) {
if (logger.isTraceEnabled()) {
logger.trace(marker, addPrefix(format), argArray);
}
}
@Override
public void trace(Marker marker, String msg, Throwable t) {
if (logger.isTraceEnabled()) {
logger.trace(marker, addPrefix(msg), t);
}
}
@Override
public void debug(String message) {
if (logger.isDebugEnabled()) {
logger.debug(addPrefix(message));
}
}
@Override
public void debug(String message, Object arg) {
if (logger.isDebugEnabled()) {
logger.debug(addPrefix(message), arg);
}
}
@Override
public void debug(String message, Object arg1, Object arg2) {
if (logger.isDebugEnabled()) {
logger.debug(addPrefix(message), arg1, arg2);
}
}
@Override
public void debug(String message, Object... args) {
if (logger.isDebugEnabled()) {
logger.debug(addPrefix(message), args);
}
}
@Override
public void debug(String msg, Throwable t) {
if (logger.isDebugEnabled()) {
logger.debug(addPrefix(msg), t);
}
}
@Override
public void debug(Marker marker, String msg) {
if (logger.isDebugEnabled()) {
logger.debug(marker, addPrefix(msg));
}
}
@Override
public void debug(Marker marker, String format, Object arg) {
if (logger.isDebugEnabled()) {
logger.debug(marker, addPrefix(format), arg);
}
}
@Override
public void debug(Marker marker, String format, Object arg1, Object arg2) {
if (logger.isDebugEnabled()) {
logger.debug(marker, addPrefix(format), arg1, arg2);
}
}
@Override
public void debug(Marker marker, String format, Object... arguments) {
if (logger.isDebugEnabled()) {
logger.debug(marker, addPrefix(format), arguments);
}
}
@Override
public void debug(Marker marker, String msg, Throwable t) {
if (logger.isDebugEnabled()) {
logger.debug(marker, addPrefix(msg), t);
}
}
@Override
public void warn(String message) {
logger.warn(addPrefix(message));
}
@Override
public void warn(String message, Object arg) {
logger.warn(addPrefix(message), arg);
}
@Override
public void warn(String message, Object arg1, Object arg2) {
logger.warn(addPrefix(message), arg1, arg2);
}
@Override
public void warn(String message, Object... args) {
logger.warn(addPrefix(message), args);
}
@Override
public void warn(String msg, Throwable t) {
logger.warn(addPrefix(msg), t);
}
@Override
public void warn(Marker marker, String msg) {
logger.warn(marker, addPrefix(msg));
}
@Override
public void warn(Marker marker, String format, Object arg) {
logger.warn(marker, addPrefix(format), arg);
}
@Override
public void warn(Marker marker, String format, Object arg1, Object arg2) {
logger.warn(marker, addPrefix(format), arg1, arg2);
}
@Override
public void warn(Marker marker, String format, Object... arguments) {
logger.warn(marker, addPrefix(format), arguments);
}
@Override
public void warn(Marker marker, String msg, Throwable t) {
logger.warn(marker, addPrefix(msg), t);
}
@Override
public void error(String message) {
logger.error(addPrefix(message));
}
@Override
public void error(String message, Object arg) {
logger.error(addPrefix(message), arg);
}
@Override
public void error(String message, Object arg1, Object arg2) {
logger.error(addPrefix(message), arg1, arg2);
}
@Override
public void error(String message, Object... args) {
logger.error(addPrefix(message), args);
}
@Override
public void error(String msg, Throwable t) {
logger.error(addPrefix(msg), t);
}
@Override
public void error(Marker marker, String msg) {
logger.error(marker, addPrefix(msg));
}
@Override
public void error(Marker marker, String format, Object arg) {
logger.error(marker, addPrefix(format), arg);
}
@Override
public void error(Marker marker, String format, Object arg1, Object arg2) {
logger.error(marker, addPrefix(format), arg1, arg2);
}
@Override
public void error(Marker marker, String format, Object... arguments) {
logger.error(marker, addPrefix(format), arguments);
}
@Override
public void error(Marker marker, String msg, Throwable t) {
logger.error(marker, addPrefix(msg), t);
}
@Override
public void info(String message) {
logger.info(addPrefix(message));
}
@Override
public void info(String message, Object arg) {
logger.info(addPrefix(message), arg);
}
@Override
public void info(String message, Object arg1, Object arg2) {
logger.info(addPrefix(message), arg1, arg2);
}
@Override
public void info(String message, Object... args) {
logger.info(addPrefix(message), args);
}
@Override
public void info(String msg, Throwable t) {
logger.info(addPrefix(msg), t);
}
@Override
public void info(Marker marker, String msg) {
logger.info(marker, addPrefix(msg));
}
@Override
public void info(Marker marker, String format, Object arg) {
logger.info(marker, addPrefix(format), arg);
}
@Override
public void info(Marker marker, String format, Object arg1, Object arg2) {
logger.info(marker, addPrefix(format), arg1, arg2);
}
@Override
public void info(Marker marker, String format, Object... arguments) {
logger.info(marker, addPrefix(format), arguments);
}
@Override
public void info(Marker marker, String msg, Throwable t) {
logger.info(marker, addPrefix(msg), t);
}
}
}
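// Illustrative usage sketch (not part of the original file, and assuming an SLF4J
// binding is present at runtime): every message logged through the returned Logger
// is automatically prefixed with the context string.
class LogContextUsageExample {
    public static void main(String[] args) {
        LogContext logContext = new LogContext("[groupId=example-group] ");
        Logger log = logContext.logger(LogContextUsageExample.class);
        log.info("Rebalance started"); // emitted as "[groupId=example-group] Rebalance started"
    }
}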
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/LoggingSignalHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class LoggingSignalHandler {
private static final Logger log = LoggerFactory.getLogger(LoggingSignalHandler.class);
private static final List<String> SIGNALS = Arrays.asList("TERM", "INT", "HUP");
private final Constructor<?> signalConstructor;
private final Class<?> signalHandlerClass;
private final Class<?> signalClass;
private final Method signalHandleMethod;
private final Method signalGetNameMethod;
private final Method signalHandlerHandleMethod;
/**
* Create an instance of this class.
*
* @throws ReflectiveOperationException if the underlying API has changed in an incompatible manner.
*/
public LoggingSignalHandler() throws ReflectiveOperationException {
signalClass = Class.forName("sun.misc.Signal");
signalConstructor = signalClass.getConstructor(String.class);
signalHandlerClass = Class.forName("sun.misc.SignalHandler");
signalHandlerHandleMethod = signalHandlerClass.getMethod("handle", signalClass);
signalHandleMethod = signalClass.getMethod("handle", signalClass, signalHandlerClass);
signalGetNameMethod = signalClass.getMethod("getName");
}
/**
* Register signal handlers to log termination due to SIGTERM, SIGHUP and SIGINT (control-c). This method
* does not currently work on Windows.
*
* @implNote sun.misc.Signal and sun.misc.SignalHandler are described as "not encapsulated" in
* http://openjdk.java.net/jeps/260. However, they are not available in the compile classpath if the `--release`
* flag is used. As a workaround, we rely on reflection.
*/
public void register() throws ReflectiveOperationException {
Map<String, Object> jvmSignalHandlers = new ConcurrentHashMap<>();
for (String signal : SIGNALS) {
register(signal, jvmSignalHandlers);
}
log.info("Registered signal handlers for " + String.join(", ", SIGNALS));
}
private Object createSignalHandler(final Map<String, Object> jvmSignalHandlers) {
InvocationHandler invocationHandler = new InvocationHandler() {
private String getName(Object signal) throws Throwable {
try {
return (String) signalGetNameMethod.invoke(signal);
} catch (InvocationTargetException e) {
throw e.getCause();
}
}
private void handle(Object signalHandler, Object signal) throws ReflectiveOperationException {
signalHandlerHandleMethod.invoke(signalHandler, signal);
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
Object signal = args[0];
log.info("Terminating process due to signal {}", signal);
Object handler = jvmSignalHandlers.get(getName(signal));
if (handler != null)
handle(handler, signal);
return null;
}
};
return Proxy.newProxyInstance(Utils.getContextOrKafkaClassLoader(), new Class[] {signalHandlerClass},
invocationHandler);
}
private void register(String signalName, final Map<String, Object> jvmSignalHandlers) throws ReflectiveOperationException {
Object signal = signalConstructor.newInstance(signalName);
Object signalHandler = createSignalHandler(jvmSignalHandlers);
Object oldHandler = signalHandleMethod.invoke(null, signal, signalHandler);
if (oldHandler != null)
jvmSignalHandlers.put(signalName, oldHandler);
}
}
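// Illustrative usage sketch (not part of the original file): both the constructor
// and register() are reflective and declare ReflectiveOperationException, so callers
// typically catch it and continue on JVMs (or platforms such as Windows) where
// sun.misc.Signal is unavailable.
class LoggingSignalHandlerUsageExample {
    public static void main(String[] args) {
        try {
            new LoggingSignalHandler().register();
        } catch (ReflectiveOperationException e) {
            System.err.println("Signal handler registration not supported: " + e);
        }
    }
}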
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/MappedIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.Iterator;
import java.util.function.Function;
/**
* An iterator that maps another iterator's elements from type `F` to type `T`.
*/
public final class MappedIterator<F, T> implements Iterator<T> {
private final Iterator<? extends F> underlyingIterator;
private final Function<F, T> mapper;
public MappedIterator(Iterator<? extends F> underlyingIterator, Function<F, T> mapper) {
this.underlyingIterator = underlyingIterator;
this.mapper = mapper;
}
@Override
public boolean hasNext() {
return underlyingIterator.hasNext();
}
@Override
public T next() {
return mapper.apply(underlyingIterator.next());
}
}
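// Illustrative usage sketch (not part of the original file): the mapping function
// is applied lazily, one element per next() call, with no intermediate collection.
class MappedIteratorUsageExample {
    public static void main(String[] args) {
        Iterator<String> names = java.util.Arrays.asList("a", "bb", "ccc").iterator();
        MappedIterator<String, Integer> lengths = new MappedIterator<>(names, String::length);
        while (lengths.hasNext())
            System.out.println(lengths.next()); // prints 1, 2, 3
    }
}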
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/OperatingSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.Locale;
public final class OperatingSystem {
private OperatingSystem() {
}
public static final String NAME;
public static final boolean IS_WINDOWS;
public static final boolean IS_ZOS;
static {
NAME = System.getProperty("os.name").toLowerCase(Locale.ROOT);
IS_WINDOWS = NAME.startsWith("windows");
IS_ZOS = NAME.startsWith("z/os");
}
}
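// Illustrative usage sketch (not part of the original file): the flags are computed
// once at class-load time from the os.name system property, so checks are cheap.
class OperatingSystemUsageExample {
    public static void main(String[] args) {
        System.out.println("os=" + OperatingSystem.NAME
                + " windows=" + OperatingSystem.IS_WINDOWS
                + " z/os=" + OperatingSystem.IS_ZOS);
    }
}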
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/PrimitiveRef.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
/**
* Primitive reference used to pass primitive typed values as parameters by reference.
*
* This is cheaper than using atomic references.
*/
public class PrimitiveRef {
public static IntRef ofInt(int value) {
return new IntRef(value);
}
public static LongRef ofLong(long value) {
return new LongRef(value);
}
public static class IntRef {
public int value;
IntRef(int value) {
this.value = value;
}
@Override
public String toString() {
return "IntRef(" + value + ")";
}
}
public static class LongRef {
public long value;
LongRef(long value) {
this.value = value;
}
@Override
public String toString() {
return "LongRef(" + value + ")";
}
}
}
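// Illustrative usage sketch (not part of the original file): an IntRef lets a callee
// update a primitive owned by the caller without the allocation and volatile-access
// cost of AtomicInteger, which is what the class comment above means by "cheaper".
class PrimitiveRefUsageExample {
    static void addRecord(PrimitiveRef.IntRef bytesWritten, int recordSize) {
        bytesWritten.value += recordSize; // mutation is visible to the caller
    }
    public static void main(String[] args) {
        PrimitiveRef.IntRef bytesWritten = PrimitiveRef.ofInt(0);
        addRecord(bytesWritten, 42);
        addRecord(bytesWritten, 58);
        System.out.println(bytesWritten); // IntRef(100)
    }
}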
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ProducerIdAndEpoch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.apache.kafka.common.record.RecordBatch;
public class ProducerIdAndEpoch {
public static final ProducerIdAndEpoch NONE = new ProducerIdAndEpoch(RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH);
public final long producerId;
public final short epoch;
public ProducerIdAndEpoch(long producerId, short epoch) {
this.producerId = producerId;
this.epoch = epoch;
}
public boolean isValid() {
return RecordBatch.NO_PRODUCER_ID < producerId;
}
@Override
public String toString() {
return "ProducerIdAndEpoch(producerId=" + producerId + ", epoch=" + epoch + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ProducerIdAndEpoch that = (ProducerIdAndEpoch) o;
if (producerId != that.producerId) return false;
return epoch == that.epoch;
}
@Override
public int hashCode() {
int result = (int) (producerId ^ (producerId >>> 32));
result = 31 * result + (int) epoch;
return result;
}
}
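// Illustrative usage sketch (not part of the original file): NONE holds the sentinel
// producer id (RecordBatch.NO_PRODUCER_ID), so isValid() distinguishes a broker-assigned
// id from the unassigned default.
class ProducerIdAndEpochUsageExample {
    public static void main(String[] args) {
        System.out.println(ProducerIdAndEpoch.NONE.isValid());                   // false
        System.out.println(new ProducerIdAndEpoch(1234L, (short) 0).isValid());  // true
    }
}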
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/PureJavaCrc32C.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Some portions of this file Copyright (c) 2004-2006 Intel Corporation and
* licensed under the BSD license.
*/
package org.apache.kafka.common.utils;
import java.util.zip.Checksum;
/**
* This class was taken from Hadoop: org.apache.hadoop.util.PureJavaCrc32C.
*
* A pure-java implementation of the CRC32 checksum that uses
* the CRC32-C polynomial, the same polynomial used by iSCSI
* and implemented on many Intel chipsets supporting SSE4.2.
*
* NOTE: This class is intended for INTERNAL usage only within Kafka.
*/
// The exact version that was retrieved from Hadoop:
// https://github.com/apache/hadoop/blob/224de4f92c222a7b915e9c5d6bdd1a4a3fcbcf31/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java
public class PureJavaCrc32C implements Checksum {
/** the current CRC value, bit-flipped */
private int crc;
public PureJavaCrc32C() {
reset();
}
@Override
public long getValue() {
long ret = crc;
return (~ret) & 0xffffffffL;
}
@Override
public void reset() {
crc = 0xffffffff;
}
@SuppressWarnings("fallthrough")
@Override
public void update(byte[] b, int off, int len) {
int localCrc = crc;
while (len > 7) {
final int c0 = (b[off + 0] ^ localCrc) & 0xff;
final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff;
final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff;
final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff;
localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1])
^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]);
final int c4 = b[off + 4] & 0xff;
final int c5 = b[off + 5] & 0xff;
final int c6 = b[off + 6] & 0xff;
final int c7 = b[off + 7] & 0xff;
localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5])
^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]);
off += 8;
len -= 8;
}
/* loop unroll - Duff's device style */
switch (len) {
case 7:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
case 6:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
case 5:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
case 4:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
case 3:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
case 2:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
case 1:
localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)];
default:
/* nothing */
}
// Publish crc out to object
crc = localCrc;
}
@Override
final public void update(int b) {
crc = (crc >>> 8) ^ T[T8_0_START + ((crc ^ b) & 0xff)];
}
// CRC polynomial tables generated by:
// java -cp build/test/classes/:build/classes/ \
// org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78
private static final int T8_0_START = 0 * 256;
private static final int T8_1_START = 1 * 256;
private static final int T8_2_START = 2 * 256;
private static final int T8_3_START = 3 * 256;
private static final int T8_4_START = 4 * 256;
private static final int T8_5_START = 5 * 256;
private static final int T8_6_START = 6 * 256;
private static final int T8_7_START = 7 * 256;
private static final int[] T = new int[]{
/* T8_0 */
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
/* T8_1 */
0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899,
0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21,
0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918,
0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0,
0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B,
0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823,
0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A,
0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2,
0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D,
0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25,
0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C,
0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4,
0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F,
0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27,
0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E,
0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6,
0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260,
0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8,
0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1,
0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059,
0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162,
0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA,
0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3,
0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B,
0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464,
0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC,
0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5,
0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D,
0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766,
0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE,
0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7,
0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F,
0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483,
/* T8_2 */
0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073,
0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6,
0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9,
0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C,
0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67,
0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2,
0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED,
0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828,
0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA,
0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F,
0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20,
0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5,
0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE,
0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B,
0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634,
0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1,
0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730,
0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5,
0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA,
0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F,
0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24,
0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1,
0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE,
0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B,
0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9,
0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C,
0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63,
0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6,
0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD,
0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238,
0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177,
0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2,
0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8,
/* T8_3 */
0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939,
0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF,
0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804,
0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2,
0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2,
0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54,
0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F,
0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69,
0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE,
0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538,
0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3,
0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405,
0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255,
0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3,
0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368,
0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E,
0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006,
0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0,
0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B,
0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD,
0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D,
0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B,
0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0,
0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656,
0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1,
0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07,
0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC,
0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A,
0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A,
0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C,
0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57,
0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1,
0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842,
/* T8_4 */
0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4,
0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65,
0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127,
0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6,
0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3,
0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32,
0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470,
0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1,
0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A,
0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB,
0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89,
0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018,
0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D,
0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C,
0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE,
0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F,
0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8,
0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39,
0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B,
0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA,
0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF,
0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E,
0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C,
0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD,
0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06,
0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497,
0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5,
0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544,
0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51,
0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0,
0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82,
0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013,
0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3,
/* T8_5 */
0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA,
0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5,
0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4,
0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB,
0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57,
0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548,
0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69,
0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576,
0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031,
0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E,
0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F,
0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810,
0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC,
0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3,
0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682,
0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D,
0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C,
0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413,
0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32,
0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D,
0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81,
0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E,
0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF,
0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0,
0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7,
0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8,
0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9,
0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6,
0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A,
0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975,
0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154,
0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B,
0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C,
/* T8_6 */
0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558,
0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B,
0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE,
0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD,
0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5,
0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6,
0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43,
0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110,
0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222,
0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71,
0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884,
0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7,
0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F,
0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC,
0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39,
0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A,
0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC,
0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF,
0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A,
0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59,
0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811,
0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542,
0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7,
0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4,
0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6,
0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185,
0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670,
0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23,
0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B,
0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238,
0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD,
0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E,
0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F,
/* T8_7 */
0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769,
0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3,
0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD,
0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07,
0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0,
0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A,
0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44,
0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E,
0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B,
0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881,
0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF,
0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135,
0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2,
0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18,
0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076,
0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC,
0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D,
0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7,
0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9,
0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63,
0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494,
0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E,
0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20,
0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA,
0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F,
0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5,
0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B,
0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751,
0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6,
0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C,
0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612,
0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8,
0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
};
}
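// Illustrative usage sketch (not part of the original file): the class implements
// java.util.zip.Checksum, so bytes are fed incrementally via update() and the
// CRC32-C value is read back with getValue().
class PureJavaCrc32CUsageExample {
    public static void main(String[] args) {
        byte[] payload = "kafka".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(payload, 0, payload.length);
        System.out.printf("crc32c=0x%08x%n", crc.getValue());
    }
}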
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Sanitizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;
import javax.management.ObjectName;
import org.apache.kafka.common.KafkaException;
/**
* Utility class for sanitizing/desanitizing/quoting values used in JMX metric names
* or as ZooKeeper node names.
* <p>
* User principals and client-ids are URL-encoded using {@link #sanitize(String)}
* for use as ZooKeeper node names. User principals are URL-encoded in all metric
* names as well. All other metric tags including client-id are quoted if they
* contain special characters using {@link #jmxSanitize(String)} when
* registering in JMX.
*/
public class Sanitizer {
/**
* Even though only a small number of characters are disallowed in JMX, quote any
* string containing special characters to be safe. All characters in strings sanitized
* using {@link #sanitize(String)} are safe for JMX and hence included here.
*/
private static final Pattern MBEAN_PATTERN = Pattern.compile("[\\w-%\\. \t]*");
/**
* Sanitize `name` for safe use as JMX metric name as well as ZooKeeper node name
* using URL-encoding.
*/
public static String sanitize(String name) {
try {
String encoded = URLEncoder.encode(name, StandardCharsets.UTF_8.name());
StringBuilder builder = new StringBuilder();
for (int i = 0; i < encoded.length(); i++) {
char c = encoded.charAt(i);
if (c == '*') { // Metric ObjectName treats * as pattern
builder.append("%2A");
} else if (c == '+') { // Space URL-encoded as +, replace with percent encoding
builder.append("%20");
} else {
builder.append(c);
}
}
return builder.toString();
} catch (UnsupportedEncodingException e) {
throw new KafkaException(e);
}
}
/**
* Desanitize name that was URL-encoded using {@link #sanitize(String)}. This
* is used to obtain the desanitized version of node names in ZooKeeper.
*/
public static String desanitize(String name) {
try {
return URLDecoder.decode(name, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new KafkaException(e);
}
}
/**
* Quote `name` using {@link ObjectName#quote(String)} if `name` contains
* characters that are not safe for use in JMX. User principals that are
* already sanitized using {@link #sanitize(String)} will not be quoted
* since they are safe for JMX.
*/
public static String jmxSanitize(String name) {
return MBEAN_PATTERN.matcher(name).matches() ? name : ObjectName.quote(name);
}
}
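// --- Illustrative usage sketch (not part of the original file) ---
// Shows the sanitize/desanitize round trip and when jmxSanitize quotes a name.
// The principal and client-id strings below are made-up example values.
class SanitizerUsageSketch {
    public static void main(String[] args) {
        String principal = "user:with spaces*and+plus";
        String safe = Sanitizer.sanitize(principal);    // ':' -> %3A, ' ' -> %20, '*' -> %2A, '+' -> %2B
        System.out.println(safe);
        System.out.println(Sanitizer.desanitize(safe)); // round-trips back to the original
        System.out.println(Sanitizer.jmxSanitize("plain-client.id")); // already JMX-safe: returned as-is
        System.out.println(Sanitizer.jmxSanitize("client:id"));       // ':' is unsafe: quoted via ObjectName.quote
    }
}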
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Scheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
/**
* An interface for scheduling tasks for the future.
*
* Implementations of this class should be thread-safe.
*/
public interface Scheduler {
Scheduler SYSTEM = new SystemScheduler();
/**
* Get the timekeeper associated with this scheduler.
*/
Time time();
/**
* Schedule a callable to be executed in the future on a
* ScheduledExecutorService. Note that the Callable may not be queued on
* the executor until the designated time arrives.
*
* @param executor The executor to use.
* @param callable The callable to execute.
* @param delayMs The delay to use, in milliseconds.
* @param <T> The return type of the callable.
* @return A future which will complete when the callable is finished.
*/
<T> Future<T> schedule(final ScheduledExecutorService executor,
final Callable<T> callable, long delayMs);
}
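// --- Illustrative usage sketch (not part of the original file) ---
// Schedules a callable 100 ms in the future through Scheduler.SYSTEM; the
// executor setup and delay are arbitrary example values.
class SchedulerUsageSketch {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService executor =
            java.util.concurrent.Executors.newSingleThreadScheduledExecutor();
        try {
            Future<String> future = Scheduler.SYSTEM.schedule(executor, () -> "done", 100);
            System.out.println(future.get()); // prints "done" after roughly 100 ms
        } finally {
            executor.shutdown();
        }
    }
}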
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/SecurityUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.config.SecurityConfig;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.common.security.auth.SecurityProviderCreator;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.security.Security;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
public class SecurityUtils {
private static final Logger LOGGER = LoggerFactory.getLogger(SecurityConfig.class);
private static final Map<String, ResourceType> NAME_TO_RESOURCE_TYPES;
private static final Map<String, AclOperation> NAME_TO_OPERATIONS;
private static final Map<String, AclPermissionType> NAME_TO_PERMISSION_TYPES;
static {
NAME_TO_RESOURCE_TYPES = new HashMap<>(ResourceType.values().length);
NAME_TO_OPERATIONS = new HashMap<>(AclOperation.values().length);
NAME_TO_PERMISSION_TYPES = new HashMap<>(AclPermissionType.values().length);
for (ResourceType resourceType : ResourceType.values()) {
String resourceTypeName = toPascalCase(resourceType.name());
NAME_TO_RESOURCE_TYPES.put(resourceTypeName, resourceType);
NAME_TO_RESOURCE_TYPES.put(resourceTypeName.toUpperCase(Locale.ROOT), resourceType);
}
for (AclOperation operation : AclOperation.values()) {
String operationName = toPascalCase(operation.name());
NAME_TO_OPERATIONS.put(operationName, operation);
NAME_TO_OPERATIONS.put(operationName.toUpperCase(Locale.ROOT), operation);
}
for (AclPermissionType permissionType : AclPermissionType.values()) {
String permissionName = toPascalCase(permissionType.name());
NAME_TO_PERMISSION_TYPES.put(permissionName, permissionType);
NAME_TO_PERMISSION_TYPES.put(permissionName.toUpperCase(Locale.ROOT), permissionType);
}
}
public static KafkaPrincipal parseKafkaPrincipal(String str) {
if (str == null || str.isEmpty()) {
throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str);
}
String[] split = str.split(":", 2);
if (split.length != 2) {
throw new IllegalArgumentException("expected a string in format principalType:principalName but got " + str);
}
return new KafkaPrincipal(split[0], split[1]);
}
public static void addConfiguredSecurityProviders(Map<String, ?> configs) {
String securityProviderClassesStr = (String) configs.get(SecurityConfig.SECURITY_PROVIDERS_CONFIG);
if (securityProviderClassesStr == null || securityProviderClassesStr.equals("")) {
return;
}
try {
String[] securityProviderClasses = securityProviderClassesStr.replaceAll("\\s+", "").split(",");
for (int index = 0; index < securityProviderClasses.length; index++) {
SecurityProviderCreator securityProviderCreator =
(SecurityProviderCreator) Class.forName(securityProviderClasses[index]).getConstructor().newInstance();
securityProviderCreator.configure(configs);
Security.insertProviderAt(securityProviderCreator.getProvider(), index + 1);
}
} catch (ClassCastException e) {
LOGGER.error("Creators provided through " + SecurityConfig.SECURITY_PROVIDERS_CONFIG +
" are expected to be sub-classes of SecurityProviderCreator");
} catch (ClassNotFoundException cnfe) {
LOGGER.error("Unrecognized security provider creator class", cnfe);
} catch (ReflectiveOperationException e) {
LOGGER.error("Unexpected implementation of security provider creator class", e);
}
}
public static ResourceType resourceType(String name) {
return valueFromMap(NAME_TO_RESOURCE_TYPES, name, ResourceType.UNKNOWN);
}
public static AclOperation operation(String name) {
return valueFromMap(NAME_TO_OPERATIONS, name, AclOperation.UNKNOWN);
}
public static AclPermissionType permissionType(String name) {
return valueFromMap(NAME_TO_PERMISSION_TYPES, name, AclPermissionType.UNKNOWN);
}
// We use Pascal-case to store these values, so lookup using provided key first to avoid
// case conversion for the common case. For backward compatibility, also perform
// case-insensitive look up (without underscores) by converting the key to upper-case.
private static <T> T valueFromMap(Map<String, T> map, String key, T unknown) {
T value = map.get(key);
if (value == null) {
value = map.get(key.toUpperCase(Locale.ROOT));
}
return value == null ? unknown : value;
}
public static String resourceTypeName(ResourceType resourceType) {
return toPascalCase(resourceType.name());
}
public static String operationName(AclOperation operation) {
return toPascalCase(operation.name());
}
public static String permissionTypeName(AclPermissionType permissionType) {
return toPascalCase(permissionType.name());
}
private static String toPascalCase(String name) {
StringBuilder builder = new StringBuilder();
boolean capitalizeNext = true;
for (char c : name.toCharArray()) {
if (c == '_')
capitalizeNext = true;
else if (capitalizeNext) {
builder.append(Character.toUpperCase(c));
capitalizeNext = false;
} else
builder.append(Character.toLowerCase(c));
}
return builder.toString();
}
public static void authorizeByResourceTypeCheckArgs(AclOperation op,
ResourceType type) {
if (type == ResourceType.ANY) {
throw new IllegalArgumentException(
"Must specify a non-filter resource type for authorizeByResourceType");
}
if (type == ResourceType.UNKNOWN) {
throw new IllegalArgumentException(
"Unknown resource type");
}
if (op == AclOperation.ANY) {
throw new IllegalArgumentException(
"Must specify a non-filter operation type for authorizeByResourceType");
}
if (op == AclOperation.UNKNOWN) {
throw new IllegalArgumentException(
"Unknown operation type");
}
}
}
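// --- Illustrative usage sketch (not part of the original file) ---
// Demonstrates principal parsing and the case-tolerant name lookups; the
// principal string and operation names are made-up example values.
class SecurityUtilsUsageSketch {
    public static void main(String[] args) {
        KafkaPrincipal principal = SecurityUtils.parseKafkaPrincipal("User:alice");
        System.out.println(principal.getPrincipalType() + " / " + principal.getName()); // User / alice
        // Pascal-case and upper-case spellings both resolve; anything else maps to UNKNOWN.
        System.out.println(SecurityUtils.operation("Read"));      // READ
        System.out.println(SecurityUtils.operation("READ"));      // READ
        System.out.println(SecurityUtils.operation("not-an-op")); // UNKNOWN
    }
}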
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Shell.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A base class for running a Unix command.
*
* <code>Shell</code> can be used to run unix commands like <code>du</code> or
* <code>df</code>.
*/
public abstract class Shell {
private static final Logger LOG = LoggerFactory.getLogger(Shell.class);
/** Return an array containing the command name and its parameters */
protected abstract String[] execString();
/** Parse the execution result */
protected abstract void parseExecResult(BufferedReader lines) throws IOException;
private final long timeout;
private int exitCode;
private Process process; // sub process used to execute the command
/* Whether or not the script has finished executing */
private volatile AtomicBoolean completed;
/**
* @param timeout Specifies the time in milliseconds, after which the command will be killed. -1 means no timeout.
*/
public Shell(long timeout) {
this.timeout = timeout;
}
/** get the exit code
* @return the exit code of the process
*/
public int exitCode() {
return exitCode;
}
/** get the current sub-process executing the given command
* @return process executing the command
*/
public Process process() {
return process;
}
protected void run() throws IOException {
exitCode = 0; // reset for next run
runCommand();
}
/** Run a command */
private void runCommand() throws IOException {
ProcessBuilder builder = new ProcessBuilder(execString());
Timer timeoutTimer = null;
completed = new AtomicBoolean(false);
process = builder.start();
if (timeout > -1) {
timeoutTimer = new Timer();
//One time scheduling.
timeoutTimer.schedule(new ShellTimeoutTimerTask(this), timeout);
}
final BufferedReader errReader = new BufferedReader(
new InputStreamReader(process.getErrorStream(), StandardCharsets.UTF_8));
BufferedReader inReader = new BufferedReader(
new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8));
final StringBuffer errMsg = new StringBuffer();
// read error and input streams as this would free up the buffers
// free the error stream buffer
Thread errThread = KafkaThread.nonDaemon("kafka-shell-thread", new Runnable() {
@Override
public void run() {
try {
String line = errReader.readLine();
while ((line != null) && !Thread.currentThread().isInterrupted()) {
errMsg.append(line);
errMsg.append(System.getProperty("line.separator"));
line = errReader.readLine();
}
} catch (IOException ioe) {
LOG.warn("Error reading the error stream", ioe);
}
}
});
errThread.start();
try {
parseExecResult(inReader); // parse the output
// wait for the process to finish and check the exit code
exitCode = process.waitFor();
try {
// make sure that the error thread exits
errThread.join();
} catch (InterruptedException ie) {
LOG.warn("Interrupted while reading the error stream", ie);
}
completed.set(true);
//the timeout thread handling
//taken care in finally block
if (exitCode != 0) {
throw new ExitCodeException(exitCode, errMsg.toString());
}
} catch (InterruptedException ie) {
throw new IOException(ie.toString());
} finally {
if (timeoutTimer != null)
timeoutTimer.cancel();
// close the input stream
try {
inReader.close();
} catch (IOException ioe) {
LOG.warn("Error while closing the input stream", ioe);
}
if (!completed.get())
errThread.interrupt();
try {
errReader.close();
} catch (IOException ioe) {
LOG.warn("Error while closing the error stream", ioe);
}
process.destroy();
}
}
/**
* This is an IOException with exit code added.
*/
@SuppressWarnings("serial")
public static class ExitCodeException extends IOException {
int exitCode;
public ExitCodeException(int exitCode, String message) {
super(message);
this.exitCode = exitCode;
}
public int getExitCode() {
return exitCode;
}
}
/**
* A simple shell command executor.
*
* <code>ShellCommandExecutor</code> should be used in cases where the output
* of the command needs no explicit parsing and where the command, working
* directory and the environment remains unchanged. The output of the command
* is stored as-is and is expected to be small.
*/
public static class ShellCommandExecutor extends Shell {
private final String[] command;
private StringBuffer output;
/**
* Create a new instance of the ShellCommandExecutor to execute a command.
*
* @param execString The command to execute with arguments
* @param timeout Specifies the time in milliseconds, after which the
* command will be killed. -1 means no timeout.
*/
public ShellCommandExecutor(String[] execString, long timeout) {
super(timeout);
command = execString.clone();
}
/** Execute the shell command. */
public void execute() throws IOException {
this.run();
}
protected String[] execString() {
return command;
}
protected void parseExecResult(BufferedReader reader) throws IOException {
output = new StringBuffer();
char[] buf = new char[512];
int nRead;
while ((nRead = reader.read(buf, 0, buf.length)) > 0) {
output.append(buf, 0, nRead);
}
}
/** Get the output of the shell command.*/
public String output() {
return (output == null) ? "" : output.toString();
}
/**
* Returns the command of this instance as a single string.
* Arguments containing spaces are wrapped in quotes; other
* arguments are presented raw.
*
* @return a string representation of the object.
*/
public String toString() {
StringBuilder builder = new StringBuilder();
String[] args = execString();
for (String s : args) {
if (s.indexOf(' ') >= 0) {
builder.append('"').append(s).append('"');
} else {
builder.append(s);
}
builder.append(' ');
}
return builder.toString();
}
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>Shell</code> interface.
* @param cmd shell command to execute.
* @return the output of the executed command.
*/
public static String execCommand(String... cmd) throws IOException {
return execCommand(cmd, -1);
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>Shell</code> interface.
* @param cmd shell command to execute.
* @param timeout time in milliseconds after which script should be killed. -1 means no timeout.
* @return the output of the executed command.
*/
public static String execCommand(String[] cmd, long timeout) throws IOException {
ShellCommandExecutor exec = new ShellCommandExecutor(cmd, timeout);
exec.execute();
return exec.output();
}
/**
* Timer which is used to timeout scripts spawned off by shell.
*/
private static class ShellTimeoutTimerTask extends TimerTask {
private final Shell shell;
public ShellTimeoutTimerTask(Shell shell) {
this.shell = shell;
}
@Override
public void run() {
Process p = shell.process();
try {
p.exitValue();
} catch (Exception e) {
//Process has not terminated.
//So check if it has completed
//if not just destroy it.
if (p != null && !shell.completed.get()) {
p.destroy();
}
}
}
}
}
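// --- Illustrative usage sketch (not part of the original file) ---
// Runs a simple command through the static helper with a 5-second timeout.
// The command and timeout are arbitrary example values and assume a Unix-like host.
class ShellUsageSketch {
    public static void main(String[] args) throws IOException {
        // Throws Shell.ExitCodeException (an IOException) on a non-zero exit code.
        String output = Shell.execCommand(new String[] {"uname", "-a"}, 5000L);
        System.out.print(output);
    }
}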
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/SystemScheduler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* A scheduler implementation that uses the system clock.
*
* Use Scheduler.SYSTEM instead of constructing an instance of this class.
*/
public class SystemScheduler implements Scheduler {
SystemScheduler() {
}
@Override
public Time time() {
return Time.SYSTEM;
}
@Override
public <T> Future<T> schedule(final ScheduledExecutorService executor,
final Callable<T> callable, long delayMs) {
return executor.schedule(callable, delayMs, TimeUnit.MILLISECONDS);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/SystemTime.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import org.apache.kafka.common.errors.TimeoutException;
import java.util.function.Supplier;
/**
* A time implementation that uses the system clock and sleep call. Use `Time.SYSTEM` instead of creating an instance
* of this class.
*/
public class SystemTime implements Time {
@Override
public long milliseconds() {
return System.currentTimeMillis();
}
@Override
public long nanoseconds() {
return System.nanoTime();
}
@Override
public void sleep(long ms) {
Utils.sleep(ms);
}
@Override
public void waitObject(Object obj, Supplier<Boolean> condition, long deadlineMs) throws InterruptedException {
synchronized (obj) {
while (true) {
if (condition.get())
return;
long currentTimeMs = milliseconds();
if (currentTimeMs >= deadlineMs)
throw new TimeoutException("Condition not satisfied before deadline");
obj.wait(deadlineMs - currentTimeMs);
}
}
}
}
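// --- Illustrative usage sketch (not part of the original file) ---
// Shows the waitObject contract: the waiter supplies a condition and a deadline,
// and some other thread must notify the monitor when the condition flips. The
// 100 ms delay and 1-second deadline are arbitrary example values.
class SystemTimeUsageSketch {
    private static volatile boolean ready = false;
    public static void main(String[] args) throws InterruptedException {
        final Object monitor = new Object();
        Time time = Time.SYSTEM;
        new Thread(() -> {
            time.sleep(100);
            synchronized (monitor) {
                ready = true;
                monitor.notifyAll(); // caller's responsibility, per the waitObject javadoc
            }
        }).start();
        // Throws org.apache.kafka.common.errors.TimeoutException if the deadline passes first.
        time.waitObject(monitor, () -> ready, time.milliseconds() + 1000);
        System.out.println("condition satisfied");
    }
}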
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/ThreadUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;
/**
* Utilities for working with threads.
*/
public class ThreadUtils {
/**
* Create a new ThreadFactory.
*
* @param pattern The pattern to use. If this contains %d, it will be
* replaced with a thread number. It should not contain more
* than one %d.
* @param daemon True if we want daemon threads.
* @return The new ThreadFactory.
*/
public static ThreadFactory createThreadFactory(final String pattern,
final boolean daemon) {
return new ThreadFactory() {
private final AtomicLong threadEpoch = new AtomicLong(0);
@Override
public Thread newThread(Runnable r) {
String threadName;
if (pattern.contains("%d")) {
threadName = String.format(pattern, threadEpoch.addAndGet(1));
} else {
threadName = pattern;
}
Thread thread = new Thread(r, threadName);
thread.setDaemon(daemon);
return thread;
}
};
}
}
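// --- Illustrative usage sketch (not part of the original file) ---
// Builds a factory that names daemon threads "demo-thread-1", "demo-thread-2", ...
// The pattern string is an arbitrary example value.
class ThreadUtilsUsageSketch {
    public static void main(String[] args) {
        ThreadFactory factory = ThreadUtils.createThreadFactory("demo-thread-%d", true);
        Thread t1 = factory.newThread(() -> System.out.println("running on " + Thread.currentThread().getName()));
        Thread t2 = factory.newThread(() -> { });
        System.out.println(t1.getName() + ", " + t2.getName()); // demo-thread-1, demo-thread-2
        t1.start();
    }
}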
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Time.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
/**
* An interface abstracting the clock to use in unit testing classes that make use of clock time.
*
* Implementations of this interface should be thread-safe.
*/
public interface Time {
Time SYSTEM = new SystemTime();
/**
* Returns the current time in milliseconds.
*/
long milliseconds();
/**
* Returns the value returned by `nanoseconds` converted into milliseconds.
*/
default long hiResClockMs() {
return TimeUnit.NANOSECONDS.toMillis(nanoseconds());
}
/**
* Returns the current value of the running JVM's high-resolution time source, in nanoseconds.
*
* <p>This method can only be used to measure elapsed time and is
* not related to any other notion of system or wall-clock time.
* The value returned represents nanoseconds since some fixed but
* arbitrary <i>origin</i> time (perhaps in the future, so values
* may be negative). The same origin is used by all invocations of
* this method in an instance of a Java virtual machine; other
* virtual machine instances are likely to use a different origin.
*/
long nanoseconds();
/**
* Sleep for the given number of milliseconds
*/
void sleep(long ms);
/**
* Wait for a condition using the monitor of a given object. This avoids the implicit
* dependence on system time when calling {@link Object#wait()}.
*
* @param obj The object that will be waited with {@link Object#wait()}. Note that it is the responsibility
* of the caller to call notify on this object when the condition is satisfied.
* @param condition The condition we are awaiting
* @param deadlineMs The deadline timestamp at which to raise a timeout error
*
* @throws org.apache.kafka.common.errors.TimeoutException if the timeout expires before the condition is satisfied
*/
void waitObject(Object obj, Supplier<Boolean> condition, long deadlineMs) throws InterruptedException;
/**
* Get a timer which is bound to this time instance and expires after the given timeout
*/
default Timer timer(long timeoutMs) {
return new Timer(this, timeoutMs);
}
/**
* Get a timer which is bound to this time instance and expires after the given timeout
*/
default Timer timer(Duration timeout) {
return timer(timeout.toMillis());
}
/**
* Wait for a future to complete, or time out.
*
* @param future The future to wait for.
* @param deadlineNs The time in the future, in monotonic nanoseconds, to time out.
* @return The result of the future.
* @param <T> The type of the future.
*/
default <T> T waitForFuture(
CompletableFuture<T> future,
long deadlineNs
) throws TimeoutException, InterruptedException, ExecutionException {
TimeoutException timeoutException = null;
while (true) {
long nowNs = nanoseconds();
if (deadlineNs <= nowNs) {
throw (timeoutException == null) ? new TimeoutException() : timeoutException;
}
long deltaNs = deadlineNs - nowNs;
try {
return future.get(deltaNs, TimeUnit.NANOSECONDS);
} catch (TimeoutException t) {
timeoutException = t;
}
}
}
}
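// --- Illustrative usage sketch (not part of the original file) ---
// Exercises the timer factory and waitForFuture; the 500 ms budgets are
// arbitrary example values.
class TimeUsageSketch {
    public static void main(String[] args) throws Exception {
        Time time = Time.SYSTEM;
        Timer timer = time.timer(Duration.ofMillis(500));
        time.sleep(100);
        timer.update(); // the timer caches time; it only advances when updated
        System.out.println("remaining ms: " + timer.remainingMs()); // roughly 400
        CompletableFuture<String> future = CompletableFuture.completedFuture("ok");
        long deadlineNs = time.nanoseconds() + TimeUnit.MILLISECONDS.toNanos(500);
        System.out.println(time.waitForFuture(future, deadlineNs)); // "ok"
    }
}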
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Timer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
/**
* This is a helper class which makes blocking methods with a timeout easier to implement.
* In particular it enables use cases where a high-level blocking call with a timeout is
* composed of several lower level calls, each of which has its own respective timeout. The idea
* is to create a single timer object for the high level timeout and carry it along to
* all of the lower level methods. This class also handles common problems such as integer overflow.
* This class also ensures monotonic updates to the timer even if the underlying clock is subject
* to non-monotonic behavior. For example, the remaining time returned by {@link #remainingMs()} is
* guaranteed to decrease monotonically until it hits zero.
* <p>
* Note that it is up to the caller to ensure progress of the timer using one of the
* {@link #update()} methods or {@link #sleep(long)}. The timer will cache the current time and
* return it indefinitely until the timer has been updated. This allows the caller to limit
* unnecessary system calls and update the timer only when needed. For example, a timer which is
* waiting for a request sent through the {@link org.apache.kafka.clients.NetworkClient} should call
* {@link #update()} following each blocking call to
* {@link org.apache.kafka.clients.NetworkClient#poll(long, long)}.
* <p>
* A typical usage might look something like this:
*
* <pre>
* Time time = Time.SYSTEM;
* Timer timer = time.timer(500);
*
* while (!conditionSatisfied() && timer.notExpired()) {
* client.poll(timer.remainingMs(), timer.currentTimeMs());
* timer.update();
* }
* </pre>
*/
public class Timer {
private final Time time;
private long startMs;
private long currentTimeMs;
private long deadlineMs;
private long timeoutMs;
Timer(Time time, long timeoutMs) {
this.time = time;
update();
reset(timeoutMs);
}
/**
* Check timer expiration. Like {@link #remainingMs()}, this depends on the current cached
* time in milliseconds, which is only updated through one of the {@link #update()} methods
* or with {@link #sleep(long)}.
*
* @return true if the timer has expired, false otherwise
*/
public boolean isExpired() {
return currentTimeMs >= deadlineMs;
}
/**
* Check whether the timer has not yet expired.
* @return true if there is still time remaining before expiration
*/
public boolean notExpired() {
return !isExpired();
}
/**
* Reset the timer to the specified timeout. This will use the underlying {@link Time}
* implementation to update the current cached time in milliseconds and it will set a new timer
* deadline.
*
* @param timeoutMs The new timeout in milliseconds
*/
public void updateAndReset(long timeoutMs) {
update();
reset(timeoutMs);
}
/**
* Reset the timer using a new timeout. Note that this does not update the cached current time
* in milliseconds, so it typically must be accompanied with a separate call to {@link #update()}.
* Typically, you can just use {@link #updateAndReset(long)}.
*
* @param timeoutMs The new timeout in milliseconds
*/
public void reset(long timeoutMs) {
if (timeoutMs < 0)
throw new IllegalArgumentException("Invalid negative timeout " + timeoutMs);
this.timeoutMs = timeoutMs;
this.startMs = this.currentTimeMs;
if (currentTimeMs > Long.MAX_VALUE - timeoutMs)
this.deadlineMs = Long.MAX_VALUE;
else
this.deadlineMs = currentTimeMs + timeoutMs;
}
/**
* Reset the timer's deadline directly.
*
* @param deadlineMs The new deadline in milliseconds
*/
public void resetDeadline(long deadlineMs) {
if (deadlineMs < 0)
throw new IllegalArgumentException("Invalid negative deadline " + deadlineMs);
this.timeoutMs = Math.max(0, deadlineMs - this.currentTimeMs);
this.startMs = this.currentTimeMs;
this.deadlineMs = deadlineMs;
}
/**
* Use the underlying {@link Time} implementation to update the current cached time. If
* the underlying time returns a value which is smaller than the current cached time,
* the update will be ignored.
*/
public void update() {
update(time.milliseconds());
}
/**
* Update the cached current time to a specific value. In some contexts, the caller may already
* have an accurate time, so this avoids unnecessary calls to system time.
* <p>
* Note that if the updated current time is smaller than the cached time, then the update
* is ignored.
*
* @param currentTimeMs The current time in milliseconds to cache
*/
public void update(long currentTimeMs) {
this.currentTimeMs = Math.max(currentTimeMs, this.currentTimeMs);
}
/**
* Get the remaining time in milliseconds until the timer expires. Like {@link #currentTimeMs},
* this depends on the cached current time, so the returned value will not change until the timer
* has been updated using one of the {@link #update()} methods or {@link #sleep(long)}.
*
* @return The cached remaining time in milliseconds until timer expiration
*/
public long remainingMs() {
return Math.max(0, deadlineMs - currentTimeMs);
}
/**
* Get the current time in milliseconds. This will return the same cached value until the timer
* has been updated using one of the {@link #update()} methods or {@link #sleep(long)} is used.
* <p>
* Note that the value returned is guaranteed to increase monotonically even if the underlying
* {@link Time} implementation goes backwards. Effectively, the timer will just wait for the
* time to catch up.
*
* @return The current cached time in milliseconds
*/
public long currentTimeMs() {
return currentTimeMs;
}
/**
* Get the amount of time that has elapsed since the timer began. If the timer was reset, this
* will be the amount of time since the last reset.
*
* @return The elapsed time since construction or the last reset
*/
public long elapsedMs() {
return currentTimeMs - startMs;
}
/**
* Get the current timeout value specified through {@link #reset(long)} or {@link #resetDeadline(long)}.
* This value is constant until altered by one of these API calls.
*
* @return The timeout in milliseconds
*/
public long timeoutMs() {
return timeoutMs;
}
/**
* Sleep for the requested duration and update the timer. Return when either the duration has
* elapsed or the timer has expired.
*
* @param durationMs The duration in milliseconds to sleep
*/
public void sleep(long durationMs) {
long sleepDurationMs = Math.min(durationMs, remainingMs());
time.sleep(sleepDurationMs);
update();
}
}
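// --- Illustrative usage sketch (not part of the original file) ---
// Carries one Timer across several lower-level blocking steps so they share a
// single overall budget, as the class javadoc describes. The 300 ms budget and
// 100 ms step are arbitrary example values.
class TimerUsageSketch {
    public static void main(String[] args) {
        Timer timer = Time.SYSTEM.timer(300);
        while (timer.notExpired()) {
            // Each step sleeps at most 100 ms, capped by the remaining budget;
            // sleep() also updates the cached time for us.
            timer.sleep(100);
            System.out.println("elapsed=" + timer.elapsedMs() + "ms remaining=" + timer.remainingMs() + "ms");
        }
        System.out.println("expired after " + timer.elapsedMs() + " ms");
    }
}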
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/Utils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.utils;
import java.nio.BufferUnderflowException;
import java.nio.file.StandardOpenOption;
import java.util.AbstractMap;
import java.util.EnumSet;
import java.util.Map.Entry;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.network.TransferableChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import java.io.Closeable;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
public final class Utils {
private Utils() {}
// This matches URIs of formats: host:port and protocol://host:port
// IPv6 is supported with [ip] pattern
private static final Pattern HOST_PORT_PATTERN = Pattern.compile(".*?\\[?([0-9a-zA-Z\\-%._:]*)\\]?:([0-9]+)");
private static final Pattern VALID_HOST_CHARACTERS = Pattern.compile("([0-9a-zA-Z\\-%._:]*)");
// Prints up to 2 decimal digits. Used for human readable printing
private static final DecimalFormat TWO_DIGIT_FORMAT = new DecimalFormat("0.##",
DecimalFormatSymbols.getInstance(Locale.ENGLISH));
private static final String[] BYTE_SCALE_SUFFIXES = new String[] {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
public static final String NL = System.getProperty("line.separator");
private static final Logger log = LoggerFactory.getLogger(Utils.class);
/**
* Get a sorted list representation of a collection.
* @param collection The collection to sort
* @param <T> The class of objects in the collection
* @return An unmodifiable sorted list with the contents of the collection
*/
public static <T extends Comparable<? super T>> List<T> sorted(Collection<T> collection) {
List<T> res = new ArrayList<>(collection);
Collections.sort(res);
return Collections.unmodifiableList(res);
}
/**
* Turn the given UTF8 byte array into a string
*
* @param bytes The byte array
* @return The string
*/
public static String utf8(byte[] bytes) {
return new String(bytes, StandardCharsets.UTF_8);
}
/**
* Read a UTF8 string from a byte buffer. Note that the position of the byte buffer is not affected
* by this method.
*
* @param buffer The buffer to read from
* @param length The length of the string in bytes
* @return The UTF8 string
*/
public static String utf8(ByteBuffer buffer, int length) {
return utf8(buffer, 0, length);
}
/**
* Read a UTF8 string from the current position till the end of a byte buffer. The position of the byte buffer is
* not affected by this method.
*
* @param buffer The buffer to read from
* @return The UTF8 string
*/
public static String utf8(ByteBuffer buffer) {
return utf8(buffer, buffer.remaining());
}
/**
* Read a UTF8 string from a byte buffer at a given offset. Note that the position of the byte buffer
* is not affected by this method.
*
* @param buffer The buffer to read from
* @param offset The offset relative to the current position in the buffer
* @param length The length of the string in bytes
* @return The UTF8 string
*/
public static String utf8(ByteBuffer buffer, int offset, int length) {
if (buffer.hasArray())
return new String(buffer.array(), buffer.arrayOffset() + buffer.position() + offset, length, StandardCharsets.UTF_8);
else
return utf8(toArray(buffer, offset, length));
}
/**
* Turn a string into a utf8 byte[]
*
* @param string The string
* @return The byte[]
*/
public static byte[] utf8(String string) {
return string.getBytes(StandardCharsets.UTF_8);
}
/**
* Get the absolute value of the given number. If the number is Integer.MIN_VALUE, return 0. This is different
* from java.lang.Math.abs and scala.math.abs, which return Integer.MIN_VALUE (!).
*/
public static int abs(int n) {
return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n);
}
/**
* Get the minimum of some long values.
* @param first Used to ensure at least one value
* @param rest The remaining values to compare
* @return The minimum of all passed values
*/
public static long min(long first, long... rest) {
long min = first;
for (long r : rest) {
if (r < min)
min = r;
}
return min;
}
/**
* Get the maximum of some long values.
* @param first Used to ensure at least one value
* @param rest The remaining values to compare
* @return The maximum of all passed values
*/
public static long max(long first, long... rest) {
long max = first;
for (long r : rest) {
if (r > max)
max = r;
}
return max;
}
public static short min(short first, short second) {
return (short) Math.min(first, second);
}
/**
* Get the length for UTF8-encoding a string without encoding it first
*
* @param s The string to calculate the length for
* @return The length when serialized
*/
public static int utf8Length(CharSequence s) {
int count = 0;
for (int i = 0, len = s.length(); i < len; i++) {
char ch = s.charAt(i);
if (ch <= 0x7F) {
count++;
} else if (ch <= 0x7FF) {
count += 2;
} else if (Character.isHighSurrogate(ch)) {
count += 4;
++i;
} else {
count += 3;
}
}
return count;
}
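// Illustrative sketch (not part of the original file): utf8Length(s) matches the
// length of the encoded byte[] without allocating it. The sample string (one
// ASCII, one 2-byte, and one surrogate-pair code point) is an example value.
private static void utf8LengthExampleSketch() {
    String s = "a\u00e9\ud83d\ude00"; // 'a' (1 byte) + 'é' (2 bytes) + emoji (4 bytes)
    assert utf8Length(s) == utf8(s).length; // both are 7
}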
/**
* Read the given byte buffer from its current position to its limit into a byte array.
* @param buffer The buffer to read from
*/
public static byte[] toArray(ByteBuffer buffer) {
return toArray(buffer, 0, buffer.remaining());
}
/**
* Read a byte array from its current position given the size in the buffer
* @param buffer The buffer to read from
* @param size The number of bytes to read into the array
*/
public static byte[] toArray(ByteBuffer buffer, int size) {
return toArray(buffer, 0, size);
}
/**
* Convert a ByteBuffer to a nullable array.
* @param buffer The buffer to convert
* @return The resulting array or null if the buffer is null
*/
public static byte[] toNullableArray(ByteBuffer buffer) {
return buffer == null ? null : toArray(buffer);
}
/**
* Wrap an array as a nullable ByteBuffer.
* @param array The nullable array to wrap
* @return The wrapping ByteBuffer or null if array is null
*/
public static ByteBuffer wrapNullable(byte[] array) {
return array == null ? null : ByteBuffer.wrap(array);
}
/**
* Read a byte array from the given offset and size in the buffer
* @param buffer The buffer to read from
* @param offset The offset relative to the current position of the buffer
* @param size The number of bytes to read into the array
*/
public static byte[] toArray(ByteBuffer buffer, int offset, int size) {
byte[] dest = new byte[size];
if (buffer.hasArray()) {
System.arraycopy(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, dest, 0, size);
} else {
int pos = buffer.position();
buffer.position(pos + offset);
buffer.get(dest);
buffer.position(pos);
}
return dest;
}
/**
* Starting from the current position, read an integer indicating the size of the byte array to read,
* then read the array. Consumes the buffer: upon returning, the buffer's position is after the array
* that is returned.
* @param buffer The buffer to read a size-prefixed array from
* @return The array
*/
public static byte[] getNullableSizePrefixedArray(final ByteBuffer buffer) {
final int size = buffer.getInt();
return getNullableArray(buffer, size);
}
/**
* Read a byte array of the given size. Consumes the buffer: upon returning, the buffer's position
* is after the array that is returned.
* @param buffer The buffer to read a size-prefixed array from
* @param size The number of bytes to read out of the buffer
* @return The array
*/
public static byte[] getNullableArray(final ByteBuffer buffer, final int size) {
if (size > buffer.remaining()) {
// preemptively throw this when the read is doomed to fail, so we don't have to allocate the array.
throw new BufferUnderflowException();
}
final byte[] oldBytes = size == -1 ? null : new byte[size];
if (oldBytes != null) {
buffer.get(oldBytes);
}
return oldBytes;
}
/**
* Returns a copy of src byte array
* @param src The byte array to copy
* @return The copy
*/
public static byte[] copyArray(byte[] src) {
return Arrays.copyOf(src, src.length);
}
/**
* Compares two character arrays for equality using a constant-time algorithm, which is needed
* for comparing passwords. Two arrays are equal if they have the same length and all
* characters at corresponding positions are equal.
*
* All characters in the first array are examined to determine equality.
* The calculation time depends only on the length of this first character array; it does not
* depend on the length of the second character array or the contents of either array.
*
* @param first the first array to compare
* @param second the second array to compare
* @return true if the arrays are equal, or false otherwise
*/
public static boolean isEqualConstantTime(char[] first, char[] second) {
if (first == second) {
return true;
}
if (first == null || second == null) {
return false;
}
if (second.length == 0) {
return first.length == 0;
}
// time-constant comparison that always compares all characters in first array
boolean matches = first.length == second.length;
for (int i = 0; i < first.length; ++i) {
int j = i < second.length ? i : 0;
if (first[i] != second[j]) {
matches = false;
}
}
return matches;
}
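// Illustrative sketch (not part of the original file): constant-time comparison
// is for secrets such as passwords. Unlike Arrays.equals, which can return at
// the first mismatch and leak its position through timing, the running time
// here depends only on stored.length. This helper method is hypothetical.
private static boolean secretMatchesSketch(char[] stored, char[] supplied) {
    return isEqualConstantTime(stored, supplied);
}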
/**
* Sleep for a bit
* @param ms The duration of the sleep
*/
public static void sleep(long ms) {
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
// this is okay, we just wake up early
Thread.currentThread().interrupt();
}
}
/**
* Instantiate the class
*/
public static <T> T newInstance(Class<T> c) {
if (c == null)
throw new KafkaException("class cannot be null");
try {
return c.getDeclaredConstructor().newInstance();
} catch (NoSuchMethodException e) {
throw new KafkaException("Could not find a public no-argument constructor for " + c.getName(), e);
} catch (ReflectiveOperationException | RuntimeException e) {
throw new KafkaException("Could not instantiate class " + c.getName(), e);
}
}
/**
* Look up the class by name and instantiate it.
* @param klass class name
* @param base super class of the class to be instantiated
* @param <T> the type of the base class
* @return the new instance
*/
public static <T> T newInstance(String klass, Class<T> base) throws ClassNotFoundException {
return Utils.newInstance(loadClass(klass, base));
}
/**
* Look up a class by name.
* @param klass class name
* @param base super class of the class for verification
* @param <T> the type of the base class
* @return the new class
*/
public static <T> Class<? extends T> loadClass(String klass, Class<T> base) throws ClassNotFoundException {
return Class.forName(klass, true, Utils.getContextOrKafkaClassLoader()).asSubclass(base);
}
/**
* Cast {@code klass} to {@code base} and instantiate it.
* @param klass The class to instantiate
* @param base A known base class of klass.
* @param <T> the type of the base class
* @throws ClassCastException If {@code klass} is not a subclass of {@code base}.
* @return the new instance.
*/
public static <T> T newInstance(Class<?> klass, Class<T> base) {
return Utils.newInstance(klass.asSubclass(base));
}
/**
* Construct a new object using a class name and parameters.
*
* @param className The full name of the class to construct.
* @param params A sequence of (type, object) elements.
* @param <T> The type of object to construct.
* @return The new object.
* @throws ClassNotFoundException If there was a problem constructing the object.
*/
public static <T> T newParameterizedInstance(String className, Object... params)
throws ClassNotFoundException {
Class<?>[] argTypes = new Class<?>[params.length / 2];
Object[] args = new Object[params.length / 2];
try {
Class<?> c = Class.forName(className, true, Utils.getContextOrKafkaClassLoader());
for (int i = 0; i < params.length / 2; i++) {
argTypes[i] = (Class<?>) params[2 * i];
args[i] = params[(2 * i) + 1];
}
@SuppressWarnings("unchecked")
Constructor<T> constructor = (Constructor<T>) c.getConstructor(argTypes);
return constructor.newInstance(args);
} catch (NoSuchMethodException e) {
throw new ClassNotFoundException(String.format("Failed to find " +
"constructor with %s for %s", Utils.join(argTypes, ", "), className), e);
} catch (InstantiationException e) {
throw new ClassNotFoundException(String.format("Failed to instantiate " +
"%s", className), e);
} catch (IllegalAccessException e) {
throw new ClassNotFoundException(String.format("Unable to access " +
"constructor of %s", className), e);
} catch (InvocationTargetException e) {
throw new KafkaException(String.format("The constructor of %s threw an exception", className), e.getCause());
}
}
/**
* Generates 32 bit murmur2 hash from byte array
* @param data byte array to hash
* @return 32 bit hash of the given array
*/
@SuppressWarnings("fallthrough")
public static int murmur2(final byte[] data) {
int length = data.length;
int seed = 0x9747b28c;
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
final int m = 0x5bd1e995;
final int r = 24;
// Initialize the hash to a random value
int h = seed ^ length;
int length4 = length / 4;
for (int i = 0; i < length4; i++) {
final int i4 = i * 4;
int k = (data[i4 + 0] & 0xff) + ((data[i4 + 1] & 0xff) << 8) + ((data[i4 + 2] & 0xff) << 16) + ((data[i4 + 3] & 0xff) << 24);
k *= m;
k ^= k >>> r;
k *= m;
h *= m;
h ^= k;
}
// Handle the last few bytes of the input array
switch (length % 4) {
case 3:
h ^= (data[(length & ~3) + 2] & 0xff) << 16;
case 2:
h ^= (data[(length & ~3) + 1] & 0xff) << 8;
case 1:
h ^= data[length & ~3] & 0xff;
h *= m;
}
h ^= h >>> 13;
h *= m;
h ^= h >>> 15;
return h;
}
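// Illustrative sketch (not part of the original file): murmur2 is the hash the
// default partitioner applies to record keys; masking off the sign bit and
// taking a modulus is the usual pattern. This helper method is hypothetical.
private static int partitionForKeySketch(byte[] keyBytes, int numPartitions) {
    return (murmur2(keyBytes) & 0x7fffffff) % numPartitions;
}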
/**
* Extracts the hostname from a "host:port" address string.
* @param address address string to parse
* @return hostname or null if the given address is incorrect
*/
public static String getHost(String address) {
Matcher matcher = HOST_PORT_PATTERN.matcher(address);
return matcher.matches() ? matcher.group(1) : null;
}
/**
* Extracts the port number from a "host:port" address string.
* @param address address string to parse
* @return port number or null if the given address is incorrect
*/
public static Integer getPort(String address) {
Matcher matcher = HOST_PORT_PATTERN.matcher(address);
return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null;
}
/**
* Basic validation of the supplied address. Checks for valid characters.
* @param address hostname string to validate
* @return true if address contains valid characters
*/
public static boolean validHostPattern(String address) {
return VALID_HOST_CHARACTERS.matcher(address).matches();
}
/**
* Formats hostname and port number as a "host:port" address string,
* surrounding IPv6 addresses with braces '[', ']'
* @param host hostname
* @param port port number
* @return address string
*/
public static String formatAddress(String host, Integer port) {
return host.contains(":")
? "[" + host + "]:" + port // IPv6
: host + ":" + port;
}
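// Illustrative sketch (not part of the original file): getHost/getPort/formatAddress
// round-trip for plain and bracketed-IPv6 forms. The addresses are example values.
private static void hostPortSketch() {
    assert "localhost".equals(getHost("localhost:9092")) && getPort("localhost:9092") == 9092;
    assert "::1".equals(getHost("[::1]:9092"));             // brackets are stripped on parse
    assert "[::1]:9092".equals(formatAddress("::1", 9092)); // and re-added on format
}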
/**
* Formats a byte number as a human readable String ("3.2 MB")
* @param bytes some size in bytes
* @return a human-readable string such as "3.2 MB"
*/
public static String formatBytes(long bytes) {
if (bytes < 0) {
return String.valueOf(bytes);
}
double asDouble = (double) bytes;
int ordinal = (int) Math.floor(Math.log(asDouble) / Math.log(1024.0));
double scale = Math.pow(1024.0, ordinal);
double scaled = asDouble / scale;
String formatted = TWO_DIGIT_FORMAT.format(scaled);
try {
return formatted + " " + BYTE_SCALE_SUFFIXES[ordinal];
} catch (IndexOutOfBoundsException e) {
//huge number?
return String.valueOf(asDouble);
}
}
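// Illustrative sketch (not part of the original file): formatBytes uses
// 1024-based units and at most two decimal digits. The inputs are example values.
private static void formatBytesSketch() {
    assert "1 KB".equals(formatBytes(1024L));
    assert "3.2 MB".equals(formatBytes(3355443L)); // ~3.2 * 1024 * 1024
    assert "-1".equals(formatBytes(-1L));          // negative values pass through unformatted
}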
/**
* Create a string representation of an array joined by the given separator
* @param strs The array of items
* @param separator The separator
* @return The string representation.
*/
public static <T> String join(T[] strs, String separator) {
return join(Arrays.asList(strs), separator);
}
/**
* Create a string representation of a collection joined by the given separator
* @param collection The list of items
* @param separator The separator
* @return The string representation.
*/
public static <T> String join(Collection<T> collection, String separator) {
Objects.requireNonNull(collection);
return mkString(collection.stream(), "", "", separator);
}
/**
* Create a string representation of a stream surrounded by `begin` and `end` and joined by `separator`.
*
* @return The string representation.
*/
public static <T> String mkString(Stream<T> stream, String begin, String end, String separator) {
Objects.requireNonNull(stream);
StringBuilder sb = new StringBuilder();
sb.append(begin);
Iterator<T> iter = stream.iterator();
while (iter.hasNext()) {
sb.append(iter.next());
if (iter.hasNext())
sb.append(separator);
}
sb.append(end);
return sb.toString();
}
/**
* Converts a {@code Map} class into a string, concatenating keys and values
* Example:
* {@code mkString({ key: "hello", keyTwo: "hi" }, "|START|", "|END|", "=", ",")
* => "|START|key=hello,keyTwo=hi|END|"}
*/
public static <K, V> String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator) {
StringBuilder bld = new StringBuilder();
bld.append(begin);
String prefix = "";
for (Map.Entry<K, V> entry : map.entrySet()) {
bld.append(prefix).append(entry.getKey()).
append(keyValueSeparator).append(entry.getValue());
prefix = elementSeparator;
}
bld.append(end);
return bld.toString();
}
/**
* Converts an extensions string into a {@code Map<String, String>}.
*
* Example:
* {@code parseMap("key=hey,keyTwo=hi,keyThree=hello", "=", ",") => { key: "hey", keyTwo: "hi", keyThree: "hello" }}
*
*/
public static Map<String, String> parseMap(String mapStr, String keyValueSeparator, String elementSeparator) {
Map<String, String> map = new HashMap<>();
if (!mapStr.isEmpty()) {
String[] attrvals = mapStr.split(elementSeparator);
for (String attrval : attrvals) {
String[] array = attrval.split(keyValueSeparator, 2);
map.put(array[0], array[1]);
}
}
return map;
}
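// Illustrative sketch (not part of the original file): parseMap and the Map
// overload of mkString invert each other for simple key/value strings. The
// separators and values below are example values.
private static void parseMapSketch() {
    Map<String, String> parsed = parseMap("key=hey,keyTwo=hi", "=", ",");
    assert "hey".equals(parsed.get("key")) && "hi".equals(parsed.get("keyTwo"));
    // HashMap iteration order is unspecified, so compare maps, not strings.
    String rendered = mkString(parsed, "", "", "=", ",");
    assert parseMap(rendered, "=", ",").equals(parsed);
}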
/**
* Read a properties file from the given path
* @param filename The path of the file to read
* @return the loaded properties
*/
public static Properties loadProps(String filename) throws IOException {
return loadProps(filename, null);
}
/**
* Read a properties file from the given path
* @param filename The path of the file to read
* @param onlyIncludeKeys When non-null, only return values associated with these keys and ignore all others
* @return the loaded properties
*/
public static Properties loadProps(String filename, List<String> onlyIncludeKeys) throws IOException {
Properties props = new Properties();
if (filename != null) {
try (InputStream propStream = Files.newInputStream(Paths.get(filename))) {
props.load(propStream);
}
} else {
System.out.println("Did not load any properties since the property file is not specified");
}
if (onlyIncludeKeys == null || onlyIncludeKeys.isEmpty())
return props;
Properties requestedProps = new Properties();
onlyIncludeKeys.forEach(key -> {
String value = props.getProperty(key);
if (value != null)
requestedProps.setProperty(key, value);
});
return requestedProps;
}
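// Illustrative usage (hypothetical file path, not part of the upstream sources): load only the
// keys a tool actually needs, ignoring everything else in the file.
//   Properties p = loadProps("/tmp/client.properties",
//           Arrays.asList("bootstrap.servers", "client.id"));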
/**
* Converts a Properties object to a {@code Map<String, String>}, calling {@link Object#toString()} to ensure all
* keys and values are Strings.
*/
public static Map<String, String> propsToStringMap(Properties props) {
Map<String, String> result = new HashMap<>();
for (Map.Entry<Object, Object> entry : props.entrySet())
result.put(entry.getKey().toString(), entry.getValue().toString());
return result;
}
/**
* Get the stack trace from an exception as a string
*/
public static String stackTrace(Throwable e) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
}
/**
* Read a buffer into a Byte array for the given offset and length
*/
public static byte[] readBytes(ByteBuffer buffer, int offset, int length) {
byte[] dest = new byte[length];
if (buffer.hasArray()) {
System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length);
} else {
buffer.mark();
buffer.position(offset);
buffer.get(dest);
buffer.reset();
}
return dest;
}
/**
* Read the given byte buffer into a Byte array
*/
public static byte[] readBytes(ByteBuffer buffer) {
return Utils.readBytes(buffer, 0, buffer.limit());
}
/**
* Read a file as string and return the content. The file is treated as a stream and no seek is performed.
* This allows the program to read from a regular file as well as from a pipe/fifo.
*/
public static String readFileAsString(String path) throws IOException {
try {
byte[] allBytes = Files.readAllBytes(Paths.get(path));
return new String(allBytes, StandardCharsets.UTF_8);
} catch (IOException ex) {
throw new IOException("Unable to read file " + path, ex);
}
}
/**
* Grow the given ByteBuffer if its capacity is smaller than the requested length.
* @param existingBuffer ByteBuffer whose capacity is checked
* @param newLength the minimum capacity required
* @return the existing buffer if it is already large enough, otherwise a newly allocated,
*         larger buffer containing the existing buffer's contents
*/
public static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) {
if (newLength > existingBuffer.capacity()) {
ByteBuffer newBuffer = ByteBuffer.allocate(newLength);
existingBuffer.flip();
newBuffer.put(existingBuffer);
return newBuffer;
}
return existingBuffer;
}
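// Illustrative usage (not part of the upstream sources): callers must keep the returned buffer,
// because a new, larger buffer is allocated whenever growth is required.
//   ByteBuffer buf = ByteBuffer.allocate(16);
//   buf = ensureCapacity(buf, 64);  // buf.capacity() is now at least 64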
/**
* Creates a set
* @param elems the elements
* @param <T> the type of element
* @return Set
*/
@SafeVarargs
public static <T> Set<T> mkSet(T... elems) {
Set<T> result = new HashSet<>((int) (elems.length / 0.75) + 1);
for (T elem : elems)
result.add(elem);
return result;
}
/**
* Creates a sorted set
* @param elems the elements
* @param <T> the type of element, must be comparable
* @return SortedSet
*/
@SafeVarargs
public static <T extends Comparable<T>> SortedSet<T> mkSortedSet(T... elems) {
SortedSet<T> result = new TreeSet<>();
for (T elem : elems)
result.add(elem);
return result;
}
/**
* Creates a map entry (for use with {@link Utils#mkMap(java.util.Map.Entry[])})
*
* @param k The key
* @param v The value
* @param <K> The key type
* @param <V> The value type
* @return An entry
*/
public static <K, V> Map.Entry<K, V> mkEntry(final K k, final V v) {
return new AbstractMap.SimpleEntry<>(k, v);
}
/**
* Creates a map from a sequence of entries
*
* @param entries The entries to map
* @param <K> The key type
* @param <V> The value type
* @return A map
*/
@SafeVarargs
public static <K, V> Map<K, V> mkMap(final Map.Entry<K, V>... entries) {
final LinkedHashMap<K, V> result = new LinkedHashMap<>();
for (final Map.Entry<K, V> entry : entries) {
result.put(entry.getKey(), entry.getValue());
}
return result;
}
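// Illustrative usage (not part of the upstream sources), combining mkEntry and mkMap, as is
// commonly done in tests; insertion order is preserved because the backing map is linked:
//   Map<String, Integer> counts = mkMap(mkEntry("a", 1), mkEntry("b", 2));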
/**
* Creates a {@link Properties} from a map
*
* @param properties A map of properties to add
* @return The properties object
*/
public static Properties mkProperties(final Map<String, String> properties) {
final Properties result = new Properties();
for (final Map.Entry<String, String> entry : properties.entrySet()) {
result.setProperty(entry.getKey(), entry.getValue());
}
return result;
}
/**
* Creates a {@link Properties} from a map
*
* @param properties A map of properties to add
* @return The properties object
*/
public static Properties mkObjectProperties(final Map<String, Object> properties) {
final Properties result = new Properties();
for (final Map.Entry<String, Object> entry : properties.entrySet()) {
result.put(entry.getKey(), entry.getValue());
}
return result;
}
/**
* Recursively delete the given file/directory and any subfiles (if any exist)
*
* @param rootFile The root file at which to begin deleting
*/
public static void delete(final File rootFile) throws IOException {
if (rootFile == null)
return;
Files.walkFileTree(rootFile.toPath(), new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException {
// If the root path did not exist, ignore the error; otherwise throw it.
if (exc instanceof NoSuchFileException && path.toFile().equals(rootFile))
return FileVisitResult.TERMINATE;
throw exc;
}
@Override
public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException {
Files.delete(path);
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException {
// KAFKA-8999: if there's an exception thrown previously already, we should throw it
if (exc != null) {
throw exc;
}
Files.delete(path);
return FileVisitResult.CONTINUE;
}
});
}
/**
* Returns an empty list if the given list is null, otherwise returns the list itself.
* @param other the list to check for null
* @return {@code other} if it is non-null, otherwise an immutable empty list
*/
public static <T> List<T> safe(List<T> other) {
return other == null ? Collections.emptyList() : other;
}
/**
* Get the ClassLoader which loaded Kafka.
*/
public static ClassLoader getKafkaClassLoader() {
return Utils.class.getClassLoader();
}
/**
* Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
* loaded Kafka.
*
* This should be used whenever passing a ClassLoader to Class.forName
*/
public static ClassLoader getContextOrKafkaClassLoader() {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null)
return getKafkaClassLoader();
else
return cl;
}
/**
* Attempts to move source to target atomically and falls back to a non-atomic move if it fails.
* This function also flushes the parent directory to guarantee crash consistency.
*
* @throws IOException if both atomic and non-atomic moves fail, or parent dir flush fails.
*/
public static void atomicMoveWithFallback(Path source, Path target) throws IOException {
atomicMoveWithFallback(source, target, true);
}
/**
* Attempts to move source to target atomically and falls back to a non-atomic move if it fails.
* This function allows callers to decide whether to flush the parent directory. This is needed
* when a sequence of atomicMoveWithFallback is called for the same directory and we don't want
* to repeatedly flush the same parent directory.
*
* @throws IOException if both atomic and non-atomic moves fail,
* or parent dir flush fails if needFlushParentDir is true.
*/
public static void atomicMoveWithFallback(Path source, Path target, boolean needFlushParentDir) throws IOException {
try {
Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
} catch (IOException outer) {
try {
Files.move(source, target, StandardCopyOption.REPLACE_EXISTING);
log.debug("Non-atomic move of {} to {} succeeded after atomic move failed due to {}", source, target,
outer.getMessage());
} catch (IOException inner) {
inner.addSuppressed(outer);
throw inner;
}
} finally {
if (needFlushParentDir) {
flushDir(target.toAbsolutePath().normalize().getParent());
}
}
}
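// Illustrative usage (hypothetical paths, not part of the upstream sources): when moving several
// files into the same directory, flush the parent directory only once, on the final move.
//   atomicMoveWithFallback(Paths.get("/tmp/a.tmp"), Paths.get("/data/logs/a"), false);
//   atomicMoveWithFallback(Paths.get("/tmp/b.tmp"), Paths.get("/data/logs/b"), true);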
/**
* Flushes dirty directories to guarantee crash consistency.
*
* Note: We don't fsync directories on Windows OS because otherwise it'll throw AccessDeniedException (KAFKA-13391)
*
* @throws IOException if flushing the directory fails.
*/
public static void flushDir(Path path) throws IOException {
if (path != null && !OperatingSystem.IS_WINDOWS && !OperatingSystem.IS_ZOS) {
try (FileChannel dir = FileChannel.open(path, StandardOpenOption.READ)) {
dir.force(true);
}
}
}
/**
* Closes all the provided closeables.
* @throws IOException if any of the close methods throws an IOException.
* The first IOException is thrown with subsequent exceptions
* added as suppressed exceptions.
*/
public static void closeAll(Closeable... closeables) throws IOException {
IOException exception = null;
for (Closeable closeable : closeables) {
try {
if (closeable != null)
closeable.close();
} catch (IOException e) {
if (exception != null)
exception.addSuppressed(e);
else
exception = e;
}
}
if (exception != null)
throw exception;
}
public static void swallow(final Logger log, final Level level, final String what, final Runnable code) {
swallow(log, level, what, code, null);
}
/**
* Run the supplied code. If an exception is thrown, it is swallowed and registered to the firstException parameter.
*/
public static void swallow(final Logger log, final Level level, final String what, final Runnable code,
final AtomicReference<Throwable> firstException) {
if (code != null) {
try {
code.run();
} catch (Throwable t) {
switch (level) {
case INFO:
log.info(what, t);
break;
case DEBUG:
log.debug(what, t);
break;
case ERROR:
log.error(what, t);
break;
case TRACE:
log.trace(what, t);
break;
case WARN:
default:
log.warn(what, t);
}
if (firstException != null)
firstException.compareAndSet(null, t);
}
}
}
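// Illustrative usage (not part of the upstream sources; "task" and "client" are hypothetical):
// run several cleanup steps, remembering only the first failure so it can be rethrown after
// every step has executed.
//   AtomicReference<Throwable> first = new AtomicReference<>();
//   swallow(log, Level.WARN, "stopping task", task::stop, first);
//   swallow(log, Level.WARN, "closing client", client::close, first);
//   if (first.get() != null)
//       throw new RuntimeException("cleanup failed", first.get());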
/**
* An {@link AutoCloseable} interface without a throws clause in the signature
*
* This is used with lambda expressions in try-with-resources clauses
* to avoid casting un-checked exceptions to checked exceptions unnecessarily.
*/
@FunctionalInterface
public interface UncheckedCloseable extends AutoCloseable {
@Override
void close();
}
/**
* Closes {@code closeable} and if an exception is thrown, it is logged at the WARN level.
* <b>Be cautious when passing method references as an argument.</b> For example:
* <p>
* {@code closeQuietly(task::stop, "source task");}
* <p>
* Although this method gracefully handles null {@link AutoCloseable} objects, attempts to take a method
* reference from a null object will result in a {@link NullPointerException}. In the example code above,
* it would be the caller's responsibility to ensure that {@code task} was non-null before attempting to
* use a method reference from it.
*/
public static void closeQuietly(AutoCloseable closeable, String name) {
if (closeable != null) {
try {
closeable.close();
} catch (Throwable t) {
log.warn("Failed to close {} with type {}", name, closeable.getClass().getName(), t);
}
}
}
/**
* Closes {@code closeable} and if an exception is thrown, it is registered to the firstException parameter.
* <b>Be cautious when passing method references as an argument.</b> For example:
* <p>
* {@code closeQuietly(task::stop, "source task");}
* <p>
* Although this method gracefully handles null {@link AutoCloseable} objects, attempts to take a method
* reference from a null object will result in a {@link NullPointerException}. In the example code above,
* it would be the caller's responsibility to ensure that {@code task} was non-null before attempting to
* use a method reference from it.
*/
public static void closeQuietly(AutoCloseable closeable, String name, AtomicReference<Throwable> firstException) {
if (closeable != null) {
try {
closeable.close();
} catch (Throwable t) {
firstException.compareAndSet(null, t);
log.error("Failed to close {} with type {}", name, closeable.getClass().getName(), t);
}
}
}
/**
* Close all closeable objects, even if one of them throws an exception.
* @param firstException keeps the first exception thrown, if any
* @param name the name used in log messages when closing these objects
* @param closeables the closeable objects
*/
public static void closeAllQuietly(AtomicReference<Throwable> firstException, String name, AutoCloseable... closeables) {
for (AutoCloseable closeable : closeables) closeQuietly(closeable, name, firstException);
}
/**
* A cheap way to deterministically convert a number to a positive value. When the input is
* positive, the original value is returned. When the input number is negative, the returned
* positive value is the original value bitwise ANDed with 0x7fffffff, which is not the same as
* its absolute value.
*
* Note: changing this method in the future will possibly cause partition selection not to be
* compatible with the existing messages already placed on a partition since it is used
* in producer's partition selection logic {@link org.apache.kafka.clients.producer.KafkaProducer}
*
* @param number a given number
* @return a positive number.
*/
public static int toPositive(int number) {
return number & 0x7fffffff;
}
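// Worked examples (not part of the upstream sources): masking the sign bit is not the same as
// Math.abs, and it never overflows the way Math.abs(Integer.MIN_VALUE) does.
//   toPositive(-1);                 // => 2147483647, whereas Math.abs(-1) == 1
//   toPositive(Integer.MIN_VALUE);  // => 0, whereas Math.abs(Integer.MIN_VALUE) overflows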
/**
* Read a size-delimited byte buffer starting at the given offset.
* @param buffer Buffer containing the size and data
* @param start Offset in the buffer to read from
* @return A slice of the buffer containing only the delimited data (excluding the size)
*/
public static ByteBuffer sizeDelimited(ByteBuffer buffer, int start) {
int size = buffer.getInt(start);
if (size < 0) {
return null;
} else {
ByteBuffer b = buffer.duplicate();
b.position(start + 4);
b = b.slice();
b.limit(size);
b.rewind();
return b;
}
}
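// Illustrative layout (not part of the upstream sources): a size-delimited value is a four-byte
// big-endian length followed by that many payload bytes.
//   ByteBuffer buf = ByteBuffer.allocate(9);
//   buf.putInt(5).put("hello".getBytes(StandardCharsets.UTF_8)).flip();
//   ByteBuffer payload = sizeDelimited(buf, 0);  // five-byte slice containing "hello"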
/**
* Read data from the channel to the given byte buffer until there are no bytes remaining in the buffer. If the end
* of the file is reached while there are bytes remaining in the buffer, an EOFException is thrown.
*
* @param channel File channel containing the data to read from
* @param destinationBuffer The buffer into which bytes are to be transferred
* @param position The file position at which the transfer is to begin; it must be non-negative
* @param description A description of what is being read; this will be included in the EOFException if it is thrown
*
* @throws IllegalArgumentException If position is negative
* @throws EOFException If the end of the file is reached while there are remaining bytes in the destination buffer
* @throws IOException If an I/O error occurs, see {@link FileChannel#read(ByteBuffer, long)} for details on the
* possible exceptions
*/
public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description) throws IOException {
if (position < 0) {
throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position);
}
int expectedReadBytes = destinationBuffer.remaining();
readFully(channel, destinationBuffer, position);
if (destinationBuffer.hasRemaining()) {
throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " +
"but reached end of file after reading %d bytes. Started read from position %d.",
description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position));
}
}
/**
* Read data from the channel to the given byte buffer until there are no bytes remaining in the buffer or the end
* of the file has been reached.
*
* @param channel File channel containing the data to read from
* @param destinationBuffer The buffer into which bytes are to be transferred
* @param position The file position at which the transfer is to begin; it must be non-negative
*
* @throws IllegalArgumentException If position is negative
* @throws IOException If an I/O error occurs, see {@link FileChannel#read(ByteBuffer, long)} for details on the
* possible exceptions
*/
public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException {
if (position < 0) {
throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position);
}
long currentPosition = position;
int bytesRead;
do {
bytesRead = channel.read(destinationBuffer, currentPosition);
currentPosition += bytesRead;
} while (bytesRead != -1 && destinationBuffer.hasRemaining());
}
/**
* Read data from the input stream to the given byte buffer until there are no bytes remaining in the buffer or the
* end of the stream has been reached.
*
* @param inputStream Input stream to read from
* @param destinationBuffer The buffer into which bytes are to be transferred (it must be backed by an array)
*
* @throws IOException If an I/O error occurs
*/
public static void readFully(InputStream inputStream, ByteBuffer destinationBuffer) throws IOException {
if (!destinationBuffer.hasArray())
throw new IllegalArgumentException("destinationBuffer must be backed by an array");
int initialOffset = destinationBuffer.arrayOffset() + destinationBuffer.position();
byte[] array = destinationBuffer.array();
int length = destinationBuffer.remaining();
int totalBytesRead = 0;
do {
int bytesRead = inputStream.read(array, initialOffset + totalBytesRead, length - totalBytesRead);
if (bytesRead == -1)
break;
totalBytesRead += bytesRead;
} while (length > totalBytesRead);
destinationBuffer.position(destinationBuffer.position() + totalBytesRead);
}
public static void writeFully(FileChannel channel, ByteBuffer sourceBuffer) throws IOException {
while (sourceBuffer.hasRemaining())
channel.write(sourceBuffer);
}
/**
* Attempts to write data from the source buffer to a {@link TransferableChannel}. Callers may need to invoke this
* method multiple times, since a single call does not guarantee that all the data in the source buffer is written
* to the destination channel.
*
* @param destChannel The destination channel
* @param position The position in the source buffer at which writing begins
* @param length The maximum number of bytes to write
* @param sourceBuffer The source buffer
*
* @return The length of the actual written data
* @throws IOException If an I/O error occurs
*/
public static long tryWriteTo(TransferableChannel destChannel,
int position,
int length,
ByteBuffer sourceBuffer) throws IOException {
ByteBuffer dup = sourceBuffer.duplicate();
dup.position(position);
dup.limit(position + length);
return destChannel.write(dup);
}
/**
* Write the contents of a buffer to an output stream. The bytes are copied from the current position
* in the buffer.
* @param out The output to write to
* @param buffer The buffer to write from
* @param length The number of bytes to write
* @throws IOException For any errors writing to the output
*/
public static void writeTo(DataOutput out, ByteBuffer buffer, int length) throws IOException {
if (buffer.hasArray()) {
out.write(buffer.array(), buffer.position() + buffer.arrayOffset(), length);
} else {
int pos = buffer.position();
for (int i = pos; i < length + pos; i++)
out.writeByte(buffer.get(i));
}
}
public static <T> List<T> toList(Iterable<T> iterable) {
return toList(iterable.iterator());
}
public static <T> List<T> toList(Iterator<T> iterator) {
List<T> res = new ArrayList<>();
while (iterator.hasNext())
res.add(iterator.next());
return res;
}
public static <T> List<T> toList(Iterator<T> iterator, Predicate<T> predicate) {
List<T> res = new ArrayList<>();
while (iterator.hasNext()) {
T e = iterator.next();
if (predicate.test(e)) {
res.add(e);
}
}
return res;
}
public static int to32BitField(final Set<Byte> bytes) {
int value = 0;
for (final byte b : bytes)
value |= 1 << checkRange(b);
return value;
}
private static byte checkRange(final byte i) {
if (i > 31)
throw new IllegalArgumentException("out of range: i>31, i = " + i);
if (i < 0)
throw new IllegalArgumentException("out of range: i<0, i = " + i);
return i;
}
public static Set<Byte> from32BitField(final int intValue) {
Set<Byte> result = new HashSet<>();
for (int itr = intValue, count = 0; itr != 0; itr >>>= 1) {
if ((itr & 1) != 0)
result.add((byte) count);
count++;
}
return result;
}
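// Worked round trip (not part of the upstream sources): each byte in the set selects one bit
// position of the 32-bit field, so bits 0, 1 and 5 encode to 1 + 2 + 32 = 35.
//   to32BitField(mkSet((byte) 0, (byte) 1, (byte) 5));  // => 35
//   from32BitField(35);                                 // => {0, 1, 5}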
/**
* A Collector that offers two kinds of convenience:
* 1. You can specify the concrete type of the returned Map
* 2. You can turn a stream of Entries directly into a Map without having to mess with a key function
* and a value function. In particular, this is handy if all you need to do is apply a filter to a Map's entries.
*
* One thing to be wary of: These types are too "distant" for IDE type checkers to warn you if you
* try to do something like build a TreeMap of non-Comparable elements. You'd get a runtime exception for that.
*
* @param mapSupplier The constructor for your concrete map type.
* @param <K> The Map key type
* @param <V> The Map value type
* @param <M> The type of the Map itself.
* @return new Collector<Map.Entry<K, V>, M, M>
*/
public static <K, V, M extends Map<K, V>> Collector<Map.Entry<K, V>, M, M> entriesToMap(final Supplier<M> mapSupplier) {
return new Collector<Map.Entry<K, V>, M, M>() {
@Override
public Supplier<M> supplier() {
return mapSupplier;
}
@Override
public BiConsumer<M, Map.Entry<K, V>> accumulator() {
return (map, entry) -> map.put(entry.getKey(), entry.getValue());
}
@Override
public BinaryOperator<M> combiner() {
return (map, map2) -> {
map.putAll(map2);
return map;
};
}
@Override
public Function<M, M> finisher() {
return map -> map;
}
@Override
public Set<Characteristics> characteristics() {
return EnumSet.of(Characteristics.UNORDERED, Characteristics.IDENTITY_FINISH);
}
};
}
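// Illustrative usage (not part of the upstream sources; "source" is a hypothetical
// Map<String, Integer>): filter a map's entries and collect them straight into a chosen
// concrete map type, without separate key and value functions.
//   Map<String, Integer> evens = source.entrySet().stream()
//           .filter(e -> e.getValue() % 2 == 0)
//           .collect(entriesToMap(LinkedHashMap::new));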
@SafeVarargs
public static <E> Set<E> union(final Supplier<Set<E>> constructor, final Set<E>... set) {
final Set<E> result = constructor.get();
for (final Set<E> s : set) {
result.addAll(s);
}
return result;
}
@SafeVarargs
public static <E> Set<E> intersection(final Supplier<Set<E>> constructor, final Set<E> first, final Set<E>... set) {
final Set<E> result = constructor.get();
result.addAll(first);
for (final Set<E> s : set) {
result.retainAll(s);
}
return result;
}
public static <E> Set<E> diff(final Supplier<Set<E>> constructor, final Set<E> left, final Set<E> right) {
final Set<E> result = constructor.get();
result.addAll(left);
result.removeAll(right);
return result;
}
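// Illustrative usage (not part of the upstream sources) of the three set operations above:
//   Set<Integer> a = mkSet(1, 2, 3);
//   Set<Integer> b = mkSet(2, 3, 4);
//   union(HashSet::new, a, b);         // => {1, 2, 3, 4}
//   intersection(HashSet::new, a, b);  // => {2, 3}
//   diff(HashSet::new, a, b);          // => {1}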
public static <K, V> Map<K, V> filterMap(final Map<K, V> map, final Predicate<Entry<K, V>> filterPredicate) {
return map.entrySet().stream().filter(filterPredicate).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
/**
* Converts a Properties object to a map. All keys in the properties must be of string type; otherwise, a ConfigException is thrown.
* @param properties to be converted
* @return a map including all elements in properties
*/
public static Map<String, Object> propsToMap(Properties properties) {
Map<String, Object> map = new HashMap<>(properties.size());
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
if (entry.getKey() instanceof String) {
String k = (String) entry.getKey();
map.put(k, properties.get(k));
} else {
throw new ConfigException(entry.getKey().toString(), entry.getValue(), "Key must be a string.");
}
}
return map;
}
/**
* Convert timestamp to an epoch value
* @param timestamp the timestamp to be converted, the accepted formats are:
* (1) yyyy-MM-dd'T'HH:mm:ss.SSS, ex: 2020-11-10T16:51:38.198
* (2) yyyy-MM-dd'T'HH:mm:ss.SSSZ, ex: 2020-11-10T16:51:38.198+0800
* (3) yyyy-MM-dd'T'HH:mm:ss.SSSX, ex: 2020-11-10T16:51:38.198+08
* (4) yyyy-MM-dd'T'HH:mm:ss.SSSXX, ex: 2020-11-10T16:51:38.198+0800
* (5) yyyy-MM-dd'T'HH:mm:ss.SSSXXX, ex: 2020-11-10T16:51:38.198+08:00
*
* @return epoch value of a given timestamp (i.e. the number of milliseconds since January 1, 1970, 00:00:00 GMT)
* @throws ParseException for timestamp that doesn't follow ISO8601 format or the format is not expected
*/
public static long getDateTime(String timestamp) throws ParseException, IllegalArgumentException {
if (timestamp == null) {
throw new IllegalArgumentException("Error parsing timestamp with null value");
}
final String[] timestampParts = timestamp.split("T");
if (timestampParts.length < 2) {
throw new ParseException("Error parsing timestamp. It does not contain a 'T' according to ISO8601 format", timestamp.length());
}
final String secondPart = timestampParts[1];
if (!(secondPart.contains("+") || secondPart.contains("-") || secondPart.contains("Z"))) {
timestamp = timestamp + "Z";
}
SimpleDateFormat simpleDateFormat = new SimpleDateFormat();
// strictly parsing the date/time format
simpleDateFormat.setLenient(false);
try {
simpleDateFormat.applyPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
final Date date = simpleDateFormat.parse(timestamp);
return date.getTime();
} catch (final ParseException e) {
simpleDateFormat.applyPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX");
final Date date = simpleDateFormat.parse(timestamp);
return date.getTime();
}
}
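// Illustrative usage (not part of the upstream sources): a timestamp without an explicit zone
// offset gets a trailing "Z" appended above, so it is parsed as UTC.
//   long epochMs = getDateTime("2020-11-10T16:51:38.198");  // parsed as 2020-11-10T16:51:38.198Z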
@SuppressWarnings("unchecked")
public static <S> Iterator<S> covariantCast(Iterator<? extends S> iterator) {
return (Iterator<S>) iterator;
}
/**
* Checks if a string is null, empty or whitespace only.
* @param str a string to be checked
* @return true if the string is null, empty or whitespace only; otherwise, return false.
*/
public static boolean isBlank(String str) {
return str == null || str.trim().isEmpty();
}
/**
* Get an array containing all of the {@link Object#toString string representations} of a given enumerable type.
* @param enumClass the enum class; may not be null
* @return an array with the names of every value for the enum class; never null, but may be empty
* if there are no values defined for the enum
*/
public static String[] enumOptions(Class<? extends Enum<?>> enumClass) {
Objects.requireNonNull(enumClass);
if (!enumClass.isEnum()) {
throw new IllegalArgumentException("Class " + enumClass + " is not an enumerable type");
}
return Stream.of(enumClass.getEnumConstants())
.map(Object::toString)
.toArray(String[]::new);
}
/**
* Convert time instant to readable string for logging
* @param timestamp the timestamp of the instant to be converted.
*
* @return string value of a given timestamp in the format "yyyy-MM-dd HH:mm:ss,SSS XXX", rendered in the system default time zone
*/
public static String toLogDateTimeFormat(long timestamp) {
final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss,SSS XXX");
return Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).format(dateTimeFormatter);
}
/**
* Replace the given string suffix with the new suffix. If the string doesn't end with the given suffix throw an exception.
*/
public static String replaceSuffix(String str, String oldSuffix, String newSuffix) {
if (!str.endsWith(oldSuffix))
throw new IllegalArgumentException("Expected string to end with " + oldSuffix + " but string is " + str);
return str.substring(0, str.length() - oldSuffix.length()) + newSuffix;
}
/**
* Find all key/value pairs whose keys begin with the given prefix, and remove that prefix from all
* resulting keys.
* @param map the map to filter key/value pairs from
* @param prefix the prefix to search keys for
* @return a {@link Map} containing a key/value pair for every key/value pair in the {@code map}
* parameter whose key begins with the given {@code prefix} and whose corresponding keys have
* the prefix stripped from them; may be empty, but never null
* @param <V> the type of values stored in the map
*/
public static <V> Map<String, V> entriesWithPrefix(Map<String, V> map, String prefix) {
return entriesWithPrefix(map, prefix, true);
}
/**
* Find all key/value pairs whose keys begin with the given prefix, optionally removing that prefix
* from all resulting keys.
* @param map the map to filter key/value pairs from
* @param prefix the prefix to search keys for
* @param strip whether the keys of the returned map should not include the prefix
* @return a {@link Map} containing a key/value pair for every key/value pair in the {@code map}
* parameter whose key begins with the given {@code prefix}; may be empty, but never null
* @param <V> the type of values stored in the map
*/
public static <V> Map<String, V> entriesWithPrefix(Map<String, V> map, String prefix, boolean strip) {
Map<String, V> result = new HashMap<>();
for (Map.Entry<String, V> entry : map.entrySet()) {
if (entry.getKey().startsWith(prefix) && entry.getKey().length() > prefix.length()) {
if (strip)
result.put(entry.getKey().substring(prefix.length()), entry.getValue());
else
result.put(entry.getKey(), entry.getValue());
}
}
return result;
}
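// Illustrative usage (not part of the upstream sources; "configs" is a hypothetical
// Map<String, Object>): extract every "producer."-prefixed override, stripping the prefix.
//   Map<String, Object> overrides = entriesWithPrefix(configs, "producer.");
//   // a "producer.acks" -> "all" entry becomes "acks" -> "all"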
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/utils/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides common utilities for Kafka server and clients.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.utils; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/AclCreateResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.errors.ApiException;
import java.util.Optional;
@InterfaceStability.Evolving
public class AclCreateResult {
public static final AclCreateResult SUCCESS = new AclCreateResult();
private final ApiException exception;
private AclCreateResult() {
this(null);
}
public AclCreateResult(ApiException exception) {
this.exception = exception;
}
/**
* Returns any exception during create. If exception is empty, the request has succeeded.
*/
public Optional<ApiException> exception() {
return exception == null ? Optional.empty() : Optional.of(exception);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/AclDeleteResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import java.util.Collections;
import java.util.Collection;
import java.util.Optional;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.errors.ApiException;
@InterfaceStability.Evolving
public class AclDeleteResult {
private final ApiException exception;
private final Collection<AclBindingDeleteResult> aclBindingDeleteResults;
public AclDeleteResult(ApiException exception) {
this(Collections.emptySet(), exception);
}
public AclDeleteResult(Collection<AclBindingDeleteResult> deleteResults) {
this(deleteResults, null);
}
private AclDeleteResult(Collection<AclBindingDeleteResult> deleteResults, ApiException exception) {
this.aclBindingDeleteResults = deleteResults;
this.exception = exception;
}
/**
* Returns any exception while attempting to match ACL filter to delete ACLs.
* If exception is empty, filtering has succeeded. See {@link #aclBindingDeleteResults()}
* for deletion results for each filter.
*/
public Optional<ApiException> exception() {
return exception == null ? Optional.empty() : Optional.of(exception);
}
/**
* Returns delete result for each matching ACL binding.
*/
public Collection<AclBindingDeleteResult> aclBindingDeleteResults() {
return aclBindingDeleteResults;
}
/**
* Delete result for each ACL binding that matched a delete filter.
*/
public static class AclBindingDeleteResult {
private final AclBinding aclBinding;
private final ApiException exception;
public AclBindingDeleteResult(AclBinding aclBinding) {
this(aclBinding, null);
}
public AclBindingDeleteResult(AclBinding aclBinding, ApiException exception) {
this.aclBinding = aclBinding;
this.exception = exception;
}
/**
* Returns ACL binding that matched the delete filter. If {@link #exception()} is
* empty, the ACL binding was successfully deleted.
*/
public AclBinding aclBinding() {
return aclBinding;
}
/**
* Returns any exception that resulted in failure to delete ACL binding.
* If exception is empty, the ACL binding was successfully deleted.
*/
public Optional<ApiException> exception() {
return exception == null ? Optional.empty() : Optional.of(exception);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/Action.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import java.util.Objects;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.resource.ResourcePattern;
@InterfaceStability.Evolving
public class Action {
private final ResourcePattern resourcePattern;
private final AclOperation operation;
private final int resourceReferenceCount;
private final boolean logIfAllowed;
private final boolean logIfDenied;
/**
* @param operation non-null operation being performed
* @param resourcePattern non-null resource pattern on which this action is being performed
*/
public Action(AclOperation operation,
ResourcePattern resourcePattern,
int resourceReferenceCount,
boolean logIfAllowed,
boolean logIfDenied) {
this.operation = Objects.requireNonNull(operation, "operation can't be null");
this.resourcePattern = Objects.requireNonNull(resourcePattern, "resourcePattern can't be null");
this.logIfAllowed = logIfAllowed;
this.logIfDenied = logIfDenied;
this.resourceReferenceCount = resourceReferenceCount;
}
/**
* @return a non-null resource pattern on which this action is being performed
*/
public ResourcePattern resourcePattern() {
return resourcePattern;
}
/**
* @return a non-null operation being performed
*/
public AclOperation operation() {
return operation;
}
/**
* Indicates if audit logs tracking ALLOWED access should include this action if result is
* ALLOWED. The flag is true if access to a resource is granted while processing the request as a
* result of this authorization. The flag is false only for requests used to describe access where
* no operation on the resource is actually performed based on the authorization result.
*/
public boolean logIfAllowed() {
return logIfAllowed;
}
/**
* Indicates if audit logs tracking DENIED access should include this action if result is
* DENIED. The flag is true if access to a resource was explicitly requested and request
* is denied as a result of this authorization request. The flag is false if request was
* filtering out authorized resources (e.g. to subscribe to regex pattern). The flag is also
* false if this is an optional authorization where an alternative resource authorization is
* applied if this fails (e.g. Cluster:Create which is subsequently overridden by Topic:Create).
*/
public boolean logIfDenied() {
return logIfDenied;
}
/**
* Number of times the resource being authorized is referenced within the request. For example, a single
* request may reference `n` topic partitions of the same topic. Brokers will authorize the topic once
* with `resourceReferenceCount=n`. Authorizers may include the count in audit logs.
*/
public int resourceReferenceCount() {
return resourceReferenceCount;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Action)) {
return false;
}
Action that = (Action) o;
return Objects.equals(this.resourcePattern, that.resourcePattern) &&
Objects.equals(this.operation, that.operation) &&
this.resourceReferenceCount == that.resourceReferenceCount &&
this.logIfAllowed == that.logIfAllowed &&
this.logIfDenied == that.logIfDenied;
}
@Override
public int hashCode() {
return Objects.hash(resourcePattern, operation, resourceReferenceCount, logIfAllowed, logIfDenied);
}
@Override
public String toString() {
return "Action(" +
"resourcePattern='" + resourcePattern + '\'' +
", operation='" + operation + '\'' +
", resourceReferenceCount='" + resourceReferenceCount + '\'' +
", logIfAllowed='" + logIfAllowed + '\'' +
", logIfDenied='" + logIfDenied + '\'' +
')';
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/AuthorizableRequestContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import java.net.InetAddress;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.SecurityProtocol;
/**
* Request context interface that provides data from request header as well as connection
* and authentication information to plugins.
*/
@InterfaceStability.Evolving
public interface AuthorizableRequestContext {
/**
* Returns name of listener on which request was received.
*/
String listenerName();
/**
* Returns the security protocol for the listener on which request was received.
*/
SecurityProtocol securityProtocol();
/**
* Returns authenticated principal for the connection on which request was received.
*/
KafkaPrincipal principal();
/**
* Returns client IP address from which request was sent.
*/
InetAddress clientAddress();
/**
* 16-bit API key of the request from the request header. See
* https://kafka.apache.org/protocol#protocol_api_keys for request types.
*/
int requestType();
/**
* Returns the request version from the request header.
*/
int requestVersion();
/**
* Returns the client id from the request header.
*/
String clientId();
/**
* Returns the correlation id from the request header.
*/
int correlationId();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/AuthorizationResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import org.apache.kafka.common.annotation.InterfaceStability;
@InterfaceStability.Evolving
public enum AuthorizationResult {
ALLOWED,
DENIED
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/Authorizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.Endpoint;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.utils.SecurityUtils;
import java.io.Closeable;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletionStage;
/**
* Pluggable authorizer interface for Kafka brokers.
*
* Startup sequence in brokers:
* <ol>
* <li>Broker creates authorizer instance if configured in `authorizer.class.name`.</li>
* <li>Broker configures and starts authorizer instance. Authorizer implementation starts loading its metadata.</li>
* <li>Broker starts SocketServer to accept connections and process requests.</li>
* <li>For each listener, SocketServer waits for authorization metadata to be available in the
* authorizer before accepting connections. The future returned by {@link #start(AuthorizerServerInfo)}
* for each listener must return only when authorizer is ready to authorize requests on the listener.</li>
* <li>Broker accepts connections. For each connection, broker performs authentication and then accepts Kafka requests.
* For each request, broker invokes {@link #authorize(AuthorizableRequestContext, List)} to authorize
* actions performed by the request.</li>
* </ol>
*
* Authorizer implementation class may optionally implement {@link org.apache.kafka.common.Reconfigurable}
* to enable dynamic reconfiguration without restarting the broker.
* <p>
* <b>Threading model:</b>
* <ul>
* <li>All authorizer operations including authorization and ACL updates must be thread-safe.</li>
* <li>ACL update methods are asynchronous. Implementations with low update latency may return a
* completed future using {@link java.util.concurrent.CompletableFuture#completedFuture(Object)}.
* This ensures that the request will be handled synchronously by the caller without using a
* purgatory to wait for the result. If ACL updates require remote communication which may block,
* return a future that is completed asynchronously when the remote operation completes. This enables
* the caller to process other requests on the request threads without blocking.</li>
* <li>Any threads or thread pools used for processing remote operations asynchronously can be started during
* {@link #start(AuthorizerServerInfo)}. These threads must be shutdown during {@link Authorizer#close()}.</li>
* </ul>
* </p>
*/
@InterfaceStability.Evolving
public interface Authorizer extends Configurable, Closeable {
/**
* Starts loading authorization metadata and returns futures that can be used to wait until
* metadata for authorizing requests on each listener is available. Each listener will be
* started only after its metadata is available and authorizer is ready to start authorizing
* requests on that listener.
*
* @param serverInfo Metadata for the broker including broker id and listener endpoints
* @return CompletionStage for each endpoint that completes when authorizer is ready to
* start authorizing requests on that listener.
*/
Map<Endpoint, ? extends CompletionStage<Void>> start(AuthorizerServerInfo serverInfo);
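// Sketch (not upstream code): an authorizer whose metadata is available immediately can return
// an already-completed future for every endpoint, so no listener start-up is delayed:
//   return serverInfo.endpoints().stream()
//           .collect(Collectors.toMap(e -> e, e -> CompletableFuture.<Void>completedFuture(null)));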
/**
* Authorizes the specified action. Additional metadata for the action is specified
* in `requestContext`.
* <p>
* This is a synchronous API designed for use with locally cached ACLs. Since this method is invoked on the
* request thread while processing each request, implementations of this method should avoid time-consuming
* remote communication that may block request threads.
*
* @param requestContext Request context including request type, security protocol and listener name
* @param actions Actions being authorized including resource and operation for each action
* @return List of authorization results for each action in the same order as the provided actions
*/
List<AuthorizationResult> authorize(AuthorizableRequestContext requestContext, List<Action> actions);
/**
* Creates new ACL bindings.
* <p>
* This is an asynchronous API that enables the caller to avoid blocking during the update. Implementations of this
* API can return completed futures using {@link java.util.concurrent.CompletableFuture#completedFuture(Object)}
* to process the update synchronously on the request thread.
*
* @param requestContext Request context if the ACL is being created by a broker to handle
* a client request to create ACLs. This may be null if ACLs are created directly in ZooKeeper
* using AclCommand.
* @param aclBindings ACL bindings to create
*
* @return Create result for each ACL binding in the same order as in the input list. Each result
* is returned as a CompletionStage that completes when the result is available.
*/
List<? extends CompletionStage<AclCreateResult>> createAcls(AuthorizableRequestContext requestContext, List<AclBinding> aclBindings);
/**
* Deletes all ACL bindings that match the provided filters.
* <p>
* This is an asynchronous API that enables the caller to avoid blocking during the update. Implementations of this
* API can return completed futures using {@link java.util.concurrent.CompletableFuture#completedFuture(Object)}
* to process the update synchronously on the request thread.
* <p>
* Refer to the authorizer implementation docs for details on concurrent update guarantees.
*
* @param requestContext Request context if the ACL is being deleted by a broker to handle
* a client request to delete ACLs. This may be null if ACLs are deleted directly in ZooKeeper
* using AclCommand.
* @param aclBindingFilters Filters to match ACL bindings that are to be deleted
*
* @return Delete result for each filter in the same order as in the input list.
* Each result indicates which ACL bindings were actually deleted as well as any
* bindings that matched but could not be deleted. Each result is returned as a
* CompletionStage that completes when the result is available.
*/
List<? extends CompletionStage<AclDeleteResult>> deleteAcls(AuthorizableRequestContext requestContext, List<AclBindingFilter> aclBindingFilters);
/**
* Returns ACL bindings which match the provided filter.
* <p>
* This is a synchronous API designed for use with locally cached ACLs. This method is invoked on the request
* thread while processing DescribeAcls requests and should avoid time-consuming remote communication that may
* block request threads.
*
* @return Iterator for ACL bindings, which may be populated lazily.
*/
Iterable<AclBinding> acls(AclBindingFilter filter);
/**
* Get the current number of ACLs, for the purpose of metrics. Authorizers that don't implement this function
* will simply return -1.
*/
default int aclCount() {
return -1;
}
/**
* Check if the caller is authorized to perform the given ACL operation on at least one
* resource of the given type.
*
* Custom authorizer implementations should consider overriding this default implementation because:
* 1. The default implementation iterates all AclBindings multiple times, without any caching
* by principal, host, operation, permission types, and resource types. More efficient
* implementations may be added in custom authorizers that directly access cached entries.
* 2. The default implementation cannot integrate with any audit logging included in the
* authorizer implementation.
* 3. The default implementation does not support any custom authorizer configs or other access
* rules apart from ACLs.
*
* @param requestContext Request context including request resourceType, security protocol and listener name
* @param op The ACL operation to check
* @param resourceType The resource type to check
* @return Return {@link AuthorizationResult#ALLOWED} if the caller is authorized
* to perform the given ACL operation on at least one resource of the
* given type. Return {@link AuthorizationResult#DENIED} otherwise.
*/
default AuthorizationResult authorizeByResourceType(AuthorizableRequestContext requestContext, AclOperation op, ResourceType resourceType) {
SecurityUtils.authorizeByResourceTypeCheckArgs(op, resourceType);
// Check a hard-coded name to ensure that super users are granted
// access regardless of DENY ACLs.
if (authorize(requestContext, Collections.singletonList(new Action(
op, new ResourcePattern(resourceType, "hardcode", PatternType.LITERAL),
0, true, false)))
.get(0) == AuthorizationResult.ALLOWED) {
return AuthorizationResult.ALLOWED;
}
// Filter out all the resource pattern corresponding to the RequestContext,
// AclOperation, and ResourceType
ResourcePatternFilter resourceTypeFilter = new ResourcePatternFilter(
resourceType, null, PatternType.ANY);
AclBindingFilter aclFilter = new AclBindingFilter(
resourceTypeFilter, AccessControlEntryFilter.ANY);
EnumMap<PatternType, Set<String>> denyPatterns =
new EnumMap<PatternType, Set<String>>(PatternType.class) {{
put(PatternType.LITERAL, new HashSet<>());
put(PatternType.PREFIXED, new HashSet<>());
}};
EnumMap<PatternType, Set<String>> allowPatterns =
new EnumMap<PatternType, Set<String>>(PatternType.class) {{
put(PatternType.LITERAL, new HashSet<>());
put(PatternType.PREFIXED, new HashSet<>());
}};
boolean hasWildCardAllow = false;
KafkaPrincipal principal = new KafkaPrincipal(
requestContext.principal().getPrincipalType(),
requestContext.principal().getName());
String hostAddr = requestContext.clientAddress().getHostAddress();
for (AclBinding binding : acls(aclFilter)) {
if (!binding.entry().host().equals(hostAddr) && !binding.entry().host().equals("*"))
continue;
if (!SecurityUtils.parseKafkaPrincipal(binding.entry().principal()).equals(principal)
&& !binding.entry().principal().equals("User:*"))
continue;
if (binding.entry().operation() != op
&& binding.entry().operation() != AclOperation.ALL)
continue;
if (binding.entry().permissionType() == AclPermissionType.DENY) {
switch (binding.pattern().patternType()) {
case LITERAL:
// If wildcard deny exists, return deny directly
if (binding.pattern().name().equals(ResourcePattern.WILDCARD_RESOURCE))
return AuthorizationResult.DENIED;
denyPatterns.get(PatternType.LITERAL).add(binding.pattern().name());
break;
case PREFIXED:
denyPatterns.get(PatternType.PREFIXED).add(binding.pattern().name());
break;
default:
}
continue;
}
if (binding.entry().permissionType() != AclPermissionType.ALLOW)
continue;
switch (binding.pattern().patternType()) {
case LITERAL:
if (binding.pattern().name().equals(ResourcePattern.WILDCARD_RESOURCE)) {
hasWildCardAllow = true;
continue;
}
allowPatterns.get(PatternType.LITERAL).add(binding.pattern().name());
break;
case PREFIXED:
allowPatterns.get(PatternType.PREFIXED).add(binding.pattern().name());
break;
default:
}
}
if (hasWildCardAllow) {
return AuthorizationResult.ALLOWED;
}
// For any literal allowed, if there's no dominant literal and prefix denied, return allow.
// For any prefix allowed, if there's no dominant prefix denied, return allow.
for (Map.Entry<PatternType, Set<String>> entry : allowPatterns.entrySet()) {
for (String allowStr : entry.getValue()) {
if (entry.getKey() == PatternType.LITERAL
&& denyPatterns.get(PatternType.LITERAL).contains(allowStr))
continue;
StringBuilder sb = new StringBuilder();
boolean hasDominatedDeny = false;
for (char ch : allowStr.toCharArray()) {
sb.append(ch);
if (denyPatterns.get(PatternType.PREFIXED).contains(sb.toString())) {
hasDominatedDeny = true;
break;
}
}
if (!hasDominatedDeny)
return AuthorizationResult.ALLOWED;
}
}
return AuthorizationResult.DENIED;
}
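// Worked example (not part of the upstream sources) of the dominance scan above: given an ALLOW
// LITERAL on "payments.eu" and a DENY PREFIXED on "payments." for the same principal, the
// character-by-character scan rebuilds "p", "pa", ..., "payments." and finds the dominating
// deny, so that allow grants nothing; an unrelated ALLOW LITERAL on "orders" would still yield
// AuthorizationResult.ALLOWED.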
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/AuthorizerServerInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.authorizer;
import java.util.Collection;
import org.apache.kafka.common.ClusterResource;
import org.apache.kafka.common.Endpoint;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* Runtime broker configuration metadata provided to authorizers during start up.
*/
@InterfaceStability.Evolving
public interface AuthorizerServerInfo {
/**
* Returns cluster metadata for the broker running this authorizer including cluster id.
*/
ClusterResource clusterResource();
/**
* Returns broker id. This may be a generated broker id if `broker.id` was not configured.
*/
int brokerId();
/**
* Returns endpoints for all listeners including the advertised host and port to which
* the listener is bound.
*/
Collection<Endpoint> endpoints();
/**
* Returns the inter-broker endpoint. This is one of the endpoints returned by {@link #endpoints()}.
*/
Endpoint interBrokerEndpoint();
/**
* Returns the configured early start listeners.
*/
Collection<String> earlyStartListeners();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/authorizer/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides pluggable interface for performing authorization on a Kafka server.
*/
package org.apache.kafka.server.authorizer; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/policy/AlterConfigPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.policy;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.PolicyViolationException;
import java.util.Map;
import java.util.Objects;
/**
* <p>An interface for enforcing a policy on alter configs requests.
*
* <p>Common use cases are requiring that the replication factor, <code>min.insync.replicas</code> and/or retention settings for a
* topic remain within an allowable range.
*
* <p>If <code>alter.config.policy.class.name</code> is defined, Kafka will create an instance of the specified class
* using the default constructor and will then pass the broker configs to its <code>configure()</code> method. During
* broker shutdown, the <code>close()</code> method will be invoked so that resources can be released (if necessary).
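 *
 * <p>For illustration only, a minimal sketch of an implementation (the class name and
 * the enforced limit are hypothetical):
 *
 * <pre class="code">
 * public class MinInsyncReplicasAlterPolicy implements AlterConfigPolicy {
 *
 *     public void configure(Map<String, ?> configs) {
 *         // no broker configs needed for this sketch
 *     }
 *
 *     public void validate(RequestMetadata requestMetadata) {
 *         String minIsr = requestMetadata.configs().get("min.insync.replicas");
 *         if (minIsr != null && Integer.parseInt(minIsr) < 2) {
 *             throw new PolicyViolationException("min.insync.replicas must be at least 2");
 *         }
 *     }
 *
 *     public void close() {
 *         // nothing to release
 *     }
 * }
 * </pre>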
*/
public interface AlterConfigPolicy extends Configurable, AutoCloseable {
/**
 * Class containing the alter configs request parameters.
*/
class RequestMetadata {
private final ConfigResource resource;
private final Map<String, String> configs;
/**
* Create an instance of this class with the provided parameters.
*
* This constructor is public to make testing of <code>AlterConfigPolicy</code> implementations easier.
*/
public RequestMetadata(ConfigResource resource, Map<String, String> configs) {
this.resource = resource;
this.configs = configs;
}
/**
* Return the configs in the request.
*/
public Map<String, String> configs() {
return configs;
}
public ConfigResource resource() {
return resource;
}
@Override
public int hashCode() {
return Objects.hash(resource, configs);
}
@Override
public boolean equals(Object o) {
if ((o == null) || (!o.getClass().equals(getClass()))) return false;
RequestMetadata other = (RequestMetadata) o;
return resource.equals(other.resource) &&
configs.equals(other.configs);
}
@Override
public String toString() {
return "AlterConfigPolicy.RequestMetadata(resource=" + resource +
", configs=" + configs + ")";
}
}
/**
* Validate the request parameters and throw a <code>PolicyViolationException</code> with a suitable error
* message if the alter configs request parameters for the provided resource do not satisfy this policy.
*
* Clients will receive the POLICY_VIOLATION error code along with the exception's message. Note that validation
* failure only affects the relevant resource, other resources in the request will still be processed.
*
* @param requestMetadata the alter configs request parameters for the provided resource (topic is the only resource
* type whose configs can be updated currently).
* @throws PolicyViolationException if the request parameters do not satisfy this policy.
*/
void validate(RequestMetadata requestMetadata) throws PolicyViolationException;
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/policy/CreateTopicPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.policy;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.errors.PolicyViolationException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* <p>An interface for enforcing a policy on create topics requests.
*
* <p>Common use cases are requiring that the replication factor, <code>min.insync.replicas</code> and/or retention settings for a
* topic are within an allowable range.
*
* <p>If <code>create.topic.policy.class.name</code> is defined, Kafka will create an instance of the specified class
* using the default constructor and will then pass the broker configs to its <code>configure()</code> method. During
* broker shutdown, the <code>close()</code> method will be invoked so that resources can be released (if necessary).
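 *
 * <p>For illustration only, a minimal sketch of an implementation (the class name and
 * the enforced minimum are hypothetical):
 *
 * <pre class="code">
 * public class MinReplicationFactorPolicy implements CreateTopicPolicy {
 *
 *     public void configure(Map<String, ?> configs) {
 *     }
 *
 *     public void validate(RequestMetadata requestMetadata) {
 *         Short rf = requestMetadata.replicationFactor();
 *         if (rf != null && rf < 3) {
 *             throw new PolicyViolationException("replication factor must be at least 3");
 *         }
 *     }
 *
 *     public void close() {
 *     }
 * }
 * </pre>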
*/
public interface CreateTopicPolicy extends Configurable, AutoCloseable {
/**
* Class containing the create request parameters.
*/
class RequestMetadata {
private final String topic;
private final Integer numPartitions;
private final Short replicationFactor;
private final Map<Integer, List<Integer>> replicasAssignments;
private final Map<String, String> configs;
/**
* Create an instance of this class with the provided parameters.
*
* This constructor is public to make testing of <code>CreateTopicPolicy</code> implementations easier.
*
 * @param topic the name of the topic to be created.
* @param numPartitions the number of partitions to create or null if replicasAssignments is set.
 * @param replicationFactor the replication factor for the topic or null if replicasAssignments is set.
 * @param replicasAssignments replica assignments or null if numPartitions and replicationFactor are set. The
* assignment is a map from partition id to replica (broker) ids.
* @param configs topic configs for the topic to be created, not including broker defaults. Broker configs are
* passed via the {@code configure()} method of the policy implementation.
*/
public RequestMetadata(String topic, Integer numPartitions, Short replicationFactor,
Map<Integer, List<Integer>> replicasAssignments, Map<String, String> configs) {
this.topic = topic;
this.numPartitions = numPartitions;
this.replicationFactor = replicationFactor;
this.replicasAssignments = replicasAssignments == null ? null : Collections.unmodifiableMap(replicasAssignments);
this.configs = Collections.unmodifiableMap(configs);
}
/**
* Return the name of the topic to create.
*/
public String topic() {
return topic;
}
/**
 * Return the number of partitions to create or null if replicasAssignments is not null.
*/
public Integer numPartitions() {
return numPartitions;
}
/**
 * Return the number of replicas to create or null if replicasAssignments is not null.
*/
public Short replicationFactor() {
return replicationFactor;
}
/**
* Return a map from partition id to replica (broker) ids or null if numPartitions and replicationFactor are
* set instead.
*/
public Map<Integer, List<Integer>> replicasAssignments() {
return replicasAssignments;
}
/**
* Return topic configs in the request, not including broker defaults. Broker configs are passed via
* the {@code configure()} method of the policy implementation.
*/
public Map<String, String> configs() {
return configs;
}
@Override
public int hashCode() {
return Objects.hash(topic, numPartitions, replicationFactor,
replicasAssignments, configs);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RequestMetadata other = (RequestMetadata) o;
return topic.equals(other.topic) &&
Objects.equals(numPartitions, other.numPartitions) &&
Objects.equals(replicationFactor, other.replicationFactor) &&
Objects.equals(replicasAssignments, other.replicasAssignments) &&
configs.equals(other.configs);
}
@Override
public String toString() {
return "CreateTopicPolicy.RequestMetadata(topic=" + topic +
", numPartitions=" + numPartitions +
", replicationFactor=" + replicationFactor +
", replicasAssignments=" + replicasAssignments +
", configs=" + configs + ")";
}
}
/**
* Validate the request parameters and throw a <code>PolicyViolationException</code> with a suitable error
* message if the create topics request parameters for the provided topic do not satisfy this policy.
*
* Clients will receive the POLICY_VIOLATION error code along with the exception's message. Note that validation
* failure only affects the relevant topic, other topics in the request will still be processed.
*
* @param requestMetadata the create topics request parameters for the provided topic.
* @throws PolicyViolationException if the request parameters do not satisfy this policy.
*/
void validate(RequestMetadata requestMetadata) throws PolicyViolationException;
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/policy/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides pluggable interfaces for expressing policies on topics and configs.
*/
package org.apache.kafka.server.policy; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/quota/ClientQuotaCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.quota;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import java.util.Map;
/**
* Quota callback interface for brokers that enables customization of client quota computation.
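 *
 * <p>For illustration only, a sketch that applies one shared PRODUCE quota per user
 * (the tag name and byte-rate limit are hypothetical):
 *
 * <pre class="code">
 * public class PerUserQuotaCallback implements ClientQuotaCallback {
 *
 *     public void configure(Map<String, ?> configs) { }
 *
 *     public Map<String, String> quotaMetricTags(ClientQuotaType quotaType, KafkaPrincipal principal, String clientId) {
 *         // all clients of the same user share one quota
 *         return Collections.singletonMap("user", principal.getName());
 *     }
 *
 *     public Double quotaLimit(ClientQuotaType quotaType, Map<String, String> metricTags) {
 *         // only PRODUCE is limited in this sketch
 *         return quotaType == ClientQuotaType.PRODUCE ? 1048576.0 : null;
 *     }
 *
 *     public void updateQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity, double newValue) { }
 *
 *     public void removeQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity) { }
 *
 *     public boolean quotaResetRequired(ClientQuotaType quotaType) { return false; }
 *
 *     public boolean updateClusterMetadata(Cluster cluster) { return false; }
 *
 *     public void close() { }
 * }
 * </pre>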
*/
public interface ClientQuotaCallback extends Configurable {
/**
* Quota callback invoked to determine the quota metric tags to be applied for a request.
* Quota limits are associated with quota metrics and all clients which use the same
* metric tags share the quota limit.
*
* @param quotaType Type of quota requested
* @param principal The user principal of the connection for which quota is requested
* @param clientId The client id associated with the request
* @return quota metric tags that indicate which other clients share this quota
*/
Map<String, String> quotaMetricTags(ClientQuotaType quotaType, KafkaPrincipal principal, String clientId);
/**
* Returns the quota limit associated with the provided metric tags. These tags were returned from
* a previous call to {@link #quotaMetricTags(ClientQuotaType, KafkaPrincipal, String)}. This method is
* invoked by quota managers to obtain the current quota limit applied to a metric when the first request
* using these tags is processed. It is also invoked after a quota update or cluster metadata change.
 * If the tags are no longer in use after the update (e.g. this is a {user, client-id} quota metric
 * and the quota now in use is a {user} quota), null is returned.
*
* @param quotaType Type of quota requested
* @param metricTags Metric tags for a quota metric of type `quotaType`
* @return the quota limit for the provided metric tags or null if the metric tags are no longer in use
*/
Double quotaLimit(ClientQuotaType quotaType, Map<String, String> metricTags);
/**
* Quota configuration update callback that is invoked when quota configuration for an entity is
* updated in the quorum. This is useful to track configured quotas if built-in quota configuration
* tools are used for quota management.
*
* @param quotaType Type of quota being updated
* @param quotaEntity The quota entity for which quota is being updated
* @param newValue The new quota value
*/
void updateQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity, double newValue);
/**
* Quota configuration removal callback that is invoked when quota configuration for an entity is
* removed in the quorum. This is useful to track configured quotas if built-in quota configuration
* tools are used for quota management.
*
 * @param quotaType Type of quota being removed
 * @param quotaEntity The quota entity for which quota is being removed
*/
void removeQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity);
/**
* Returns true if any of the existing quota configs may have been updated since the last call
* to this method for the provided quota type. Quota updates as a result of calls to
* {@link #updateClusterMetadata(Cluster)}, {@link #updateQuota(ClientQuotaType, ClientQuotaEntity, double)}
* and {@link #removeQuota(ClientQuotaType, ClientQuotaEntity)} are automatically processed.
 * So callbacks that rely only on built-in quota configuration tools can always return false. Quota callbacks
* with external quota configuration or custom reconfigurable quota configs that affect quota limits must
* return true if existing metric configs may need to be updated. This method is invoked on every request
* and hence is expected to be handled by callbacks as a simple flag that is updated when quotas change.
*
* @param quotaType Type of quota
*/
boolean quotaResetRequired(ClientQuotaType quotaType);
/**
 * Metadata update callback that is invoked whenever an UpdateMetadata request is received from
* the controller. This is useful if quota computation takes partitions into account.
* Topics that are being deleted will not be included in `cluster`.
*
* @param cluster Cluster metadata including partitions and their leaders if known
* @return true if quotas have changed and metric configs may need to be updated
*/
boolean updateClusterMetadata(Cluster cluster);
/**
* Closes this instance.
*/
void close();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/quota/ClientQuotaEntity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.quota;
import java.util.List;
/**
* The metadata for an entity for which quota is configured. Quotas may be defined at
* different levels and `configEntities` gives the list of config entities that define
* the level of this quota entity.
*/
public interface ClientQuotaEntity {
/**
* Entity type of a {@link ConfigEntity}
*/
enum ConfigEntityType {
USER,
CLIENT_ID,
DEFAULT_USER,
DEFAULT_CLIENT_ID
}
/**
* Interface representing a quota configuration entity. Quota may be
* configured at levels that include one or more configuration entities.
* For example, {user, client-id} quota is represented using two
* instances of ConfigEntity with entity types USER and CLIENT_ID.
*/
interface ConfigEntity {
/**
* Returns the name of this entity. For default quotas, an empty string is returned.
*/
String name();
/**
* Returns the type of this entity.
*/
ConfigEntityType entityType();
}
/**
* Returns the list of configuration entities that this quota entity is comprised of.
* For {user} or {clientId} quota, this is a single entity and for {user, clientId}
* quota, this is a list of two entities.
*/
List<ConfigEntity> configEntities();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/quota/ClientQuotaType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.quota;
/**
* Types of quotas that may be configured on brokers for client requests.
*/
public enum ClientQuotaType {
PRODUCE,
FETCH,
REQUEST,
CONTROLLER_MUTATION
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/server/quota/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides pluggable interface for enforcing client quotas from a Kafka server.
*/
package org.apache.kafka.server.quota; |
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/KafkaException.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka;
import org.springframework.core.NestedRuntimeException;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
* The Spring for Apache Kafka specific {@link NestedRuntimeException} implementation.
*
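 * <p>For illustration, a sketch that constructs an instance with an explicit log
 * level and logs it via {@link #selfLog(String, LogAccessor)} (the logger, message
 * and cause are hypothetical):
 *
 * <pre class="code">
 * LogAccessor logger = new LogAccessor(LogFactory.getLog(MyComponent.class));
 * KafkaException ex = new KafkaException("send failed", KafkaException.Level.WARN, cause);
 * ex.selfLog("while sending a record", logger);
 * </pre>
 *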
* @author Gary Russell
* @author Artem Bilan
*/
@SuppressWarnings("serial")
public class KafkaException extends NestedRuntimeException {
/**
* The log level for {@link KafkaException}.
* @since 2.5
*/
public enum Level {
/**
* Fatal.
*/
FATAL,
/**
* Error.
*/
ERROR,
/**
* Warn.
*/
WARN,
/**
* Info.
*/
INFO,
/**
* Debug.
*/
DEBUG,
/**
* Trace.
*/
TRACE
}
private final Level logLevel;
/**
* Construct an instance with the provided properties.
* @param message the message.
*/
public KafkaException(String message) {
this(message, Level.ERROR, null);
}
/**
* Construct an instance with the provided properties.
* @param message the message.
* @param cause the cause.
*/
public KafkaException(String message, @Nullable Throwable cause) {
this(message, Level.ERROR, cause);
}
/**
* Construct an instance with the provided properties.
* @param message the message.
* @param level the level at which this exception should be logged when using
* {@link #selfLog(String, LogAccessor)}.
* @param cause the cause.
*/
public KafkaException(String message, Level level, @Nullable Throwable cause) {
super(message, cause);
Assert.notNull(level, "'level' cannot be null");
this.logLevel = level;
}
/**
* Log this exception at its log level.
* @param message the message.
* @param logger the log accessor.
*/
public void selfLog(String message, LogAccessor logger) {
switch (this.logLevel) {
case FATAL:
logger.fatal(this, message);
break;
case ERROR:
logger.error(this, message);
break;
case WARN:
logger.warn(this, message);
break;
case INFO:
logger.info(this, message);
break;
case DEBUG:
logger.debug(this, message);
break;
case TRACE:
logger.trace(this, message);
break;
default:
logger.error(this, message);
}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/package-info.java | /**
 * Base package for Spring for Apache Kafka.
*/
package org.springframework.kafka;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/DltHandler.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Annotation to mark the method that should process the DLT topic message.
 * The method can have the same parameters as a {@link KafkaListener} method (Message, Acknowledgment, etc.).
*
* The annotated method must be in the same class as the corresponding {@link KafkaListener} annotation.
*
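 * <p>For illustration, a sketch of a handler alongside its listener (assuming retry
 * topics are configured for the listener; the topic name and bodies are hypothetical):
 *
 * <pre class="code">
 * @KafkaListener(topics = "myTopic")
 * public void listen(String message) {
 *     // may throw, eventually routing the record to the DLT
 * }
 *
 * @DltHandler
 * public void handleDltMessage(String message) {
 *     // process the dead-lettered record
 * }
 * </pre>
 *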
* @author Tomaz Fernandes
* @since 2.7
*
*/
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface DltHandler {
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/EnableKafka.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.springframework.context.annotation.Import;
/**
* Enable Kafka listener annotated endpoints that are created under the covers by a
* {@link org.springframework.kafka.config.AbstractKafkaListenerContainerFactory
* AbstractListenerContainerFactory}. To be used on
* {@link org.springframework.context.annotation.Configuration Configuration} classes as
* follows:
*
* <pre class="code">
* @Configuration
* @EnableKafka
* public class AppConfig {
* @Bean
* public ConcurrentKafkaListenerContainerFactory myKafkaListenerContainerFactory() {
* ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
* factory.setConsumerFactory(consumerFactory());
* factory.setConcurrency(4);
* return factory;
* }
* // other @Bean definitions
* }
* </pre>
*
 * The {@code KafkaListenerContainerFactory} is responsible for creating the listener
 * container for a particular endpoint. Typical implementations, such as the
 * {@link org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory
 * ConcurrentKafkaListenerContainerFactory} used in the sample above, provide the
 * necessary configuration options that are supported by the underlying
* {@link org.springframework.kafka.listener.MessageListenerContainer
* MessageListenerContainer}.
*
* <p>
* {@code @EnableKafka} enables detection of {@link KafkaListener} annotations on any
* Spring-managed bean in the container. For example, given a class {@code MyService}:
*
* <pre class="code">
* package com.acme.foo;
*
* public class MyService {
* @KafkaListener(containerFactory = "myKafkaListenerContainerFactory", topics = "myTopic")
* public void process(String msg) {
* // process incoming message
* }
* }
* </pre>
*
* The container factory to use is identified by the
* {@link KafkaListener#containerFactory() containerFactory} attribute defining the name
* of the {@code KafkaListenerContainerFactory} bean to use. When none is set a
* {@code KafkaListenerContainerFactory} bean with name
* {@code kafkaListenerContainerFactory} is assumed to be present.
*
* <p>
 * The following configuration would ensure that every time a message is received from
 * topic "myTopic", {@code MyService.process()} is called with the content of the message:
*
* <pre class="code">
* @Configuration
* @EnableKafka
* public class AppConfig {
* @Bean
* public MyService myService() {
* return new MyService();
* }
*
* // Kafka infrastructure setup
* }
* </pre>
*
* Alternatively, if {@code MyService} were annotated with {@code @Component}, the
* following configuration would ensure that its {@code @KafkaListener} annotated method
* is invoked with a matching incoming message:
*
* <pre class="code">
* @Configuration
* @EnableKafka
* @ComponentScan(basePackages = "com.acme.foo")
* public class AppConfig {
* }
* </pre>
*
* Note that the created containers are not registered with the application context but
* can be easily located for management purposes using the
* {@link org.springframework.kafka.config.KafkaListenerEndpointRegistry
* KafkaListenerEndpointRegistry}.
*
* <p>
* Annotated methods can use a flexible signature; in particular, it is possible to use
* the {@link org.springframework.messaging.Message Message} abstraction and related
* annotations, see {@link KafkaListener} Javadoc for more details. For instance, the
* following would inject the content of the message and the kafka partition header:
*
* <pre class="code">
* @KafkaListener(containerFactory = "myKafkaListenerContainerFactory", topics = "myTopic")
* public void process(String msg, @Header("kafka_partition") int partition) {
* // process incoming message
* }
* </pre>
*
* These features are abstracted by the
* {@link org.springframework.messaging.handler.annotation.support.MessageHandlerMethodFactory
 * MessageHandlerMethodFactory} that is responsible for building the necessary invoker to
* process the annotated method. By default,
* {@link org.springframework.messaging.handler.annotation.support.DefaultMessageHandlerMethodFactory
* DefaultMessageHandlerMethodFactory} is used.
*
* <p>
* When more control is desired, a {@code @Configuration} class may implement
* {@link KafkaListenerConfigurer}. This allows access to the underlying
* {@link org.springframework.kafka.config.KafkaListenerEndpointRegistrar
* KafkaListenerEndpointRegistrar} instance. The following example demonstrates how to
* specify an explicit default {@code KafkaListenerContainerFactory}
*
* <pre class="code">
* @Configuration
* @EnableKafka
* public class AppConfig implements KafkaListenerConfigurer {
* @Override
* public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
* registrar.setContainerFactory(myKafkaListenerContainerFactory());
* }
*
* @Bean
* public KafkaListenerContainerFactory<?, ?> myKafkaListenerContainerFactory() {
* // factory settings
* }
*
* @Bean
* public MyService myService() {
* return new MyService();
* }
* }
* </pre>
*
* It is also possible to specify a custom
* {@link org.springframework.kafka.config.KafkaListenerEndpointRegistry
* KafkaListenerEndpointRegistry} in case you need more control on the way the containers
* are created and managed. The example below also demonstrates how to customize the
* {@link org.springframework.messaging.handler.annotation.support.DefaultMessageHandlerMethodFactory}
* as well as how to supply a custom {@link org.springframework.validation.Validator
* Validator} so that payloads annotated with
* {@link org.springframework.validation.annotation.Validated Validated} are first
* validated against a custom {@code Validator}.
*
* <pre class="code">
* @Configuration
* @EnableKafka
* public class AppConfig implements KafkaListenerConfigurer {
* @Override
* public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
* registrar.setEndpointRegistry(myKafkaListenerEndpointRegistry());
* registrar.setMessageHandlerMethodFactory(myMessageHandlerMethodFactory);
* registrar.setValidator(new MyValidator());
* }
*
* @Bean
* public KafkaListenerEndpointRegistry myKafkaListenerEndpointRegistry() {
* // registry configuration
* }
*
* @Bean
* public MessageHandlerMethodFactory myMessageHandlerMethodFactory() {
* DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory();
* // factory configuration
* return factory;
* }
*
* @Bean
* public MyService myService() {
* return new MyService();
* }
* }
* </pre>
*
* Implementing {@code KafkaListenerConfigurer} also allows for fine-grained control over
 * endpoint registration via the {@code KafkaListenerEndpointRegistrar}. For example, the
* following configures an extra endpoint:
*
* <pre class="code">
* @Configuration
* @EnableKafka
* public class AppConfig implements KafkaListenerConfigurer {
* @Override
* public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
* SimpleKafkaListenerEndpoint myEndpoint = new SimpleKafkaListenerEndpoint();
* // ... configure the endpoint
 * registrar.registerEndpoint(myEndpoint, anotherKafkaListenerContainerFactory());
* }
*
* @Bean
* public MyService myService() {
* return new MyService();
* }
*
* @Bean
* public KafkaListenerContainerFactory<?, ?> anotherKafkaListenerContainerFactory() {
* // ...
* }
*
* // Kafka infrastructure setup
* }
* </pre>
*
* Note that all beans implementing {@code KafkaListenerConfigurer} will be detected and
 * invoked in a similar fashion. The example above can be translated into a regular bean
 * definition registered in the context if you use XML configuration.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
*
* @see KafkaListener
* @see KafkaListenerAnnotationBeanPostProcessor
* @see org.springframework.kafka.config.KafkaListenerEndpointRegistrar
* @see org.springframework.kafka.config.KafkaListenerEndpointRegistry
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Import(KafkaListenerConfigurationSelector.class)
public @interface EnableKafka {
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/EnableKafkaStreams.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.springframework.context.annotation.Import;
/**
* Enable default Kafka Streams components. To be used on
* {@link org.springframework.context.annotation.Configuration Configuration} classes as
* follows:
*
* <pre class="code">
* @Configuration
* @EnableKafkaStreams
* public class AppConfig {
*
* @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
* public KafkaStreamsConfiguration kStreamsConfigs() {
* ...
* }
* // other @Bean definitions
* }
* </pre>
*
 * A bean named {@link KafkaStreamsDefaultConfiguration#DEFAULT_STREAMS_CONFIG_BEAN_NAME} is
 * required; it is used to declare a {@link org.springframework.kafka.config.StreamsBuilderFactoryBean}
 * bean named {@link KafkaStreamsDefaultConfiguration#DEFAULT_STREAMS_BUILDER_BEAN_NAME}.
 * <p>
 * To enable the Kafka Streams feature, also make sure that the {@code kafka-streams}
 * jar is on the classpath.
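 *
 * <p>For illustration, a sketch of the required configuration bean (the application
 * id and bootstrap servers are hypothetical):
 *
 * <pre class="code">
 * @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
 * public KafkaStreamsConfiguration kStreamsConfigs() {
 *     Map<String, Object> props = new HashMap<>();
 *     props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");
 *     props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
 *     return new KafkaStreamsConfiguration(props);
 * }
 * </pre>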
*
* @author Artem Bilan
*
* @since 1.1.4
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Import(KafkaStreamsDefaultConfiguration.class)
public @interface EnableKafkaStreams {
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaBootstrapConfiguration.java | /*
* Copyright 2002-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.context.annotation.ImportBeanDefinitionRegistrar;
import org.springframework.core.type.AnnotationMetadata;
import org.springframework.kafka.config.KafkaListenerConfigUtils;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
/**
* An {@link ImportBeanDefinitionRegistrar} class that registers a {@link KafkaListenerAnnotationBeanPostProcessor}
 * bean capable of processing Spring's @{@link KafkaListener} annotation. It also registers
 * a default {@link KafkaListenerEndpointRegistry}.
*
* <p>This configuration class is automatically imported when using the @{@link EnableKafka}
* annotation. See {@link EnableKafka} Javadoc for complete usage.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
*
* @see KafkaListenerAnnotationBeanPostProcessor
* @see KafkaListenerEndpointRegistry
* @see EnableKafka
*/
public class KafkaBootstrapConfiguration implements ImportBeanDefinitionRegistrar {
@Override
public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata, BeanDefinitionRegistry registry) {
if (!registry.containsBeanDefinition(
KafkaListenerConfigUtils.KAFKA_LISTENER_ANNOTATION_PROCESSOR_BEAN_NAME)) {
registry.registerBeanDefinition(KafkaListenerConfigUtils.KAFKA_LISTENER_ANNOTATION_PROCESSOR_BEAN_NAME,
new RootBeanDefinition(KafkaListenerAnnotationBeanPostProcessor.class));
}
if (!registry.containsBeanDefinition(KafkaListenerConfigUtils.KAFKA_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME)) {
registry.registerBeanDefinition(KafkaListenerConfigUtils.KAFKA_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME,
new RootBeanDefinition(KafkaListenerEndpointRegistry.class));
}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaHandler.java | /*
* Copyright 2015-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.springframework.messaging.handler.annotation.MessageMapping;
/**
* Annotation that marks a method to be the target of a Kafka message
* listener within a class that is annotated with {@link KafkaListener}.
*
* <p>See the {@link KafkaListener} for information about permitted method signatures
* and available parameters.
* <p><b>It is important to understand that when a message arrives, the method selection
* depends on the payload type. The type is matched with a single non-annotated parameter,
* or one that is annotated with {@code @Payload}.
* There must be no ambiguity - the system
* must be able to select exactly one method based on the payload type.</b>
*
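 * <p>For illustration, a sketch of a class-level listener with two handlers (the id,
 * topic and payload types are hypothetical):
 *
 * <pre class="code">
 * @KafkaListener(id = "multi", topics = "myTopic")
 * public class MultiMethodListener {
 *
 *     @KafkaHandler
 *     public void handle(String payload) {
 *         // handle String payloads
 *     }
 *
 *     @KafkaHandler(isDefault = true)
 *     public void handleOther(Object payload) {
 *         // fallback for any other payload type
 *     }
 * }
 * </pre>
 *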
* @author Gary Russell
*
* @see EnableKafka
* @see KafkaListener
* @see KafkaListenerAnnotationBeanPostProcessor
*/
@Target({ ElementType.METHOD, ElementType.ANNOTATION_TYPE })
@Retention(RetentionPolicy.RUNTIME)
@MessageMapping
@Documented
public @interface KafkaHandler {
/**
* When true, designate that this is the default fallback method if the payload type
* matches no other {@link KafkaHandler} method. Only one method can be so designated.
* @return true if this is the default method.
* @since 2.1.3
*/
boolean isDefault() default false;
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaListener.java | /*
* Copyright 2016-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Repeatable;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.springframework.messaging.handler.annotation.MessageMapping;
/**
* Annotation that marks a method to be the target of a Kafka message listener on the
* specified topics.
*
* The {@link #containerFactory()} identifies the
* {@link org.springframework.kafka.config.KafkaListenerContainerFactory
* KafkaListenerContainerFactory} to use to build the Kafka listener container. If not
* set, a <em>default</em> container factory is assumed to be available with a bean name
* of {@code kafkaListenerContainerFactory} unless an explicit default has been provided
* through configuration.
*
* <p>
* Processing of {@code @KafkaListener} annotations is performed by registering a
* {@link KafkaListenerAnnotationBeanPostProcessor}. This can be done manually or, more
* conveniently, through {@link EnableKafka} annotation.
*
* <p>
* Annotated methods are allowed to have flexible signatures similar to what
* {@link MessageMapping} provides, that is
* <ul>
* <li>{@link org.apache.kafka.clients.consumer.ConsumerRecord} to access to the raw Kafka
* message</li>
* <li>{@link org.springframework.kafka.support.Acknowledgment} to manually ack</li>
* <li>{@link org.springframework.messaging.handler.annotation.Payload @Payload}-annotated
* method arguments including the support of validation</li>
* <li>{@link org.springframework.messaging.handler.annotation.Header @Header}-annotated
* method arguments to extract a specific header value, defined by
* {@link org.springframework.kafka.support.KafkaHeaders KafkaHeaders}</li>
* <li>{@link org.springframework.messaging.handler.annotation.Headers @Headers}-annotated
* argument that must also be assignable to {@link java.util.Map} for getting access to
* all headers.</li>
* <li>{@link org.springframework.messaging.MessageHeaders MessageHeaders} arguments for
* getting access to all headers.</li>
* <li>{@link org.springframework.messaging.support.MessageHeaderAccessor
* MessageHeaderAccessor} for convenient access to all method arguments.</li>
* </ul>
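 *
 * <p>For illustration, a minimal method-level listener (the id, topic and group
 * are hypothetical):
 *
 * <pre class="code">
 * @KafkaListener(id = "myListener", topics = "myTopic", groupId = "myGroup")
 * public void listen(ConsumerRecord<String, String> record) {
 *     // process the record
 * }
 * </pre>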
*
* <p>When defined at the method level, a listener container is created for each method.
* The {@link org.springframework.kafka.listener.MessageListener} is a
* {@link org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter},
* configured with a {@link org.springframework.kafka.config.MethodKafkaListenerEndpoint}.
*
* <p>When defined at the class level, a single message listener container is used to
* service all methods annotated with {@code @KafkaHandler}. Method signatures of such
* annotated methods must not cause any ambiguity such that a single method can be
* resolved for a particular inbound message. The
* {@link org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter} is
* configured with a
* {@link org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint}.
*
* @author Gary Russell
* @author Venil Noronha
*
* @see EnableKafka
* @see KafkaListenerAnnotationBeanPostProcessor
* @see KafkaListeners
*/
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE })
@Retention(RetentionPolicy.RUNTIME)
@MessageMapping
@Documented
@Repeatable(KafkaListeners.class)
public @interface KafkaListener {
/**
* The unique identifier of the container for this listener.
* <p>If none is specified an auto-generated id is used.
* <p>Note: When provided, this value will override the group id property
* in the consumer factory configuration, unless {@link #idIsGroup()}
* is set to false or {@link #groupId()} is provided.
* <p>SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
* @return the {@code id} for the container managing for this endpoint.
* @see org.springframework.kafka.config.KafkaListenerEndpointRegistry#getListenerContainer(String)
*/
String id() default "";
/**
* The bean name of the {@link org.springframework.kafka.config.KafkaListenerContainerFactory}
* to use to create the message listener container responsible to serve this endpoint.
* <p>
* If not specified, the default container factory is used, if any. If a SpEL
* expression is provided ({@code #{...}}), the expression can either evaluate to a
* container factory instance or a bean name.
* @return the container factory bean name.
*/
String containerFactory() default "";
/**
* The topics for this listener.
* The entries can be 'topic name', 'property-placeholder keys' or 'expressions'.
* An expression must be resolved to the topic name.
* This uses group management and Kafka will assign partitions to group members.
* <p>
* Mutually exclusive with {@link #topicPattern()} and {@link #topicPartitions()}.
* @return the topic names or expressions (SpEL) to listen to.
*/
String[] topics() default {};
/**
* The topic pattern for this listener. The entries can be 'topic pattern', a
* 'property-placeholder key' or an 'expression'. The framework will create a
* container that subscribes to all topics matching the specified pattern to get
* dynamically assigned partitions. The pattern matching will be performed
* periodically against topics existing at the time of check. An expression must
* be resolved to the topic pattern (String or Pattern result types are supported).
* This uses group management and Kafka will assign partitions to group members.
* <p>
* Mutually exclusive with {@link #topics()} and {@link #topicPartitions()}.
* @return the topic pattern or expression (SpEL).
* @see org.apache.kafka.clients.CommonClientConfigs#METADATA_MAX_AGE_CONFIG
*/
String topicPattern() default "";
/**
* The topicPartitions for this listener when using manual topic/partition
* assignment.
* <p>
* Mutually exclusive with {@link #topicPattern()} and {@link #topics()}.
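 * <p>For example (topic and partitions hypothetical):
 * <pre class="code">
 * topicPartitions = @TopicPartition(topic = "myTopic", partitions = { "0", "1" })
 * </pre>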
* @return the topic names or expressions (SpEL) to listen to.
*/
TopicPartition[] topicPartitions() default {};
/**
* If provided, the listener container for this listener will be added to a bean with
* this value as its name, of type {@code Collection<MessageListenerContainer>}. This
* allows, for example, iteration over the collection to start/stop a subset of
 * containers. The {@code Collection} beans are deprecated as of version 2.7.3 and
 * will be removed in 2.8; use a bean with name {@code containerGroup + ".group"}
 * and type {@link org.springframework.kafka.listener.ContainerGroup} instead.
* <p>
* SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
* @return the bean name for the group.
*/
String containerGroup() default "";
/**
* Set an {@link org.springframework.kafka.listener.KafkaListenerErrorHandler} bean
* name to invoke if the listener method throws an exception. If a SpEL expression is
* provided ({@code #{...}}), the expression can either evaluate to a
* {@link org.springframework.kafka.listener.KafkaListenerErrorHandler} instance or a
* bean name.
* @return the error handler.
* @since 1.3
*/
String errorHandler() default "";
/**
* Override the {@code group.id} property for the consumer factory with this value
* for this listener only.
* <p>SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
* @return the group id.
* @since 1.3
*/
String groupId() default "";
/**
* When {@link #groupId() groupId} is not provided, use the {@link #id() id} (if
* provided) as the {@code group.id} property for the consumer. Set to false, to use
* the {@code group.id} from the consumer factory.
* @return false to disable.
* @since 1.3
*/
boolean idIsGroup() default true;
/**
* When provided, overrides the client id property in the consumer factory
* configuration. A suffix ('-n') is added for each container instance to ensure
* uniqueness when concurrency is used.
* <p>SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
* @return the client id prefix.
* @since 2.1.1
*/
String clientIdPrefix() default "";
/**
* A pseudo bean name used in SpEL expressions within this annotation to reference
* the current bean within which this listener is defined. This allows access to
* properties and methods within the enclosing bean.
* Default '__listener'.
* <p>
* Example: {@code topics = "#{__listener.topicList}"}.
* @return the pseudo bean name.
* @since 2.1.2
*/
String beanRef() default "__listener";
/**
* Override the container factory's {@code concurrency} setting for this listener. May
* be a property placeholder or SpEL expression that evaluates to a {@link Number}, in
* which case {@link Number#intValue()} is used to obtain the value.
* <p>SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
* @return the concurrency.
* @since 2.2
*/
String concurrency() default "";
/**
* Set to true or false, to override the default setting in the container factory. May
* be a property placeholder or SpEL expression that evaluates to a {@link Boolean} or
* a {@link String}, in which case the {@link Boolean#parseBoolean(String)} is used to
* obtain the value.
* <p>SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
* @return true to auto start, false to not auto start.
* @since 2.2
*/
String autoStartup() default "";
/**
* Kafka consumer properties; they will supersede any properties with the same name
* defined in the consumer factory (if the consumer factory supports property overrides).
* <p>
* <b>Supported Syntax</b>
* <p>The supported syntax for key-value pairs is the same as the
* syntax defined for entries in a Java
* {@linkplain java.util.Properties#load(java.io.Reader) properties file}:
* <ul>
* <li>{@code key=value}</li>
* <li>{@code key:value}</li>
* <li>{@code key value}</li>
* </ul>
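 * For example (values hypothetical):
 * <pre class="code">
 * properties = { "max.poll.interval.ms=120000", "auto.offset.reset:earliest" }
 * </pre>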
* {@code group.id} and {@code client.id} are ignored.
* <p>SpEL {@code #{...}} and property place holders {@code ${...}} are supported.
 * SpEL expressions must resolve to a {@link String}, a {@code String[]} or a
* {@code Collection<String>} where each member of the array or collection is a
* property name + value with the above formats.
* @return the properties.
* @since 2.2.4
* @see org.apache.kafka.clients.consumer.ConsumerConfig
* @see #groupId()
* @see #clientIdPrefix()
*/
String[] properties() default {};
/**
 * When false and the return type is an {@link Iterable}, return the result as the
* value of a single reply record instead of individual records for each element.
* Default true. Ignored if the reply is of type {@code Iterable<Message<?>>}.
* @return false to create a single reply record.
* @since 2.3.5
*/
boolean splitIterables() default true;
/**
* Set the bean name of a
* {@link org.springframework.messaging.converter.SmartMessageConverter} (such as the
* {@link org.springframework.messaging.converter.CompositeMessageConverter}) to use
* in conjunction with the
* {@link org.springframework.messaging.MessageHeaders#CONTENT_TYPE} header to perform
* the conversion to the required type. If a SpEL expression is provided
* ({@code #{...}}), the expression can either evaluate to a
* {@link org.springframework.messaging.converter.SmartMessageConverter} instance or a
* bean name.
* @return the bean name.
* @since 2.7.1
*/
String contentTypeConverter() default "";
/**
* Override the container factory's {@code batchListener} property. The listener
* method signature should receive a {@code List<?>}; refer to the reference
* documentation. This allows a single container factory to be used for both record
* and batch listeners; previously separate container factories were required.
* @return "true" for the annotated method to be a batch listener or "false" for a
* record listener. If not set, the container factory setting is used. SpEL and
* property placeholders are not supported because the listener type cannot be
* variable.
* @since 2.8
* @see Boolean#parseBoolean(String)
*/
String batch() default "";
/**
* Set an {@link org.springframework.kafka.listener.adapter.RecordFilterStrategy} bean
* name to override the strategy configured on the container factory. If a SpEL
* expression is provided ({@code #{...}}), the expression can either evaluate to a
* {@link org.springframework.kafka.listener.adapter.RecordFilterStrategy} instance or
* a bean name.
 * @return the record filter strategy.
* @since 2.8.4
*/
String filter() default "";
/**
* Static information that will be added as a header with key
* {@link org.springframework.kafka.support.KafkaHeaders#LISTENER_INFO}. This can be
* used, for example, in a
* {@link org.springframework.kafka.listener.RecordInterceptor},
* {@link org.springframework.kafka.listener.adapter.RecordFilterStrategy} or the
* listener itself, for any purposes.
* <p>
* SpEL {@code #{...}} and property place holders {@code ${...}} are supported, but it
* must resolve to a String or {@code byte[]}.
* <p>
* This header will be stripped out if an outbound record is created with the headers
* from an input record.
* @return the info.
* @since 2.8.4
*/
String info() default "";
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaListenerAnnotationBeanPostProcessor.java | /*
* Copyright 2014-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.io.IOException;
import java.io.StringReader;
import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.logging.LogFactory;
import org.springframework.aop.framework.Advised;
import org.springframework.aop.support.AopUtils;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.ListableBeanFactory;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.ObjectFactory;
import org.springframework.beans.factory.SmartInitializingSingleton;
import org.springframework.beans.factory.config.BeanExpressionContext;
import org.springframework.beans.factory.config.BeanExpressionResolver;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.config.Scope;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.expression.StandardBeanExpressionResolver;
import org.springframework.core.MethodIntrospector;
import org.springframework.core.OrderComparator;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.AnnotatedElementUtils;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.core.convert.TypeDescriptor;
import org.springframework.core.convert.converter.ConditionalGenericConverter;
import org.springframework.core.convert.converter.Converter;
import org.springframework.core.convert.converter.GenericConverter;
import org.springframework.core.log.LogAccessor;
import org.springframework.format.Formatter;
import org.springframework.format.FormatterRegistry;
import org.springframework.format.support.DefaultFormattingConversionService;
import org.springframework.kafka.config.KafkaListenerConfigUtils;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpointRegistrar;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.config.MethodKafkaListenerEndpoint;
import org.springframework.kafka.config.MultiMethodKafkaListenerEndpoint;
import org.springframework.kafka.listener.ContainerGroupSequencer;
import org.springframework.kafka.listener.KafkaListenerErrorHandler;
import org.springframework.kafka.listener.adapter.RecordFilterStrategy;
import org.springframework.kafka.retrytopic.RetryTopicBootstrapper;
import org.springframework.kafka.retrytopic.RetryTopicConfiguration;
import org.springframework.kafka.retrytopic.RetryTopicConfigurer;
import org.springframework.kafka.retrytopic.RetryTopicInternalBeanNames;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.lang.Nullable;
import org.springframework.messaging.converter.GenericMessageConverter;
import org.springframework.messaging.converter.SmartMessageConverter;
import org.springframework.messaging.handler.annotation.support.DefaultMessageHandlerMethodFactory;
import org.springframework.messaging.handler.annotation.support.MessageHandlerMethodFactory;
import org.springframework.messaging.handler.invocation.HandlerMethodArgumentResolver;
import org.springframework.messaging.handler.invocation.InvocableHandlerMethod;
import org.springframework.util.Assert;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;
import org.springframework.validation.Validator;
/**
* Bean post-processor that registers methods annotated with {@link KafkaListener}
* to be invoked by a Kafka message listener container created under the covers
* by a {@link org.springframework.kafka.config.KafkaListenerContainerFactory}
* according to the parameters of the annotation.
*
* <p>Annotated methods can use flexible arguments as defined by {@link KafkaListener}.
*
* <p>This post-processor is automatically registered by Spring's {@link EnableKafka}
* annotation.
*
* <p>Auto-detects any {@link KafkaListenerConfigurer} instances in the container,
* allowing customization of the registry to be used, the default container
* factory, or fine-grained control over endpoint registration. See the
* {@link EnableKafka} Javadoc for complete usage details.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Juergen Hoeller
* @author Gary Russell
* @author Artem Bilan
* @author Dariusz Szablinski
* @author Venil Noronha
* @author Dimitri Penner
* @author Filip Halemba
* @author Tomaz Fernandes
*
* @see KafkaListener
* @see KafkaListenerErrorHandler
* @see EnableKafka
* @see KafkaListenerConfigurer
* @see KafkaListenerEndpointRegistrar
* @see KafkaListenerEndpointRegistry
* @see org.springframework.kafka.config.KafkaListenerEndpoint
* @see MethodKafkaListenerEndpoint
*/
public class KafkaListenerAnnotationBeanPostProcessor<K, V>
implements BeanPostProcessor, Ordered, ApplicationContextAware, InitializingBean, SmartInitializingSingleton {
private static final String UNCHECKED = "unchecked";
private static final String THE_LEFT = "The [";
private static final String RESOLVED_TO_LEFT = "Resolved to [";
private static final String RIGHT_FOR_LEFT = "] for [";
private static final String GENERATED_ID_PREFIX = "org.springframework.kafka.KafkaListenerEndpointContainer#";
/**
* The bean name of the default {@link org.springframework.kafka.config.KafkaListenerContainerFactory}.
*/
public static final String DEFAULT_KAFKA_LISTENER_CONTAINER_FACTORY_BEAN_NAME = "kafkaListenerContainerFactory";
private final Set<Class<?>> nonAnnotatedClasses = Collections.newSetFromMap(new ConcurrentHashMap<>(64));
private final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass()));
private final ListenerScope listenerScope = new ListenerScope();
private final KafkaHandlerMethodFactoryAdapter messageHandlerMethodFactory =
new KafkaHandlerMethodFactoryAdapter();
private final KafkaListenerEndpointRegistrar registrar = new KafkaListenerEndpointRegistrar();
private final AtomicInteger counter = new AtomicInteger();
private KafkaListenerEndpointRegistry endpointRegistry;
private String defaultContainerFactoryBeanName = DEFAULT_KAFKA_LISTENER_CONTAINER_FACTORY_BEAN_NAME;
private ApplicationContext applicationContext;
private BeanFactory beanFactory;
private BeanExpressionResolver resolver = new StandardBeanExpressionResolver();
private BeanExpressionContext expressionContext;
private Charset charset = StandardCharsets.UTF_8;
private AnnotationEnhancer enhancer;
@Override
public int getOrder() {
return LOWEST_PRECEDENCE;
}
/**
* Set the {@link KafkaListenerEndpointRegistry} that will hold the created
* endpoint and manage the lifecycle of the related listener container.
* @param endpointRegistry the {@link KafkaListenerEndpointRegistry} to set.
*/
public void setEndpointRegistry(KafkaListenerEndpointRegistry endpointRegistry) {
this.endpointRegistry = endpointRegistry;
}
/**
* Set the name of the {@link KafkaListenerContainerFactory} to use by default.
* <p>If none is specified, "kafkaListenerContainerFactory" is assumed to be defined.
* @param containerFactoryBeanName the {@link KafkaListenerContainerFactory} bean name.
*/
public void setDefaultContainerFactoryBeanName(String containerFactoryBeanName) {
this.defaultContainerFactoryBeanName = containerFactoryBeanName;
}
/**
* Set the {@link MessageHandlerMethodFactory} to use to configure the message
* listener responsible for serving an endpoint detected by this processor.
* <p>By default, {@link DefaultMessageHandlerMethodFactory} is used and it
* can be configured further to support additional method arguments
* or to customize conversion and validation support. See
* {@link DefaultMessageHandlerMethodFactory} Javadoc for more details.
* @param messageHandlerMethodFactory the {@link MessageHandlerMethodFactory} instance.
*/
public void setMessageHandlerMethodFactory(MessageHandlerMethodFactory messageHandlerMethodFactory) {
this.messageHandlerMethodFactory.setHandlerMethodFactory(messageHandlerMethodFactory);
}
/**
* Return the configured handler factory.
* @return the factory.
* @since 2.5.7
*/
public MessageHandlerMethodFactory getMessageHandlerMethodFactory() {
return this.messageHandlerMethodFactory;
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
if (applicationContext instanceof ConfigurableApplicationContext) {
setBeanFactory(((ConfigurableApplicationContext) applicationContext).getBeanFactory());
}
else {
setBeanFactory(applicationContext);
}
}
/**
* Making a {@link BeanFactory} available is optional; if not set,
* {@link KafkaListenerConfigurer} beans won't get autodetected and an
* {@link #setEndpointRegistry endpoint registry} has to be explicitly configured.
* @param beanFactory the {@link BeanFactory} to be used.
*/
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
if (beanFactory instanceof ConfigurableListableBeanFactory) {
this.resolver = ((ConfigurableListableBeanFactory) beanFactory).getBeanExpressionResolver();
this.expressionContext = new BeanExpressionContext((ConfigurableListableBeanFactory) beanFactory,
this.listenerScope);
}
}
/**
* Set a charset to use when converting byte[] to String in method arguments and other
* String/byte[] conversions. Default UTF-8.
* @param charset the charset.
* @since 2.2
*/
public void setCharset(Charset charset) {
Assert.notNull(charset, "'charset' cannot be null");
this.charset = charset;
}
@Override
public void afterPropertiesSet() throws Exception {
buildEnhancer();
}
@Override
public void afterSingletonsInstantiated() {
this.registrar.setBeanFactory(this.beanFactory);
if (this.beanFactory instanceof ListableBeanFactory) {
Map<String, KafkaListenerConfigurer> instances =
((ListableBeanFactory) this.beanFactory).getBeansOfType(KafkaListenerConfigurer.class);
for (KafkaListenerConfigurer configurer : instances.values()) {
configurer.configureKafkaListeners(this.registrar);
}
}
if (this.registrar.getEndpointRegistry() == null) {
if (this.endpointRegistry == null) {
Assert.state(this.beanFactory != null,
"BeanFactory must be set to find endpoint registry by bean name");
this.endpointRegistry = this.beanFactory.getBean(
KafkaListenerConfigUtils.KAFKA_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME,
KafkaListenerEndpointRegistry.class);
}
this.registrar.setEndpointRegistry(this.endpointRegistry);
}
if (this.defaultContainerFactoryBeanName != null) {
this.registrar.setContainerFactoryBeanName(this.defaultContainerFactoryBeanName);
}
// Set the custom handler method factory once resolved by the configurer
MessageHandlerMethodFactory handlerMethodFactory = this.registrar.getMessageHandlerMethodFactory();
if (handlerMethodFactory != null) {
this.messageHandlerMethodFactory.setHandlerMethodFactory(handlerMethodFactory);
}
else {
addFormatters(this.messageHandlerMethodFactory.defaultFormattingConversionService);
}
// Actually register all listeners
this.registrar.afterPropertiesSet();
Map<String, ContainerGroupSequencer> sequencers =
this.applicationContext.getBeansOfType(ContainerGroupSequencer.class, false, false);
sequencers.values().forEach(seq -> seq.initialize());
}
private void buildEnhancer() {
if (this.applicationContext != null) {
Map<String, AnnotationEnhancer> enhancersMap =
this.applicationContext.getBeansOfType(AnnotationEnhancer.class, false, false);
if (enhancersMap.size() > 0) {
List<AnnotationEnhancer> enhancers = enhancersMap.values()
.stream()
.sorted(new OrderComparator())
.collect(Collectors.toList());
this.enhancer = (attrs, element) -> {
Map<String, Object> newAttrs = attrs;
for (AnnotationEnhancer enh : enhancers) {
newAttrs = enh.apply(newAttrs, element);
}
return newAttrs; // return the enhanced attributes rather than discarding the enhancer chain's result
};
}
}
}
@Override
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
return bean;
}
@Override
public Object postProcessAfterInitialization(final Object bean, final String beanName) throws BeansException {
if (!this.nonAnnotatedClasses.contains(bean.getClass())) {
Class<?> targetClass = AopUtils.getTargetClass(bean);
Collection<KafkaListener> classLevelListeners = findListenerAnnotations(targetClass);
final boolean hasClassLevelListeners = classLevelListeners.size() > 0;
final List<Method> multiMethods = new ArrayList<>();
Map<Method, Set<KafkaListener>> annotatedMethods = MethodIntrospector.selectMethods(targetClass,
(MethodIntrospector.MetadataLookup<Set<KafkaListener>>) method -> {
Set<KafkaListener> listenerMethods = findListenerAnnotations(method);
return (!listenerMethods.isEmpty() ? listenerMethods : null);
});
if (hasClassLevelListeners) {
Set<Method> methodsWithHandler = MethodIntrospector.selectMethods(targetClass,
(ReflectionUtils.MethodFilter) method ->
AnnotationUtils.findAnnotation(method, KafkaHandler.class) != null);
multiMethods.addAll(methodsWithHandler);
}
if (annotatedMethods.isEmpty()) {
this.nonAnnotatedClasses.add(bean.getClass());
this.logger.trace(() -> "No @KafkaListener annotations found on bean type: " + bean.getClass());
}
else {
// Non-empty set of methods
for (Map.Entry<Method, Set<KafkaListener>> entry : annotatedMethods.entrySet()) {
Method method = entry.getKey();
for (KafkaListener listener : entry.getValue()) {
processKafkaListener(listener, method, bean, beanName);
}
}
this.logger.debug(() -> annotatedMethods.size() + " @KafkaListener methods processed on bean '"
+ beanName + "': " + annotatedMethods);
}
if (hasClassLevelListeners) {
processMultiMethodListeners(classLevelListeners, multiMethods, bean, beanName);
}
}
return bean;
}
/*
* AnnotationUtils.getRepeatableAnnotations does not look at interfaces
*/
private Collection<KafkaListener> findListenerAnnotations(Class<?> clazz) {
Set<KafkaListener> listeners = new HashSet<>();
KafkaListener ann = AnnotatedElementUtils.findMergedAnnotation(clazz, KafkaListener.class);
if (ann != null) {
ann = enhance(clazz, ann);
listeners.add(ann);
}
KafkaListeners anns = AnnotationUtils.findAnnotation(clazz, KafkaListeners.class);
if (anns != null) {
listeners.addAll(Arrays.stream(anns.value())
.map(anno -> enhance(clazz, anno))
.collect(Collectors.toList()));
}
return listeners;
}
/*
* AnnotationUtils.getRepeatableAnnotations does not look at interfaces
*/
private Set<KafkaListener> findListenerAnnotations(Method method) {
Set<KafkaListener> listeners = new HashSet<>();
KafkaListener ann = AnnotatedElementUtils.findMergedAnnotation(method, KafkaListener.class);
if (ann != null) {
ann = enhance(method, ann);
listeners.add(ann);
}
KafkaListeners anns = AnnotationUtils.findAnnotation(method, KafkaListeners.class);
if (anns != null) {
listeners.addAll(Arrays.stream(anns.value())
.map(anno -> enhance(method, anno))
.collect(Collectors.toList()));
}
return listeners;
}
private KafkaListener enhance(AnnotatedElement element, KafkaListener ann) {
if (this.enhancer == null) {
return ann;
}
else {
return AnnotationUtils.synthesizeAnnotation(
this.enhancer.apply(AnnotationUtils.getAnnotationAttributes(ann), element), KafkaListener.class, null);
}
}
private void processMultiMethodListeners(Collection<KafkaListener> classLevelListeners, List<Method> multiMethods,
Object bean, String beanName) {
List<Method> checkedMethods = new ArrayList<>();
Method defaultMethod = null;
for (Method method : multiMethods) {
Method checked = checkProxy(method, bean);
KafkaHandler annotation = AnnotationUtils.findAnnotation(method, KafkaHandler.class);
if (annotation != null && annotation.isDefault()) {
final Method toAssert = defaultMethod;
Assert.state(toAssert == null, () -> "Only one @KafkaHandler can be marked 'isDefault', found: "
+ toAssert.toString() + " and " + method.toString());
defaultMethod = checked;
}
checkedMethods.add(checked);
}
for (KafkaListener classLevelListener : classLevelListeners) {
MultiMethodKafkaListenerEndpoint<K, V> endpoint =
new MultiMethodKafkaListenerEndpoint<>(checkedMethods, defaultMethod, bean);
String beanRef = classLevelListener.beanRef();
this.listenerScope.addListener(beanRef, bean);
processListener(endpoint, classLevelListener, bean, beanName, resolveTopics(classLevelListener),
resolveTopicPartitions(classLevelListener));
this.listenerScope.removeListener(beanRef);
}
}
protected void processKafkaListener(KafkaListener kafkaListener, Method method, Object bean, String beanName) {
Method methodToUse = checkProxy(method, bean);
MethodKafkaListenerEndpoint<K, V> endpoint = new MethodKafkaListenerEndpoint<>();
endpoint.setMethod(methodToUse);
String beanRef = kafkaListener.beanRef();
this.listenerScope.addListener(beanRef, bean);
String[] topics = resolveTopics(kafkaListener);
TopicPartitionOffset[] tps = resolveTopicPartitions(kafkaListener);
if (!processMainAndRetryListeners(kafkaListener, bean, beanName, methodToUse, endpoint, topics, tps)) {
processListener(endpoint, kafkaListener, bean, beanName, topics, tps);
}
this.listenerScope.removeListener(beanRef);
}
private boolean processMainAndRetryListeners(KafkaListener kafkaListener, Object bean, String beanName,
Method methodToUse, MethodKafkaListenerEndpoint<K, V> endpoint, String[] topics,
TopicPartitionOffset[] tps) {
String[] retryableCandidates = topics;
if (retryableCandidates.length == 0 && tps.length > 0) {
retryableCandidates = Arrays.stream(tps)
.map(tp -> tp.getTopic())
.distinct()
.collect(Collectors.toList())
.toArray(new String[0]);
}
RetryTopicConfiguration retryTopicConfiguration = new RetryTopicConfigurationProvider(this.beanFactory,
this.resolver, this.expressionContext)
.findRetryConfigurationFor(retryableCandidates, methodToUse, bean);
if (retryTopicConfiguration == null) {
String[] candidates = retryableCandidates;
this.logger.debug(() ->
"No retry topic configuration found for topics " + Arrays.toString(candidates));
return false;
}
RetryTopicConfigurer.EndpointProcessor endpointProcessor = endpointToProcess ->
this.processKafkaListenerAnnotation(endpointToProcess, kafkaListener, bean, topics, tps);
KafkaListenerContainerFactory<?> factory =
resolveContainerFactory(kafkaListener, resolve(kafkaListener.containerFactory()), beanName);
getRetryTopicConfigurer()
.processMainAndRetryListeners(endpointProcessor, endpoint, retryTopicConfiguration,
this.registrar, factory, this.defaultContainerFactoryBeanName);
return true;
}
private RetryTopicConfigurer getRetryTopicConfigurer() {
bootstrapRetryTopicIfNecessary();
return this.beanFactory.getBean(RetryTopicInternalBeanNames.RETRY_TOPIC_CONFIGURER, RetryTopicConfigurer.class);
}
private void bootstrapRetryTopicIfNecessary() {
if (!(this.beanFactory instanceof BeanDefinitionRegistry)) {
throw new IllegalStateException("BeanFactory must be an instance of "
+ BeanDefinitionRegistry.class.getSimpleName()
+ " to bootstrap the RetryTopic functionality. Provided beanFactory: "
+ this.beanFactory.getClass().getSimpleName());
}
BeanDefinitionRegistry registry = (BeanDefinitionRegistry) this.beanFactory;
if (!registry.containsBeanDefinition(RetryTopicInternalBeanNames
.RETRY_TOPIC_BOOTSTRAPPER)) {
registry.registerBeanDefinition(RetryTopicInternalBeanNames
.RETRY_TOPIC_BOOTSTRAPPER,
new RootBeanDefinition(RetryTopicBootstrapper.class));
this.beanFactory.getBean(RetryTopicInternalBeanNames
.RETRY_TOPIC_BOOTSTRAPPER, RetryTopicBootstrapper.class).bootstrapRetryTopic();
}
}
private Method checkProxy(Method methodArg, Object bean) {
Method method = methodArg;
if (AopUtils.isJdkDynamicProxy(bean)) {
try {
// Found a @KafkaListener method on the target class for this JDK proxy ->
// is it also present on the proxy itself?
method = bean.getClass().getMethod(method.getName(), method.getParameterTypes());
Class<?>[] proxiedInterfaces = ((Advised) bean).getProxiedInterfaces();
for (Class<?> iface : proxiedInterfaces) {
try {
method = iface.getMethod(method.getName(), method.getParameterTypes());
break;
}
catch (@SuppressWarnings("unused") NoSuchMethodException noMethod) {
// NOSONAR
}
}
}
catch (SecurityException ex) {
ReflectionUtils.handleReflectionException(ex);
}
catch (NoSuchMethodException ex) {
throw new IllegalStateException(String.format(
"@KafkaListener method '%s' found on bean target class '%s', " +
"but not found in any interface(s) for bean JDK proxy. Either " +
"pull the method up to an interface or switch to subclass (CGLIB) " +
"proxies by setting proxy-target-class/proxyTargetClass " +
"attribute to 'true'", method.getName(),
method.getDeclaringClass().getSimpleName()), ex);
}
}
return method;
}
protected void processListener(MethodKafkaListenerEndpoint<?, ?> endpoint, KafkaListener kafkaListener,
Object bean, String beanName, String[] topics, TopicPartitionOffset[] tps) {
processKafkaListenerAnnotation(endpoint, kafkaListener, bean, topics, tps);
String containerFactory = resolve(kafkaListener.containerFactory());
KafkaListenerContainerFactory<?> listenerContainerFactory = resolveContainerFactory(kafkaListener,
containerFactory, beanName);
this.registrar.registerEndpoint(endpoint, listenerContainerFactory);
}
private void processKafkaListenerAnnotation(MethodKafkaListenerEndpoint<?, ?> endpoint,
KafkaListener kafkaListener, Object bean, String[] topics, TopicPartitionOffset[] tps) {
endpoint.setBean(bean);
endpoint.setMessageHandlerMethodFactory(this.messageHandlerMethodFactory);
endpoint.setId(getEndpointId(kafkaListener));
endpoint.setGroupId(getEndpointGroupId(kafkaListener, endpoint.getId()));
endpoint.setTopicPartitions(tps);
endpoint.setTopics(topics);
endpoint.setTopicPattern(resolvePattern(kafkaListener));
endpoint.setClientIdPrefix(resolveExpressionAsString(kafkaListener.clientIdPrefix(), "clientIdPrefix"));
endpoint.setListenerInfo(resolveExpressionAsBytes(kafkaListener.info(), "info"));
String group = kafkaListener.containerGroup();
if (StringUtils.hasText(group)) {
Object resolvedGroup = resolveExpression(group);
if (resolvedGroup instanceof String) {
endpoint.setGroup((String) resolvedGroup);
}
}
String concurrency = kafkaListener.concurrency();
if (StringUtils.hasText(concurrency)) {
endpoint.setConcurrency(resolveExpressionAsInteger(concurrency, "concurrency"));
}
String autoStartup = kafkaListener.autoStartup();
if (StringUtils.hasText(autoStartup)) {
endpoint.setAutoStartup(resolveExpressionAsBoolean(autoStartup, "autoStartup"));
}
resolveKafkaProperties(endpoint, kafkaListener.properties());
endpoint.setSplitIterables(kafkaListener.splitIterables());
if (StringUtils.hasText(kafkaListener.batch())) {
endpoint.setBatchListener(Boolean.parseBoolean(kafkaListener.batch()));
}
endpoint.setBeanFactory(this.beanFactory);
resolveErrorHandler(endpoint, kafkaListener);
resolveContentTypeConverter(endpoint, kafkaListener);
resolveFilter(endpoint, kafkaListener);
}
private void resolveErrorHandler(MethodKafkaListenerEndpoint<?, ?> endpoint, KafkaListener kafkaListener) {
Object errorHandler = resolveExpression(kafkaListener.errorHandler());
if (errorHandler instanceof KafkaListenerErrorHandler) {
endpoint.setErrorHandler((KafkaListenerErrorHandler) errorHandler);
}
else {
String errorHandlerBeanName = resolveExpressionAsString(kafkaListener.errorHandler(), "errorHandler");
if (StringUtils.hasText(errorHandlerBeanName)) {
endpoint.setErrorHandler(
this.beanFactory.getBean(errorHandlerBeanName, KafkaListenerErrorHandler.class));
}
}
}
private void resolveContentTypeConverter(MethodKafkaListenerEndpoint<?, ?> endpoint, KafkaListener kafkaListener) {
Object converter = resolveExpression(kafkaListener.contentTypeConverter());
if (converter instanceof SmartMessageConverter) {
endpoint.setMessagingConverter((SmartMessageConverter) converter);
}
else {
String converterBeanName = resolveExpressionAsString(kafkaListener.contentTypeConverter(),
"contentTypeConverter");
if (StringUtils.hasText(converterBeanName)) {
endpoint.setMessagingConverter(
this.beanFactory.getBean(converterBeanName, SmartMessageConverter.class));
}
}
}
@SuppressWarnings({ "rawtypes", UNCHECKED })
private void resolveFilter(MethodKafkaListenerEndpoint<?, ?> endpoint, KafkaListener kafkaListener) {
Object filter = resolveExpression(kafkaListener.filter());
if (filter instanceof RecordFilterStrategy) {
endpoint.setRecordFilterStrategy((RecordFilterStrategy) filter);
}
else {
String filterBeanName = resolveExpressionAsString(kafkaListener.filter(), "filter");
if (StringUtils.hasText(filterBeanName)) {
endpoint.setRecordFilterStrategy(
this.beanFactory.getBean(filterBeanName, RecordFilterStrategy.class));
}
}
}
@Nullable
private KafkaListenerContainerFactory<?> resolveContainerFactory(KafkaListener kafkaListener,
Object factoryTarget, String beanName) {
String containerFactory = kafkaListener.containerFactory();
if (!StringUtils.hasText(containerFactory)) {
return null;
}
KafkaListenerContainerFactory<?> factory = null;
Object resolved = resolveExpression(containerFactory);
if (resolved instanceof KafkaListenerContainerFactory) {
return (KafkaListenerContainerFactory<?>) resolved;
}
String containerFactoryBeanName = resolveExpressionAsString(containerFactory,
"containerFactory");
if (StringUtils.hasText(containerFactoryBeanName)) {
assertBeanFactory();
try {
factory = this.beanFactory.getBean(containerFactoryBeanName, KafkaListenerContainerFactory.class);
}
catch (NoSuchBeanDefinitionException ex) {
throw new BeanInitializationException(
noBeanFoundMessage(factoryTarget, beanName, containerFactoryBeanName,
KafkaListenerContainerFactory.class), ex);
}
}
return factory;
}
protected void assertBeanFactory() {
Assert.state(this.beanFactory != null, "BeanFactory must be set to obtain container factory by bean name");
}
protected String noBeanFoundMessage(Object target, String listenerBeanName, String requestedBeanName,
Class<?> expectedClass) {
return "Could not register Kafka listener endpoint on ["
+ target + "] for bean " + listenerBeanName + ", no '" + expectedClass.getSimpleName() + "' with id '"
+ requestedBeanName + "' was found in the application context";
}
@SuppressWarnings(UNCHECKED)
private void resolveKafkaProperties(MethodKafkaListenerEndpoint<?, ?> endpoint, String[] propertyStrings) {
if (propertyStrings.length > 0) {
Properties properties = new Properties();
for (String property : propertyStrings) {
Object value = resolveExpression(property);
if (value instanceof String) {
loadProperty(properties, property, value);
}
else if (value instanceof String[]) {
for (String prop : (String[]) value) {
loadProperty(properties, prop, prop);
}
}
else if (value instanceof Collection) {
Collection<?> values = (Collection<?>) value;
if (values.size() > 0 && values.iterator().next() instanceof String) {
for (String prop : (Collection<String>) value) {
loadProperty(properties, prop, prop);
}
}
}
else {
throw new IllegalStateException("'properties' must resolve to a String, a String[] or "
+ "Collection<String>");
}
}
endpoint.setConsumerProperties(properties);
}
}
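// Illustrative usage sketch (hypothetical listener): each resolved String is fed to
// Properties.load() below, so both "key=value" and "key:value" forms are accepted.
//
// @KafkaListener(topics = "t", properties = { "max.poll.records=10",
//         "auto.offset.reset:earliest" })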
private void loadProperty(Properties properties, String property, Object value) {
try {
properties.load(new StringReader((String) value));
}
catch (IOException e) {
this.logger.error(e, () -> "Failed to load property " + property + ", continuing...");
}
}
private String getEndpointId(KafkaListener kafkaListener) {
if (StringUtils.hasText(kafkaListener.id())) {
return resolveExpressionAsString(kafkaListener.id(), "id");
}
else {
return GENERATED_ID_PREFIX + this.counter.getAndIncrement();
}
}
private String getEndpointGroupId(KafkaListener kafkaListener, String id) {
String groupId = null;
if (StringUtils.hasText(kafkaListener.groupId())) {
groupId = resolveExpressionAsString(kafkaListener.groupId(), "groupId");
}
if (groupId == null && kafkaListener.idIsGroup() && StringUtils.hasText(kafkaListener.id())) {
groupId = id;
}
return groupId;
}
private TopicPartitionOffset[] resolveTopicPartitions(KafkaListener kafkaListener) {
TopicPartition[] topicPartitions = kafkaListener.topicPartitions();
List<TopicPartitionOffset> result = new ArrayList<>();
if (topicPartitions.length > 0) {
for (TopicPartition topicPartition : topicPartitions) {
result.addAll(resolveTopicPartitionsList(topicPartition));
}
}
return result.toArray(new TopicPartitionOffset[0]);
}
private String[] resolveTopics(KafkaListener kafkaListener) {
String[] topics = kafkaListener.topics();
List<String> result = new ArrayList<>();
if (topics.length > 0) {
for (String topic1 : topics) {
Object topic = resolveExpression(topic1);
resolveAsString(topic, result);
}
}
return result.toArray(new String[0]);
}
private Pattern resolvePattern(KafkaListener kafkaListener) {
Pattern pattern = null;
String text = kafkaListener.topicPattern();
if (StringUtils.hasText(text)) {
Object resolved = resolveExpression(text);
if (resolved instanceof Pattern) {
pattern = (Pattern) resolved;
}
else if (resolved instanceof String) {
pattern = Pattern.compile((String) resolved);
}
else if (resolved != null) {
throw new IllegalStateException(
"topicPattern must resolve to a Pattern or String, not " + resolved.getClass());
}
}
return pattern;
}
private List<TopicPartitionOffset> resolveTopicPartitionsList(TopicPartition topicPartition) {
Object topic = resolveExpression(topicPartition.topic());
Assert.state(topic instanceof String,
() -> "topic in @TopicPartition must resolve to a String, not " + topic.getClass());
Assert.state(StringUtils.hasText((String) topic), "topic in @TopicPartition must not be empty");
String[] partitions = topicPartition.partitions();
PartitionOffset[] partitionOffsets = topicPartition.partitionOffsets();
Assert.state(partitions.length > 0 || partitionOffsets.length > 0,
() -> "At least one 'partition' or 'partitionOffset' required in @TopicPartition for topic '" + topic + "'");
List<TopicPartitionOffset> result = new ArrayList<>();
for (String partition : partitions) {
resolvePartitionAsInteger((String) topic, resolveExpression(partition), result, null, false, false);
}
if (partitionOffsets.length == 1 && partitionOffsets[0].partition().equals("*")) {
result.forEach(tpo -> {
tpo.setOffset(resolveInitialOffset(tpo.getTopic(), partitionOffsets[0]));
tpo.setRelativeToCurrent(isRelative(tpo.getTopic(), partitionOffsets[0]));
});
}
else {
for (PartitionOffset partitionOffset : partitionOffsets) {
Assert.isTrue(!partitionOffset.partition().equals("*"), () ->
"Partition wildcard '*' is only allowed in a single @PartitionOffset in " + result);
resolvePartitionAsInteger((String) topic, resolveExpression(partitionOffset.partition()), result,
resolveInitialOffset(topic, partitionOffset), isRelative(topic, partitionOffset), true);
}
}
Assert.isTrue(result.size() > 0, () -> "At least one partition required for " + topic);
return result;
}
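// Illustrative sketch of the wildcard branch above (hypothetical listener): a single
// @PartitionOffset with partition "*" applies its offset to every resolved partition.
//
// @KafkaListener(topicPartitions = @TopicPartition(topic = "t",
//         partitions = "0-5",
//         partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0")))
// public void listen(String value) { ... }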
private Long resolveInitialOffset(Object topic, PartitionOffset partitionOffset) {
Object initialOffsetValue = resolveExpression(partitionOffset.initialOffset());
Long initialOffset;
if (initialOffsetValue instanceof String) {
Assert.state(StringUtils.hasText((String) initialOffsetValue),
() -> "'initialOffset' in @PartitionOffset for topic '" + topic + "' cannot be empty");
initialOffset = Long.valueOf((String) initialOffsetValue);
}
else if (initialOffsetValue instanceof Long) {
initialOffset = (Long) initialOffsetValue;
}
else {
throw new IllegalArgumentException(String.format(
"@PartitionOffset for topic '%s' can't resolve '%s' as a Long or String, resolved to '%s'",
topic, partitionOffset.initialOffset(), initialOffsetValue.getClass()));
}
return initialOffset;
}
private boolean isRelative(Object topic, PartitionOffset partitionOffset) {
Object relativeToCurrentValue = resolveExpression(partitionOffset.relativeToCurrent());
Boolean relativeToCurrent;
if (relativeToCurrentValue instanceof String) {
relativeToCurrent = Boolean.valueOf((String) relativeToCurrentValue);
}
else if (relativeToCurrentValue instanceof Boolean) {
relativeToCurrent = (Boolean) relativeToCurrentValue;
}
else {
throw new IllegalArgumentException(String.format(
"@PartitionOffset for topic '%s' can't resolve '%s' as a Boolean or String, resolved to '%s'",
topic, partitionOffset.relativeToCurrent(), relativeToCurrentValue.getClass()));
}
return relativeToCurrent;
}
@SuppressWarnings(UNCHECKED)
private void resolveAsString(Object resolvedValue, List<String> result) {
if (resolvedValue instanceof String[]) {
for (Object object : (String[]) resolvedValue) {
resolveAsString(object, result);
}
}
else if (resolvedValue instanceof String) {
result.add((String) resolvedValue);
}
else if (resolvedValue instanceof Iterable) {
for (Object object : (Iterable<Object>) resolvedValue) {
resolveAsString(object, result);
}
}
else {
throw new IllegalArgumentException(String.format(
"@KafKaListener can't resolve '%s' as a String", resolvedValue));
}
}
@SuppressWarnings(UNCHECKED)
private void resolvePartitionAsInteger(String topic, Object resolvedValue,
List<TopicPartitionOffset> result, @Nullable Long offset, boolean isRelative, boolean checkDups) {
if (resolvedValue instanceof String[]) {
for (Object object : (String[]) resolvedValue) {
resolvePartitionAsInteger(topic, object, result, offset, isRelative, checkDups);
}
}
else if (resolvedValue instanceof String) {
Assert.state(StringUtils.hasText((String) resolvedValue),
() -> "partition in @TopicPartition for topic '" + topic + "' cannot be empty");
List<TopicPartitionOffset> collected = parsePartitions((String) resolvedValue)
.map(part -> new TopicPartitionOffset(topic, part, offset, isRelative))
.collect(Collectors.toList());
if (checkDups) {
collected.forEach(tpo -> {
Assert.state(!result.contains(tpo), () ->
String.format("@TopicPartition can't have the same partition configuration twice: [%s]",
tpo));
});
}
result.addAll(collected);
}
else if (resolvedValue instanceof Integer[]) {
for (Integer partition : (Integer[]) resolvedValue) {
result.add(new TopicPartitionOffset(topic, partition));
}
}
else if (resolvedValue instanceof Integer) {
result.add(new TopicPartitionOffset(topic, (Integer) resolvedValue));
}
else if (resolvedValue instanceof Iterable) {
for (Object object : (Iterable<Object>) resolvedValue) {
resolvePartitionAsInteger(topic, object, result, offset, isRelative, checkDups);
}
}
else {
throw new IllegalArgumentException(String.format(
"@KafKaListener for topic '%s' can't resolve '%s' as an Integer or String", topic, resolvedValue));
}
}
private String resolveExpressionAsString(String value, String attribute) {
Object resolved = resolveExpression(value);
if (resolved instanceof String) {
return (String) resolved;
}
else if (resolved != null) {
throw new IllegalStateException(THE_LEFT + attribute + "] must resolve to a String. "
+ RESOLVED_TO_LEFT + resolved.getClass() + RIGHT_FOR_LEFT + value + "]");
}
return null;
}
@Nullable
private byte[] resolveExpressionAsBytes(String value, String attribute) {
Object resolved = resolveExpression(value);
if (resolved instanceof String) {
if (StringUtils.hasText((CharSequence) resolved)) {
return ((String) resolved).getBytes(this.charset);
}
}
else if (resolved instanceof byte[]) {
return (byte[]) resolved;
}
else if (resolved != null) {
throw new IllegalStateException(THE_LEFT + attribute + "] must resolve to a String or byte[]. "
+ RESOLVED_TO_LEFT + resolved.getClass() + RIGHT_FOR_LEFT + value + "]");
}
return null;
}
private Integer resolveExpressionAsInteger(String value, String attribute) {
Object resolved = resolveExpression(value);
Integer result = null;
if (resolved instanceof String) {
result = Integer.parseInt((String) resolved);
}
else if (resolved instanceof Number) {
result = ((Number) resolved).intValue();
}
else if (resolved != null) {
throw new IllegalStateException(
THE_LEFT + attribute + "] must resolve to a Number or a String that can be parsed as an Integer. "
+ RESOLVED_TO_LEFT + resolved.getClass() + RIGHT_FOR_LEFT + value + "]");
}
return result;
}
private Boolean resolveExpressionAsBoolean(String value, String attribute) {
Object resolved = resolveExpression(value);
Boolean result = null;
if (resolved instanceof Boolean) {
result = (Boolean) resolved;
}
else if (resolved instanceof String) {
result = Boolean.parseBoolean((String) resolved);
}
else if (resolved != null) {
throw new IllegalStateException(
THE_LEFT + attribute + "] must resolve to a Boolean or a String that can be parsed as a Boolean. "
+ RESOLVED_TO_LEFT + resolved.getClass() + RIGHT_FOR_LEFT + value + "]");
}
return result;
}
private Object resolveExpression(String value) {
return this.resolver.evaluate(resolve(value), this.expressionContext);
}
/**
* Resolve the specified value if possible.
* @param value the value to resolve
* @return the resolved value
* @see ConfigurableBeanFactory#resolveEmbeddedValue
*/
private String resolve(String value) {
if (this.beanFactory != null && this.beanFactory instanceof ConfigurableBeanFactory) {
return ((ConfigurableBeanFactory) this.beanFactory).resolveEmbeddedValue(value);
}
return value;
}
private void addFormatters(FormatterRegistry registry) {
for (Converter<?, ?> converter : getBeansOfType(Converter.class)) {
registry.addConverter(converter);
}
for (GenericConverter converter : getBeansOfType(GenericConverter.class)) {
registry.addConverter(converter);
}
for (Formatter<?> formatter : getBeansOfType(Formatter.class)) {
registry.addFormatter(formatter);
}
}
private <T> Collection<T> getBeansOfType(Class<T> type) {
if (KafkaListenerAnnotationBeanPostProcessor.this.beanFactory instanceof ListableBeanFactory) {
return ((ListableBeanFactory) KafkaListenerAnnotationBeanPostProcessor.this.beanFactory)
.getBeansOfType(type)
.values();
}
else {
return Collections.emptySet();
}
}
/**
* Parse a list of partitions into a {@link Stream}. Example: "0-5,10-15".
* @param partsString the comma-delimited list of partitions/ranges.
* @return the stream of partition numbers, sorted and de-duplicated.
* @since 2.6.4
*/
private Stream<Integer> parsePartitions(String partsString) {
String[] partsStrings = partsString.split(",");
if (partsStrings.length == 1 && !partsStrings[0].contains("-")) {
return Stream.of(Integer.parseInt(partsStrings[0].trim()));
}
List<Integer> parts = new ArrayList<>();
for (String part : partsStrings) {
if (part.contains("-")) {
String[] startEnd = part.split("-");
Assert.state(startEnd.length == 2, "Only one hyphen allowed for a range of partitions: " + part);
int start = Integer.parseInt(startEnd[0].trim());
int end = Integer.parseInt(startEnd[1].trim());
Assert.state(end >= start, "Invalid range: " + part);
for (int i = start; i <= end; i++) {
parts.add(i);
}
}
else {
parsePartitions(part).forEach(p -> parts.add(p));
}
}
return parts.stream()
.sorted()
.distinct();
}
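// Worked example (illustrative): parsePartitions("0-2,5,1") expands the range to
// 0, 1, 2, adds 5 and 1, then sorts and de-duplicates, yielding 0, 1, 2, 5.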
/**
* A {@link MessageHandlerMethodFactory} adapter that offers a configurable underlying
* instance to use. Useful if the factory to use is determined once the endpoints
* have been registered but not created yet.
* @see KafkaListenerEndpointRegistrar#setMessageHandlerMethodFactory
*/
private class KafkaHandlerMethodFactoryAdapter implements MessageHandlerMethodFactory {
private final DefaultFormattingConversionService defaultFormattingConversionService =
new DefaultFormattingConversionService();
private MessageHandlerMethodFactory handlerMethodFactory;
public void setHandlerMethodFactory(MessageHandlerMethodFactory kafkaHandlerMethodFactory1) {
this.handlerMethodFactory = kafkaHandlerMethodFactory1;
}
@Override
public InvocableHandlerMethod createInvocableHandlerMethod(Object bean, Method method) {
return getHandlerMethodFactory().createInvocableHandlerMethod(bean, method);
}
private MessageHandlerMethodFactory getHandlerMethodFactory() {
if (this.handlerMethodFactory == null) {
this.handlerMethodFactory = createDefaultMessageHandlerMethodFactory();
}
return this.handlerMethodFactory;
}
private MessageHandlerMethodFactory createDefaultMessageHandlerMethodFactory() {
DefaultMessageHandlerMethodFactory defaultFactory = new DefaultMessageHandlerMethodFactory();
Validator validator = KafkaListenerAnnotationBeanPostProcessor.this.registrar.getValidator();
if (validator != null) {
defaultFactory.setValidator(validator);
}
defaultFactory.setBeanFactory(KafkaListenerAnnotationBeanPostProcessor.this.beanFactory);
this.defaultFormattingConversionService.addConverter(
new BytesToStringConverter(KafkaListenerAnnotationBeanPostProcessor.this.charset));
this.defaultFormattingConversionService.addConverter(new BytesToNumberConverter());
defaultFactory.setConversionService(this.defaultFormattingConversionService);
GenericMessageConverter messageConverter = new GenericMessageConverter(this.defaultFormattingConversionService);
defaultFactory.setMessageConverter(messageConverter);
List<HandlerMethodArgumentResolver> customArgumentsResolver =
new ArrayList<>(KafkaListenerAnnotationBeanPostProcessor.this.registrar.getCustomMethodArgumentResolvers());
// Has to be at the end - look at PayloadMethodArgumentResolver documentation
customArgumentsResolver.add(new KafkaNullAwarePayloadArgumentResolver(messageConverter, validator));
defaultFactory.setCustomArgumentResolvers(customArgumentsResolver);
defaultFactory.afterPropertiesSet();
return defaultFactory;
}
}
private static class BytesToStringConverter implements Converter<byte[], String> {
private final Charset charset;
BytesToStringConverter(Charset charset) {
this.charset = charset;
}
@Override
public String convert(byte[] source) {
return new String(source, this.charset);
}
}
static class ListenerScope implements Scope {
private final Map<String, Object> listeners = new HashMap<>();
ListenerScope() {
}
public void addListener(String key, Object bean) {
this.listeners.put(key, bean);
}
public void removeListener(String key) {
this.listeners.remove(key);
}
@Override
public Object get(String name, ObjectFactory<?> objectFactory) {
return this.listeners.get(name);
}
@Override
public Object remove(String name) {
return null;
}
@Override
public void registerDestructionCallback(String name, Runnable callback) {
}
@Override
public Object resolveContextualObject(String key) {
return this.listeners.get(key);
}
@Override
public String getConversationId() {
return null;
}
}
/**
* Post processes each set of annotation attributes.
*
* @since 2.7.2
*
*/
public interface AnnotationEnhancer extends BiFunction<Map<String, Object>, AnnotatedElement, Map<String, Object>> {
}
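// Illustrative sketch (assumed bean, not part of the framework): an enhancer that
// forces a group id onto every @KafkaListener; enhancers are applied in bean order.
//
// @Bean
// public KafkaListenerAnnotationBeanPostProcessor.AnnotationEnhancer groupIdEnhancer() {
//     return (attrs, element) -> {
//         Map<String, Object> copy = new HashMap<>(attrs);
//         copy.put("groupId", "forcedGroup");
//         return copy;
//     };
// }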
private final class BytesToNumberConverter implements ConditionalGenericConverter {
BytesToNumberConverter() {
}
@Override
@Nullable
public Set<ConvertiblePair> getConvertibleTypes() {
HashSet<ConvertiblePair> pairs = new HashSet<>();
pairs.add(new ConvertiblePair(byte[].class, long.class));
pairs.add(new ConvertiblePair(byte[].class, int.class));
pairs.add(new ConvertiblePair(byte[].class, short.class));
pairs.add(new ConvertiblePair(byte[].class, byte.class));
pairs.add(new ConvertiblePair(byte[].class, Long.class));
pairs.add(new ConvertiblePair(byte[].class, Integer.class));
pairs.add(new ConvertiblePair(byte[].class, Short.class));
pairs.add(new ConvertiblePair(byte[].class, Byte.class));
return pairs;
}
@Override
@Nullable
public Object convert(@Nullable Object source, TypeDescriptor sourceType, TypeDescriptor targetType) {
byte[] bytes = (byte[]) source;
if (targetType.getType().equals(long.class) || targetType.getType().equals(Long.class)) {
Assert.state(bytes.length >= 8, "At least 8 bytes needed to convert a byte[] to a long"); // NOSONAR
return ByteBuffer.wrap(bytes).getLong();
}
else if (targetType.getType().equals(int.class) || targetType.getType().equals(Integer.class)) {
Assert.state(bytes.length >= 4, "At least 4 bytes needed to convert a byte[] to an integer"); // NOSONAR
return ByteBuffer.wrap(bytes).getInt();
}
else if (targetType.getType().equals(short.class) || targetType.getType().equals(Short.class)) {
Assert.state(bytes.length >= 2, "At least 2 bytes needed to convert a byte[] to a short");
return ByteBuffer.wrap(bytes).getShort();
}
else if (targetType.getType().equals(byte.class) || targetType.getType().equals(Byte.class)) {
Assert.state(bytes.length >= 1, "At least 1 byte needed to convert a byte[] to a byte");
return ByteBuffer.wrap(bytes).get();
}
return null;
}
@Override
public boolean matches(TypeDescriptor sourceType, TypeDescriptor targetType) {
if (sourceType.getType().equals(byte[].class)) {
Class<?> target = targetType.getType();
return target.equals(long.class) || target.equals(int.class) || target.equals(short.class) // NOSONAR
|| target.equals(byte.class) || target.equals(Long.class) || target.equals(Integer.class)
|| target.equals(Short.class) || target.equals(Byte.class);
}
else {
return false;
}
}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaListenerConfigurationSelector.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import org.springframework.context.annotation.DeferredImportSelector;
import org.springframework.core.annotation.Order;
import org.springframework.core.type.AnnotationMetadata;
/**
* A {@link DeferredImportSelector} implementation with the lowest order to import a
* {@link KafkaBootstrapConfiguration} as late as possible.
*
* @author Artem Bilan
*
* @since 2.3
*/
@Order
public class KafkaListenerConfigurationSelector implements DeferredImportSelector {
@Override
public String[] selectImports(AnnotationMetadata importingClassMetadata) {
return new String[] { KafkaBootstrapConfiguration.class.getName() };
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaListenerConfigurer.java | /*
* Copyright 2002-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import org.springframework.kafka.config.KafkaListenerEndpointRegistrar;
/**
* Optional interface to be implemented by a Spring managed bean willing
* to customize how Kafka listener endpoints are configured. Typically
* used to define the default
* {@link org.springframework.kafka.config.KafkaListenerContainerFactory
* KafkaListenerContainerFactory} to use or for registering Kafka endpoints
* in a <em>programmatic</em> fashion as opposed to the <em>declarative</em>
* approach of using the @{@link KafkaListener} annotation.
*
* <p>See @{@link EnableKafka} for detailed usage examples.
*
* @author Stephane Nicoll
*
* @see EnableKafka
* @see org.springframework.kafka.config.KafkaListenerEndpointRegistrar
*/
public interface KafkaListenerConfigurer {
/**
* Callback allowing a {@link org.springframework.kafka.config.KafkaListenerEndpointRegistry
* KafkaListenerEndpointRegistry} and specific {@link org.springframework.kafka.config.KafkaListenerEndpoint
* KafkaListenerEndpoint} instances to be registered against the given
* {@link KafkaListenerEndpointRegistrar}. The default
* {@link org.springframework.kafka.config.KafkaListenerContainerFactory KafkaListenerContainerFactory}
* can also be customized.
* @param registrar the registrar to be configured
*/
void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar);
}
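// A minimal sketch of a programmatic customization (class and validator names are
// hypothetical): implementing this interface to set a payload validator on the
// registrar.
//
// @Configuration
// public class MyKafkaConfig implements KafkaListenerConfigurer {
//
//     @Override
//     public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
//         registrar.setValidator(new MyPayloadValidator());
//     }
// }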
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaListeners.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Container annotation that aggregates several {@link KafkaListener} annotations.
* <p>
* Can be used natively, declaring several nested {@link KafkaListener} annotations.
* Can also be used in conjunction with Java 8's support for repeatable annotations,
* where {@link KafkaListener} can simply be declared several times on the same method
* (or class), implicitly generating this container annotation.
*
* @author Gary Russell
*
* @see KafkaListener
*/
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE })
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface KafkaListeners {
KafkaListener[] value();
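// Illustrative sketch: the repeatable form below implicitly generates this container
// annotation (method and topics are hypothetical).
//
// @KafkaListener(id = "one", topics = "topicOne")
// @KafkaListener(id = "two", topics = "topicTwo")
// public void listen(String in) { ... }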
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaNullAwarePayloadArgumentResolver.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.util.List;
import org.springframework.core.MethodParameter;
import org.springframework.kafka.support.KafkaNull;
import org.springframework.messaging.Message;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.handler.annotation.support.PayloadMethodArgumentResolver;
import org.springframework.validation.Validator;
/**
* {@link PayloadMethodArgumentResolver} that can properly decode {@link KafkaNull}
* payloads, returning {@code null}. When using a custom
* {@link org.springframework.messaging.handler.annotation.support.MessageHandlerMethodFactory},
* add this resolver if you need to handle tombstone records with null values.
*
* @author Gary Russell
* @since 2.7.4
*
*/
public class KafkaNullAwarePayloadArgumentResolver extends PayloadMethodArgumentResolver {
KafkaNullAwarePayloadArgumentResolver(MessageConverter messageConverter, Validator validator) {
super(messageConverter, validator);
}
@Override
public Object resolveArgument(MethodParameter parameter, Message<?> message) throws Exception { // NOSONAR
Object resolved = super.resolveArgument(parameter, message);
/*
* Replace KafkaNull list elements with null.
*/
if (resolved instanceof List) {
List<?> list = ((List<?>) resolved);
for (int i = 0; i < list.size(); i++) {
if (list.get(i) instanceof KafkaNull) {
list.set(i, null);
}
}
}
return resolved;
}
@Override
protected boolean isEmptyPayload(Object payload) {
return payload == null || payload instanceof KafkaNull;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/KafkaStreamsDefaultConfiguration.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.util.HashSet;
import java.util.Set;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.UnsatisfiedDependencyException;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.KafkaStreamsConfiguration;
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
/**
* {@code @Configuration} class that registers a {@link StreamsBuilderFactoryBean}
* if a {@link KafkaStreamsConfiguration} bean with the name
* {@link KafkaStreamsDefaultConfiguration#DEFAULT_STREAMS_CONFIG_BEAN_NAME} is present
* in the application context. Otherwise an {@link UnsatisfiedDependencyException} is thrown.
*
* <p>This configuration class is automatically imported when using the @{@link EnableKafkaStreams}
* annotation. See {@link EnableKafkaStreams} Javadoc for complete usage.
*
* @author Artem Bilan
* @author Gary Russell
*
* @since 1.1.4
*/
@Configuration(proxyBeanMethods = false)
public class KafkaStreamsDefaultConfiguration {
/**
* The bean name for the {@link KafkaStreamsConfiguration} to be used for the default
* {@link StreamsBuilderFactoryBean} bean definition.
*/
public static final String DEFAULT_STREAMS_CONFIG_BEAN_NAME = "defaultKafkaStreamsConfig";
/**
* The bean name for auto-configured default {@link StreamsBuilderFactoryBean}.
*/
public static final String DEFAULT_STREAMS_BUILDER_BEAN_NAME = "defaultKafkaStreamsBuilder";
/**
* Bean for the default {@link StreamsBuilderFactoryBean}.
* @param streamsConfigProvider the streams config.
* @param configurerProvider the configurer.
*
* @return the factory bean.
*/
@Bean(name = DEFAULT_STREAMS_BUILDER_BEAN_NAME)
public StreamsBuilderFactoryBean defaultKafkaStreamsBuilder(
@Qualifier(DEFAULT_STREAMS_CONFIG_BEAN_NAME)
ObjectProvider<KafkaStreamsConfiguration> streamsConfigProvider,
ObjectProvider<StreamsBuilderFactoryBeanConfigurer> configurerProvider) {
KafkaStreamsConfiguration streamsConfig = streamsConfigProvider.getIfAvailable();
if (streamsConfig != null) {
StreamsBuilderFactoryBean fb = new StreamsBuilderFactoryBean(streamsConfig);
Set<StreamsBuilderFactoryBeanConfigurer> configuredBy = new HashSet<>();
configurerProvider.orderedStream().forEach(configurer -> {
configurer.configure(fb);
configuredBy.add(configurer);
});
return fb;
}
else {
throw new UnsatisfiedDependencyException(KafkaStreamsDefaultConfiguration.class.getName(),
DEFAULT_STREAMS_BUILDER_BEAN_NAME, "streamsConfig", "There is no '" +
DEFAULT_STREAMS_CONFIG_BEAN_NAME + "' " + KafkaStreamsConfiguration.class.getName() +
" bean in the application context.\n" +
"Consider declaring one or don't use @EnableKafkaStreams.");
}
}
}
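// A minimal sketch of the companion application configuration (bean values are
// assumptions): supplying the required "defaultKafkaStreamsConfig" bean so the
// factory bean above can be created.
//
// @Configuration
// @EnableKafkaStreams
// public class MyKafkaStreamsConfig {
//
//     @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
//     public KafkaStreamsConfiguration kStreamsConfig() {
//         Map<String, Object> props = new HashMap<>();
//         props.put(org.apache.kafka.streams.StreamsConfig.APPLICATION_ID_CONFIG, "my-app");
//         props.put(org.apache.kafka.streams.StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
//         return new KafkaStreamsConfiguration(props);
//     }
// }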
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/PartitionOffset.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Used to add partition/initial offset information to a {@code KafkaListener}.
*
* @author Artem Bilan
* @author Gary Russell
*/
@Target({})
@Retention(RetentionPolicy.RUNTIME)
public @interface PartitionOffset {
/**
 * The partition within the topic to listen on. Property placeholders and SpEL
 * expressions are supported, which must resolve to an Integer (or a String that can be
 * parsed as an Integer). '*' indicates that the initial offset will be applied to all
 * partitions in the encompassing {@link TopicPartition}. The string can contain a
 * comma-delimited list of partitions, or ranges of partitions (e.g.
 * {@code 0-5, 7, 10-15}), in which case the offset will be applied to all of those
 * partitions.
* @return partition within the topic.
*/
String partition();
/**
* The initial offset of the {@link #partition()}.
 * Property placeholders and SpEL expressions are supported,
 * which must resolve to a Long (or a String that can be parsed as a Long).
* @return initial offset.
*/
String initialOffset();
/**
* By default, positive {@link #initialOffset()} is absolute, negative
* is relative to the current topic end. When this is 'true', the
* initial offset (positive or negative) is relative to the current
* consumer position.
* @return whether or not the offset is relative to the current position.
* @since 1.1
*/
String relativeToCurrent() default "false";
}
|
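
For context, a hypothetical listener (topic, ids, and offsets are assumptions, not taken from this repository) showing how PartitionOffset is nested inside @TopicPartition on a @KafkaListener:

package com.example.listeners; // hypothetical package, for illustration only

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Component;

@Component
public class OffsetAwareListener {

    // Partition 0 starts at absolute offset 10; with relativeToCurrent = "true"
    // the offset would instead be relative to the current consumer position.
    @KafkaListener(id = "offsetAware", topicPartitions =
            @TopicPartition(topic = "orders", partitionOffsets =
                    @PartitionOffset(partition = "0", initialOffset = "10")))
    public void listen(String payload) {
        System.out.println("received: " + payload);
    }
}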
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/RetryTopicConfigurationProvider.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.reflect.Method;
import java.util.Map;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.ListableBeanFactory;
import org.springframework.beans.factory.config.BeanExpressionContext;
import org.springframework.beans.factory.config.BeanExpressionResolver;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.context.expression.StandardBeanExpressionResolver;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.retrytopic.RetryTopicConfiguration;
/**
*
* Attempts to provide an instance of
* {@link org.springframework.kafka.retrytopic.RetryTopicConfiguration} by either creating
* one from a {@link RetryableTopic} annotation, or from the bean container if no
* annotation is available.
*
 * If beans are found in the container, there's a check to determine whether or not the
 * provided topics should be handled by any of those instances.
*
* If the annotation is provided, a
* {@link org.springframework.kafka.annotation.DltHandler} annotated method is looked up.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
* @see org.springframework.kafka.retrytopic.RetryTopicConfigurer
* @see RetryableTopic
* @see org.springframework.kafka.annotation.DltHandler
*
*/
public class RetryTopicConfigurationProvider {
private final BeanFactory beanFactory;
private final BeanExpressionResolver resolver;
private final BeanExpressionContext expressionContext;
private static final LogAccessor LOGGER = new LogAccessor(LogFactory.getLog(RetryTopicConfigurationProvider.class));
/**
* Construct an instance using the provided bean factory and default resolver and bean
* expression context.
* @param beanFactory the bean factory.
*/
public RetryTopicConfigurationProvider(BeanFactory beanFactory) {
this(beanFactory, new StandardBeanExpressionResolver(), beanFactory instanceof ConfigurableBeanFactory
? new BeanExpressionContext((ConfigurableBeanFactory) beanFactory, null)
: null); // NOSONAR
}
/**
* Construct an instance using the provided parameters.
* @param beanFactory the bean factory.
* @param resolver the bean expression resolver.
* @param expressionContext the bean expression context.
*/
public RetryTopicConfigurationProvider(BeanFactory beanFactory, BeanExpressionResolver resolver,
BeanExpressionContext expressionContext) {
this.beanFactory = beanFactory;
this.resolver = resolver;
this.expressionContext = expressionContext;
}
public RetryTopicConfiguration findRetryConfigurationFor(String[] topics, Method method, Object bean) {
RetryableTopic annotation = AnnotationUtils.findAnnotation(method, RetryableTopic.class);
return annotation != null
? new RetryableTopicAnnotationProcessor(this.beanFactory, this.resolver, this.expressionContext)
.processAnnotation(topics, method, annotation, bean)
: maybeGetFromContext(topics);
}
private RetryTopicConfiguration maybeGetFromContext(String[] topics) {
if (this.beanFactory == null || !ListableBeanFactory.class.isAssignableFrom(this.beanFactory.getClass())) {
LOGGER.warn("No ListableBeanFactory found, skipping RetryTopic configuration.");
return null;
}
Map<String, RetryTopicConfiguration> retryTopicProcessors = ((ListableBeanFactory) this.beanFactory)
.getBeansOfType(RetryTopicConfiguration.class);
return retryTopicProcessors
.values()
.stream()
.filter(topicConfiguration -> topicConfiguration.hasConfigurationForTopics(topics))
.findFirst()
.orElse(null);
}
}
|
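
The maybeGetFromContext(...) branch above looks up RetryTopicConfiguration beans when no annotation is present. A sketch of such a bean follows; the topic name and retry settings are assumptions, and only builder methods already used in this repository are exercised.

package com.example.retry; // hypothetical package, for illustration only

import java.util.Arrays;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.retrytopic.RetryTopicConfiguration;
import org.springframework.kafka.retrytopic.RetryTopicConfigurationBuilder;

@Configuration
public class RetryTopicBeans {

    // hasConfigurationForTopics(...) matches listeners on "orders" to this bean.
    @Bean
    public RetryTopicConfiguration ordersRetryTopic(KafkaTemplate<String, String> template) {
        return RetryTopicConfigurationBuilder.newInstance()
                .maxAttempts(4)
                .includeTopics(Arrays.asList("orders"))
                .create(template);
    }
}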
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/RetryableTopic.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.springframework.kafka.retrytopic.DltStrategy;
import org.springframework.kafka.retrytopic.FixedDelayStrategy;
import org.springframework.kafka.retrytopic.RetryTopicConstants;
import org.springframework.kafka.retrytopic.TopicSuffixingStrategy;
import org.springframework.retry.annotation.Backoff;
/**
*
* Annotation to create the retry and dlt topics for a {@link KafkaListener} annotated
* listener. See {@link org.springframework.kafka.retrytopic.RetryTopicConfigurer} for
* usage examples. All String properties can be resolved from property placeholders
* {@code ${...}} or SpEL expressions {@code #{...}}.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
* @see org.springframework.kafka.retrytopic.RetryTopicConfigurer
*/
@Target({ ElementType.METHOD })
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface RetryableTopic {
/**
* The number of attempts made before the message is sent to the DLT. Expressions must
* resolve to an integer or a string that can be parsed as such. Default 3.
* @return the number of attempts.
*/
String attempts() default "3";
/**
* Specify the backoff properties for retrying this operation. The default is a simple
 * {@link Backoff} specification with no properties - see its documentation for
* defaults.
* @return a backoff specification
*/
Backoff backoff() default @Backoff;
/**
*
* The amount of time in milliseconds after which message retrying should give up and
 * send the message to the DLT. Expressions must resolve to a long or a String that can
* be parsed as such.
* @return the timeout value.
*
*/
String timeout() default "";
/**
*
* The bean name of the {@link org.springframework.kafka.core.KafkaTemplate} bean that
 * will be used to forward the message to the retry and DLT topics. If not specified,
* a bean with name {@code retryTopicDefaultKafkaTemplate} or {@code kafkaTemplate}
* will be looked up.
*
* @return the kafkaTemplate bean name.
*/
String kafkaTemplate() default "";
/**
* The bean name of the
* {@link org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory}
* that will be used to create the consumers for the retry and dlt topics. If none is
* provided, the one from the {@link KafkaListener} annotation is used, or else a
* default one, if any.
*
* @return the listenerContainerFactory bean name.
*/
String listenerContainerFactory() default "";
/**
* Whether or not the topics should be created after registration with the provided
* configurations. Not to be confused with the
* ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG from Kafka configuration, which is
* handled by the {@link org.apache.kafka.clients.consumer.KafkaConsumer}.
* Expressions must resolve to a boolean or a String that can be parsed as such.
* @return the configuration.
*/
String autoCreateTopics() default "true";
/**
* The number of partitions for the automatically created topics. Expressions must
* resolve to an integer or a String that can be parsed as such. Default 1.
* @return the number of partitions.
*/
String numPartitions() default "1";
/**
* The replication factor for the automatically created topics. Expressions must
* resolve to a short or a String that can be parsed as such.
* @return the replication factor.
*/
String replicationFactor() default "1";
/**
* The exception types that should be retried.
* @return the exceptions.
*/
Class<? extends Throwable>[] include() default {};
/**
* The exception types that should not be retried. When the message processing throws
* these exceptions the message goes straight to the DLT.
* @return the exceptions not to be retried.
*/
Class<? extends Throwable>[] exclude() default {};
/**
* The exception class names that should be retried.
* @return the exceptions.
*/
String[] includeNames() default {};
/**
* The exception class names that should not be retried. When the message processing
* throws these exceptions the message goes straight to the DLT.
* @return the exceptions not to be retried.
*/
String[] excludeNames() default {};
/**
* Whether or not the captured exception should be traversed to look for the
* exceptions provided above. Expressions must resolve to a boolean or a String that
* can be parsed as such. Default true when {@link #include()} or {@link #exclude()}
* provided; false otherwise.
* @return the value.
*/
String traversingCauses() default "";
/**
* The suffix that will be appended to the main topic in order to generate the retry
* topics. The corresponding delay value is also appended.
* @return the retry topics' suffix.
*/
String retryTopicSuffix() default RetryTopicConstants.DEFAULT_RETRY_SUFFIX;
/**
* The suffix that will be appended to the main topic in order to generate the dlt
* topic.
* @return the dlt suffix.
*/
String dltTopicSuffix() default RetryTopicConstants.DEFAULT_DLT_SUFFIX;
/**
* Whether the retry topics will be suffixed with the delay value for that topic or a
* simple index.
* @return the strategy.
*/
TopicSuffixingStrategy topicSuffixingStrategy() default TopicSuffixingStrategy.SUFFIX_WITH_DELAY_VALUE;
/**
 * Whether or not to create a DLT, and whether to redeliver to the DLT if delivery fails or just give up.
* @return the dlt strategy.
*/
DltStrategy dltStrategy() default DltStrategy.ALWAYS_RETRY_ON_ERROR;
/**
* Whether to use a single or multiple topics when using a fixed delay.
* @return the fixed delay strategy.
*/
FixedDelayStrategy fixedDelayTopicStrategy() default FixedDelayStrategy.MULTIPLE_TOPICS;
/**
* Override the container factory's {@code autoStartup} property for just the DLT container.
* Usually used to not start the DLT container when {@code autoStartup} is true.
* @return whether or not to override the factory.
* @since 2.8
*/
String autoStartDltHandler() default "";
}
|
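
A hypothetical end-to-end use of the annotation above (topic, ids, and backoff values are assumptions): failures are retried on generated retry topics, and the record finally reaches a @DltHandler method.

package com.example.retry; // hypothetical package, for illustration only

import org.springframework.kafka.annotation.DltHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.retry.annotation.Backoff;
import org.springframework.stereotype.Component;

@Component
public class RetryingOrderListener {

    // Three attempts with exponential backoff (1s, then 2s) before the DLT.
    @RetryableTopic(attempts = "3", backoff = @Backoff(delay = 1000, multiplier = 2.0))
    @KafkaListener(id = "ordersRetry", topics = "orders")
    public void listen(String order) {
        throw new IllegalStateException("simulated failure for: " + order);
    }

    // Invoked after the attempts are exhausted and the record lands in the DLT.
    @DltHandler
    public void handleDlt(String order) {
        System.out.println("gave up on: " + order);
    }
}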
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/RetryableTopicAnnotationProcessor.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.config.BeanExpressionContext;
import org.springframework.beans.factory.config.BeanExpressionResolver;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.context.expression.BeanFactoryResolver;
import org.springframework.context.expression.StandardBeanExpressionResolver;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.expression.spel.support.StandardEvaluationContext;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.retrytopic.RetryTopicConfiguration;
import org.springframework.kafka.retrytopic.RetryTopicConfigurationBuilder;
import org.springframework.kafka.retrytopic.RetryTopicConfigurer;
import org.springframework.kafka.retrytopic.RetryTopicConstants;
import org.springframework.kafka.retrytopic.RetryTopicInternalBeanNames;
import org.springframework.kafka.support.EndpointHandlerMethod;
import org.springframework.retry.annotation.Backoff;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.backoff.ExponentialRandomBackOffPolicy;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.backoff.SleepingBackOffPolicy;
import org.springframework.retry.backoff.UniformRandomBackOffPolicy;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;
/**
* Processes the provided {@link RetryableTopic} annotation
 * returning a {@link RetryTopicConfiguration}.
*
* @author Tomaz Fernandes
* @author Gary Russell
* @since 2.7
*
*/
public class RetryableTopicAnnotationProcessor {
private static final String NULL = "null";
private static final String THE_OSQ = "The [";
private static final String RESOLVED_TO_OSQ = "Resolved to [";
private static final String CSQ = "]";
private static final String CSQ_FOR_OSQ = "] for [";
private final BeanFactory beanFactory;
private final BeanExpressionResolver resolver;
private final BeanExpressionContext expressionContext;
private static final String DEFAULT_SPRING_BOOT_KAFKA_TEMPLATE_NAME = "kafkaTemplate";
/**
 * Construct an instance using the provided bean factory and default resolver and
 * expression context.
* @param beanFactory the bean factory.
*/
public RetryableTopicAnnotationProcessor(BeanFactory beanFactory) {
this(beanFactory, new StandardBeanExpressionResolver(), beanFactory instanceof ConfigurableBeanFactory
? new BeanExpressionContext((ConfigurableBeanFactory) beanFactory, null)
: null); // NOSONAR
}
/**
* Construct an instance using the provided parameters.
* @param beanFactory the bean factory.
* @param resolver the bean expression resolver.
* @param expressionContext the bean expression context.
*/
public RetryableTopicAnnotationProcessor(BeanFactory beanFactory, BeanExpressionResolver resolver,
BeanExpressionContext expressionContext) {
this.beanFactory = beanFactory;
this.resolver = resolver;
this.expressionContext = expressionContext;
}
public RetryTopicConfiguration processAnnotation(String[] topics, Method method, RetryableTopic annotation,
Object bean) {
Long resolvedTimeout = resolveExpressionAsLong(annotation.timeout(), "timeout", false);
long timeout = RetryTopicConstants.NOT_SET;
if (resolvedTimeout != null) {
timeout = resolvedTimeout;
}
List<Class<? extends Throwable>> includes = resolveClasses(annotation.include(), annotation.includeNames(),
"include");
List<Class<? extends Throwable>> excludes = resolveClasses(annotation.exclude(), annotation.excludeNames(),
"exclude");
boolean traverse = false;
if (StringUtils.hasText(annotation.traversingCauses())) {
Boolean traverseResolved = resolveExpressionAsBoolean(annotation.traversingCauses(), "traversingCauses");
if (traverseResolved != null) {
traverse = traverseResolved;
}
else {
traverse = includes.size() > 0 || excludes.size() > 0;
}
}
Boolean autoStartDlt = null;
if (StringUtils.hasText(annotation.autoStartDltHandler())) {
autoStartDlt = resolveExpressionAsBoolean(annotation.autoStartDltHandler(), "autoStartDltContainer");
}
return RetryTopicConfigurationBuilder.newInstance()
.maxAttempts(resolveExpressionAsInteger(annotation.attempts(), "attempts", true))
.customBackoff(createBackoffFromAnnotation(annotation.backoff(), this.beanFactory))
.retryTopicSuffix(resolveExpressionAsString(annotation.retryTopicSuffix(), "retryTopicSuffix"))
.dltSuffix(resolveExpressionAsString(annotation.dltTopicSuffix(), "dltTopicSuffix"))
.dltHandlerMethod(getDltProcessor(method, bean))
.includeTopics(Arrays.asList(topics))
.listenerFactory(annotation.listenerContainerFactory())
.autoCreateTopics(resolveExpressionAsBoolean(annotation.autoCreateTopics(), "autoCreateTopics"),
resolveExpressionAsInteger(annotation.numPartitions(), "numPartitions", true),
resolveExpressionAsShort(annotation.replicationFactor(), "replicationFactor", true))
.retryOn(includes)
.notRetryOn(excludes)
.traversingCauses(traverse)
.useSingleTopicForFixedDelays(annotation.fixedDelayTopicStrategy())
.dltProcessingFailureStrategy(annotation.dltStrategy())
.autoStartDltHandler(autoStartDlt)
.setTopicSuffixingStrategy(annotation.topicSuffixingStrategy())
.timeoutAfter(timeout)
.create(getKafkaTemplate(annotation.kafkaTemplate(), topics));
}
private SleepingBackOffPolicy<?> createBackoffFromAnnotation(Backoff backoff, BeanFactory beanFactory) { // NOSONAR
StandardEvaluationContext evaluationContext = new StandardEvaluationContext();
evaluationContext.setBeanResolver(new BeanFactoryResolver(beanFactory));
// Code from Spring Retry
Long min = backoff.delay() == 0 ? backoff.value() : backoff.delay();
if (StringUtils.hasText(backoff.delayExpression())) {
min = resolveExpressionAsLong(backoff.delayExpression(), "delayExpression", true);
}
Long max = backoff.maxDelay();
if (StringUtils.hasText(backoff.maxDelayExpression())) {
max = resolveExpressionAsLong(backoff.maxDelayExpression(), "maxDelayExpression", true);
}
Double multiplier = backoff.multiplier();
if (StringUtils.hasText(backoff.multiplierExpression())) {
multiplier = resolveExpressionAsDouble(backoff.multiplierExpression(), "multiplierExpression", true);
}
if (multiplier != null && multiplier > 0) {
ExponentialBackOffPolicy policy = new ExponentialBackOffPolicy();
if (backoff.random()) {
policy = new ExponentialRandomBackOffPolicy();
}
policy.setInitialInterval(min);
policy.setMultiplier(multiplier);
policy.setMaxInterval(max > min ? max : ExponentialBackOffPolicy.DEFAULT_MAX_INTERVAL);
return policy;
}
if (max != null && min != null && max > min) {
UniformRandomBackOffPolicy policy = new UniformRandomBackOffPolicy();
policy.setMinBackOffPeriod(min);
policy.setMaxBackOffPeriod(max);
return policy;
}
FixedBackOffPolicy policy = new FixedBackOffPolicy();
if (min != null) {
policy.setBackOffPeriod(min);
}
return policy;
}
private EndpointHandlerMethod getDltProcessor(Method listenerMethod, Object bean) {
Class<?> declaringClass = listenerMethod.getDeclaringClass();
return Arrays.stream(ReflectionUtils.getDeclaredMethods(declaringClass))
.filter(method -> AnnotationUtils.findAnnotation(method, DltHandler.class) != null)
.map(method -> RetryTopicConfigurer.createHandlerMethodWith(bean, method))
.findFirst()
.orElse(RetryTopicConfigurer.DEFAULT_DLT_HANDLER);
}
private KafkaOperations<?, ?> getKafkaTemplate(String kafkaTemplateName, String[] topics) {
if (StringUtils.hasText(kafkaTemplateName)) {
Assert.state(this.beanFactory != null, "BeanFactory must be set to obtain kafka template by bean name");
try {
return this.beanFactory.getBean(kafkaTemplateName, KafkaOperations.class);
}
catch (NoSuchBeanDefinitionException ex) {
throw new BeanInitializationException("Could not register Kafka listener endpoint for topics "
+ Arrays.asList(topics) + ", no " + KafkaOperations.class.getSimpleName()
+ " with id '" + kafkaTemplateName + "' was found in the application context", ex);
}
}
try {
return this.beanFactory.getBean(RetryTopicInternalBeanNames.DEFAULT_KAFKA_TEMPLATE_BEAN_NAME,
KafkaOperations.class);
}
catch (NoSuchBeanDefinitionException ex) {
try {
return this.beanFactory.getBean(DEFAULT_SPRING_BOOT_KAFKA_TEMPLATE_NAME, KafkaOperations.class);
}
catch (NoSuchBeanDefinitionException exc) {
exc.addSuppressed(ex);
throw new BeanInitializationException("Could not find a KafkaTemplate to configure the retry topics.", // NOSONAR (lost stack trace)
exc);
}
}
}
private String resolveExpressionAsString(String value, String attribute) {
Object resolved = resolveExpression(value);
if (resolved instanceof String) {
return (String) resolved;
}
else if (resolved != null) {
throw new IllegalStateException(THE_OSQ + attribute + "] must resolve to a String. "
+ RESOLVED_TO_OSQ + resolved.getClass() + CSQ_FOR_OSQ + value + CSQ);
}
return null;
}
private Integer resolveExpressionAsInteger(String value, String attribute, boolean required) {
Object resolved = resolveExpression(value);
Integer result = null;
if (resolved instanceof String) {
if (!required && !StringUtils.hasText((String) resolved)) {
result = null;
}
else {
result = Integer.parseInt((String) resolved);
}
}
else if (resolved instanceof Number) {
result = ((Number) resolved).intValue();
}
else if (resolved != null || required) {
throw new IllegalStateException(
THE_OSQ + attribute + "] must resolve to a Number or a String that can be parsed as an Integer. "
+ RESOLVED_TO_OSQ + (resolved == null ? NULL : resolved.getClass())
+ CSQ_FOR_OSQ + value + CSQ);
}
return result;
}
private Short resolveExpressionAsShort(String value, String attribute, boolean required) {
Object resolved = resolveExpression(value);
Short result = null;
if (resolved instanceof String) {
if (!required && !StringUtils.hasText((String) resolved)) {
result = null;
}
else {
result = Short.parseShort((String) resolved);
}
}
else if (resolved instanceof Number) {
result = ((Number) resolved).shortValue();
}
else if (resolved != null || required) {
throw new IllegalStateException(
THE_OSQ + attribute + "] must resolve to a Number or a String that can be parsed as a Short. "
+ RESOLVED_TO_OSQ + (resolved == null ? NULL : resolved.getClass())
+ CSQ_FOR_OSQ + value + CSQ);
}
return result;
}
private Long resolveExpressionAsLong(String value, String attribute, boolean required) {
Object resolved = resolveExpression(value);
Long result = null;
if (resolved instanceof String) {
if (!required && !StringUtils.hasText((String) resolved)) {
result = null;
}
else {
result = Long.parseLong((String) resolved);
}
}
else if (resolved instanceof Number) {
result = ((Number) resolved).longValue();
}
else if (resolved != null || required) {
throw new IllegalStateException(
THE_OSQ + attribute + "] must resolve to a Number or a String that can be parsed as a Long. "
+ RESOLVED_TO_OSQ + (resolved == null ? NULL : resolved.getClass())
+ CSQ_FOR_OSQ + value + CSQ);
}
return result;
}
private Double resolveExpressionAsDouble(String value, String attribute, boolean required) {
Object resolved = resolveExpression(value);
Double result = null;
if (resolved instanceof String) {
if (!required && !StringUtils.hasText((String) resolved)) {
result = null;
}
else {
result = Double.parseDouble((String) resolved);
}
}
else if (resolved instanceof Number) {
result = ((Number) resolved).doubleValue();
}
else if (resolved != null || required) {
throw new IllegalStateException(
THE_OSQ + attribute + "] must resolve to a Number or a String that can be parsed as a Double. "
+ RESOLVED_TO_OSQ + (resolved == null ? NULL : resolved.getClass())
+ CSQ_FOR_OSQ + value + CSQ);
}
return result;
}
private Boolean resolveExpressionAsBoolean(String value, String attribute) {
Object resolved = resolveExpression(value);
Boolean result = null;
if (resolved instanceof Boolean) {
result = (Boolean) resolved;
}
else if (resolved instanceof String) {
result = Boolean.parseBoolean((String) resolved);
}
else if (resolved != null) {
throw new IllegalStateException(
THE_OSQ + attribute + "] must resolve to a Boolean or a String that can be parsed as a Boolean. "
+ RESOLVED_TO_OSQ + resolved.getClass() + CSQ_FOR_OSQ + value + CSQ);
}
return result;
}
@SuppressWarnings("unchecked")
private List<Class<? extends Throwable>> resolveClasses(Class<? extends Throwable>[] fromAnnot, String[] names,
String type) {
List<Class<? extends Throwable>> classes = new ArrayList<>(Arrays.asList(fromAnnot));
try {
for (String name : names) {
Class<?> clazz = ClassUtils.forName(name, ClassUtils.getDefaultClassLoader());
if (!Throwable.class.isAssignableFrom(clazz)) {
throw new IllegalStateException(type + " entry must be of type Throwable: " + clazz);
}
classes.add((Class<? extends Throwable>) clazz);
}
}
catch (ClassNotFoundException | LinkageError ex) {
throw new IllegalStateException(ex);
}
return classes;
}
private Object resolveExpression(String value) {
String resolved = resolve(value);
if (this.expressionContext != null) {
return this.resolver.evaluate(resolved, this.expressionContext);
}
else {
return value;
}
}
private String resolve(String value) {
if (this.beanFactory != null && this.beanFactory instanceof ConfigurableBeanFactory) {
return ((ConfigurableBeanFactory) this.beanFactory).resolveEmbeddedValue(value);
}
return value;
}
}
|
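
The resolveExpressionAs*(...) helpers above are what let the String-typed annotation attributes accept property placeholders and SpEL. A hypothetical listener relying on that resolution (property names are assumptions):

package com.example.retry; // hypothetical package, for illustration only

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.RetryableTopic;
import org.springframework.stereotype.Component;

@Component
public class ConfigurableRetryListener {

    // "attempts" goes through resolveExpressionAsInteger(...); the SpEL timeout
    // evaluates to a Number and is handled by resolveExpressionAsLong(...).
    @RetryableTopic(attempts = "${retry.attempts:5}", timeout = "#{60 * 1000}")
    @KafkaListener(id = "configurableRetry", topics = "events")
    public void listen(String event) {
        // processing logic elided
    }
}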
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/TopicPartition.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.annotation;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Used to add topic/partition information to a {@code KafkaListener}.
*
* @author Gary Russell
* @author Artem Bilan
*
*/
@Target({})
@Retention(RetentionPolicy.RUNTIME)
public @interface TopicPartition {
/**
* The topic to listen on.
 * @return the topic to listen on. Property placeholders
* and SpEL expressions are supported, which must resolve
* to a String.
*/
String topic();
/**
* The partitions within the topic. Partitions specified here can't be duplicated in
* {@link #partitionOffsets()}. Each string can contain a comma-delimited list of
 * partitions, or ranges of partitions (e.g. {@code 0-5, 7, 10-15}).
 * @return the partitions within the topic. Property placeholders and SpEL
* expressions are supported, which must resolve to Integers (or Strings that can be
* parsed as Integers).
*/
String[] partitions() default {};
/**
* The partitions with initial offsets within the topic. There must only be one
* instance of {@link PartitionOffset} if its 'partition' property is '*'.
* Partitions specified here can't be duplicated in the {@link #partitions()}.
* @return the {@link PartitionOffset} array.
*/
PartitionOffset[] partitionOffsets() default {};
}
|
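
A hypothetical manual-assignment listener (topic and ranges are assumptions) exercising the partitions attribute above, including the comma-delimited range form:

package com.example.listeners; // hypothetical package, for illustration only

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Component;

@Component
public class AssignedPartitionsListener {

    // Partitions 0, 1, 2 and 5 of "metrics" are assigned directly,
    // bypassing consumer group partition assignment.
    @KafkaListener(id = "assignedMetrics", topicPartitions =
            @TopicPartition(topic = "metrics", partitions = { "0-2", "5" }))
    public void listen(String value) {
        System.out.println("metric: " + value);
    }
}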
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/annotation/package-info.java | /**
 * Package for Kafka annotations.
*/
package org.springframework.kafka.annotation;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/AbstractKafkaListenerContainerFactory.java | /*
* Copyright 2014-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.Arrays;
import java.util.Collection;
import java.util.regex.Pattern;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.AfterRollbackProcessor;
import org.springframework.kafka.listener.BatchErrorHandler;
import org.springframework.kafka.listener.BatchInterceptor;
import org.springframework.kafka.listener.CommonErrorHandler;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.ErrorHandler;
import org.springframework.kafka.listener.GenericErrorHandler;
import org.springframework.kafka.listener.RecordInterceptor;
import org.springframework.kafka.listener.adapter.BatchToRecordAdapter;
import org.springframework.kafka.listener.adapter.RecordFilterStrategy;
import org.springframework.kafka.listener.adapter.ReplyHeadersConfigurer;
import org.springframework.kafka.requestreply.ReplyingKafkaOperations;
import org.springframework.kafka.support.JavaUtils;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.converter.MessageConverter;
import org.springframework.retry.RecoveryCallback;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
/**
* Base {@link KafkaListenerContainerFactory} for Spring's base container implementation.
*
* @param <C> the {@link AbstractMessageListenerContainer} implementation type.
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
*
* @see AbstractMessageListenerContainer
*/
public abstract class AbstractKafkaListenerContainerFactory<C extends AbstractMessageListenerContainer<K, V>, K, V>
implements KafkaListenerContainerFactory<C>, ApplicationEventPublisherAware, InitializingBean,
ApplicationContextAware {
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR protected
private final ContainerProperties containerProperties = new ContainerProperties((Pattern) null); // NOSONAR
private GenericErrorHandler<?> errorHandler;
private CommonErrorHandler commonErrorHandler;
private ConsumerFactory<? super K, ? super V> consumerFactory;
private Boolean autoStartup;
private Integer phase;
private MessageConverter messageConverter;
private RecordFilterStrategy<? super K, ? super V> recordFilterStrategy;
private Boolean ackDiscarded;
private RetryTemplate retryTemplate;
private RecoveryCallback<? extends Object> recoveryCallback;
private Boolean statefulRetry;
private Boolean batchListener;
private ApplicationEventPublisher applicationEventPublisher;
private KafkaTemplate<?, ?> replyTemplate;
private AfterRollbackProcessor<? super K, ? super V> afterRollbackProcessor;
private ReplyHeadersConfigurer replyHeadersConfigurer;
private Boolean missingTopicsFatal;
private RecordInterceptor<K, V> recordInterceptor;
private BatchInterceptor<K, V> batchInterceptor;
private BatchToRecordAdapter<K, V> batchToRecordAdapter;
private ApplicationContext applicationContext;
private ContainerCustomizer<K, V, C> containerCustomizer;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
/**
* Specify a {@link ConsumerFactory} to use.
* @param consumerFactory The consumer factory.
*/
public void setConsumerFactory(ConsumerFactory<? super K, ? super V> consumerFactory) {
this.consumerFactory = consumerFactory;
}
public ConsumerFactory<? super K, ? super V> getConsumerFactory() {
return this.consumerFactory;
}
/**
* Specify an {@code autoStartup boolean} flag.
* @param autoStartup true for auto startup.
* @see AbstractMessageListenerContainer#setAutoStartup(boolean)
*/
public void setAutoStartup(Boolean autoStartup) {
this.autoStartup = autoStartup;
}
/**
* Specify a {@code phase} to use.
* @param phase The phase.
* @see AbstractMessageListenerContainer#setPhase(int)
*/
public void setPhase(int phase) {
this.phase = phase;
}
/**
* Set the message converter to use if dynamic argument type matching is needed.
* @param messageConverter the converter.
*/
public void setMessageConverter(MessageConverter messageConverter) {
this.messageConverter = messageConverter;
}
/**
* Set the record filter strategy.
* @param recordFilterStrategy the strategy.
*/
public void setRecordFilterStrategy(RecordFilterStrategy<? super K, ? super V> recordFilterStrategy) {
this.recordFilterStrategy = recordFilterStrategy;
}
/**
* Set to true to ack discards when a filter strategy is in use.
* @param ackDiscarded the ackDiscarded.
*/
public void setAckDiscarded(Boolean ackDiscarded) {
this.ackDiscarded = ackDiscarded;
}
/**
* Set a retryTemplate.
* @param retryTemplate the template.
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
public void setRetryTemplate(RetryTemplate retryTemplate) {
this.retryTemplate = retryTemplate;
}
/**
* Set a callback to be used with the {@link #setRetryTemplate(RetryTemplate)
* retryTemplate}.
* @param recoveryCallback the callback.
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
public void setRecoveryCallback(RecoveryCallback<? extends Object> recoveryCallback) {
this.recoveryCallback = recoveryCallback;
}
/**
 * When using a {@link RetryTemplate}, set to true to enable stateful retry. Use in
* conjunction with a
* {@link org.springframework.kafka.listener.SeekToCurrentErrorHandler} when retry can
* take excessive time; each failure goes back to the broker, to keep the Consumer
* alive.
* @param statefulRetry true to enable stateful retry.
* @since 2.1.3
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
public void setStatefulRetry(boolean statefulRetry) {
this.statefulRetry = statefulRetry;
}
/**
 * Return true if this factory creates batch listeners.
* @return true for a batch listener.
* @since 1.1
*/
public Boolean isBatchListener() {
return this.batchListener;
}
/**
 * Set to true if this factory should create batch listeners.
* @param batchListener true for a batch listener.
* @since 1.1
*/
public void setBatchListener(Boolean batchListener) {
this.batchListener = batchListener;
}
@Override
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) {
this.applicationEventPublisher = applicationEventPublisher;
}
/**
* Set the {@link KafkaTemplate} to use to send replies.
* @param replyTemplate the template.
* @since 2.0
*/
public void setReplyTemplate(KafkaTemplate<?, ?> replyTemplate) {
if (replyTemplate instanceof ReplyingKafkaOperations) {
this.logger.warn(
"The 'replyTemplate' should not be an implementation of 'ReplyingKafkaOperations'; "
+ "such implementations are for client-side request/reply operations; here we "
+ "are simply sending a reply to an incoming request so the reply container will "
+ "never be used and will consume unnecessary resources.");
}
this.replyTemplate = replyTemplate;
}
/**
* Set the error handler to call when the listener throws an exception.
* @param errorHandler the error handler.
* @since 2.2
* @deprecated in favor of {@link #setCommonErrorHandler(CommonErrorHandler)}
* @see #setCommonErrorHandler(CommonErrorHandler)
*/
@Deprecated
public void setErrorHandler(ErrorHandler errorHandler) {
this.errorHandler = errorHandler;
}
/**
* Set the batch error handler to call when the listener throws an exception.
* @param errorHandler the error handler.
* @since 2.2
* @deprecated in favor of {@link #setCommonErrorHandler(CommonErrorHandler)}
* @see #setCommonErrorHandler(CommonErrorHandler)
*/
@Deprecated
public void setBatchErrorHandler(BatchErrorHandler errorHandler) {
this.errorHandler = errorHandler;
}
/**
* Set the {@link CommonErrorHandler} which can handle errors for both record
* and batch listeners. Replaces the use of {@link GenericErrorHandler}s.
* @param commonErrorHandler the handler.
* @since 2.8
*/
public void setCommonErrorHandler(CommonErrorHandler commonErrorHandler) {
this.commonErrorHandler = commonErrorHandler;
}
/**
* Set a processor to invoke after a transaction rollback; typically will
* seek the unprocessed topic/partition to reprocess the records.
* The default does so, including the failed record.
* @param afterRollbackProcessor the processor.
* @since 1.3.5
*/
public void setAfterRollbackProcessor(AfterRollbackProcessor<? super K, ? super V> afterRollbackProcessor) {
this.afterRollbackProcessor = afterRollbackProcessor;
}
/**
* Set a configurer which will be invoked when creating a reply message.
* @param replyHeadersConfigurer the configurer.
* @since 2.2
*/
public void setReplyHeadersConfigurer(ReplyHeadersConfigurer replyHeadersConfigurer) {
this.replyHeadersConfigurer = replyHeadersConfigurer;
}
/**
* Set to false to allow the container to start even if any of the configured topics
* are not present on the broker. Does not apply when topic patterns are configured.
 * Default true.
* @param missingTopicsFatal the missingTopicsFatal.
* @since 2.3
*/
public void setMissingTopicsFatal(boolean missingTopicsFatal) {
this.missingTopicsFatal = missingTopicsFatal;
}
/**
* Obtain the properties template for this factory - set properties as needed
* and they will be copied to a final properties instance for the endpoint.
* @return the properties.
*/
public ContainerProperties getContainerProperties() {
return this.containerProperties;
}
/**
* Set an interceptor to be called before calling the listener.
* Only used with record listeners.
* @param recordInterceptor the interceptor.
* @since 2.2.7
* @see #setBatchInterceptor(BatchInterceptor)
*/
public void setRecordInterceptor(RecordInterceptor<K, V> recordInterceptor) {
this.recordInterceptor = recordInterceptor;
}
/**
* Set a batch interceptor to be called before and after calling the listener.
* Only used with batch listeners.
* @param batchInterceptor the interceptor.
* @since 2.7
* @see #setRecordInterceptor(RecordInterceptor)
*/
public void setBatchInterceptor(BatchInterceptor<K, V> batchInterceptor) {
this.batchInterceptor = batchInterceptor;
}
/**
* Set a {@link BatchToRecordAdapter}.
* @param batchToRecordAdapter the adapter.
* @since 2.4.2
*/
public void setBatchToRecordAdapter(BatchToRecordAdapter<K, V> batchToRecordAdapter) {
this.batchToRecordAdapter = batchToRecordAdapter;
}
/**
* Set a customizer used to further configure a container after it has been created.
* @param containerCustomizer the customizer.
* @since 2.3.4
*/
public void setContainerCustomizer(ContainerCustomizer<K, V, C> containerCustomizer) {
this.containerCustomizer = containerCustomizer;
}
@Override
public void afterPropertiesSet() {
if (this.commonErrorHandler == null && this.errorHandler != null) {
if (Boolean.TRUE.equals(this.batchListener)) {
Assert.state(this.errorHandler instanceof BatchErrorHandler,
() -> "The error handler must be a BatchErrorHandler, not " +
this.errorHandler.getClass().getName());
}
else {
Assert.state(this.errorHandler instanceof ErrorHandler,
() -> "The error handler must be an ErrorHandler, not " +
this.errorHandler.getClass().getName());
}
}
}
@SuppressWarnings("unchecked")
@Override
public C createListenerContainer(KafkaListenerEndpoint endpoint) {
C instance = createContainerInstance(endpoint);
JavaUtils.INSTANCE
.acceptIfNotNull(endpoint.getId(), instance::setBeanName);
if (endpoint instanceof AbstractKafkaListenerEndpoint) {
configureEndpoint((AbstractKafkaListenerEndpoint<K, V>) endpoint);
}
endpoint.setupListenerContainer(instance, this.messageConverter);
initializeContainer(instance, endpoint);
customizeContainer(instance);
return instance;
}
@SuppressWarnings("deprecation")
private void configureEndpoint(AbstractKafkaListenerEndpoint<K, V> aklEndpoint) {
if (aklEndpoint.getRecordFilterStrategy() == null) {
JavaUtils.INSTANCE
.acceptIfNotNull(this.recordFilterStrategy, aklEndpoint::setRecordFilterStrategy);
}
JavaUtils.INSTANCE
.acceptIfNotNull(this.ackDiscarded, aklEndpoint::setAckDiscarded)
.acceptIfNotNull(this.retryTemplate, aklEndpoint::setRetryTemplate)
.acceptIfNotNull(this.recoveryCallback, aklEndpoint::setRecoveryCallback)
.acceptIfNotNull(this.statefulRetry, aklEndpoint::setStatefulRetry)
.acceptIfNotNull(this.replyTemplate, aklEndpoint::setReplyTemplate)
.acceptIfNotNull(this.replyHeadersConfigurer, aklEndpoint::setReplyHeadersConfigurer)
.acceptIfNotNull(this.batchToRecordAdapter, aklEndpoint::setBatchToRecordAdapter);
if (aklEndpoint.getBatchListener() == null) {
JavaUtils.INSTANCE
.acceptIfNotNull(this.batchListener, aklEndpoint::setBatchListener);
}
}
/**
* Create an empty container instance.
* @param endpoint the endpoint.
* @return the new container instance.
*/
protected abstract C createContainerInstance(KafkaListenerEndpoint endpoint);
/**
* Further initialize the specified container.
 * <p>Subclasses can override this method to apply extra
* configuration if necessary.
* @param instance the container instance to configure.
* @param endpoint the endpoint.
*/
@SuppressWarnings("deprecation")
protected void initializeContainer(C instance, KafkaListenerEndpoint endpoint) {
ContainerProperties properties = instance.getContainerProperties();
BeanUtils.copyProperties(this.containerProperties, properties, "topics", "topicPartitions", "topicPattern",
"messageListener", "ackCount", "ackTime", "subBatchPerPartition", "kafkaConsumerProperties");
JavaUtils.INSTANCE
.acceptIfNotNull(this.afterRollbackProcessor, instance::setAfterRollbackProcessor)
.acceptIfCondition(this.containerProperties.getAckCount() > 0, this.containerProperties.getAckCount(),
properties::setAckCount)
.acceptIfCondition(this.containerProperties.getAckTime() > 0, this.containerProperties.getAckTime(),
properties::setAckTime)
.acceptIfNotNull(this.containerProperties.getSubBatchPerPartition(),
properties::setSubBatchPerPartition)
.acceptIfNotNull(this.errorHandler, instance::setGenericErrorHandler)
.acceptIfNotNull(this.commonErrorHandler, instance::setCommonErrorHandler)
.acceptIfNotNull(this.missingTopicsFatal, instance.getContainerProperties()::setMissingTopicsFatal);
Boolean autoStart = endpoint.getAutoStartup();
if (autoStart != null) {
instance.setAutoStartup(autoStart);
}
else if (this.autoStartup != null) {
instance.setAutoStartup(this.autoStartup);
}
instance.setRecordInterceptor(this.recordInterceptor);
instance.setBatchInterceptor(this.batchInterceptor);
JavaUtils.INSTANCE
.acceptIfNotNull(this.phase, instance::setPhase)
.acceptIfNotNull(this.applicationContext, instance::setApplicationContext)
.acceptIfNotNull(this.applicationEventPublisher, instance::setApplicationEventPublisher)
.acceptIfHasText(endpoint.getGroupId(), instance.getContainerProperties()::setGroupId)
.acceptIfHasText(endpoint.getClientIdPrefix(), instance.getContainerProperties()::setClientId)
.acceptIfNotNull(endpoint.getConsumerProperties(),
instance.getContainerProperties()::setKafkaConsumerProperties)
.acceptIfNotNull(endpoint.getListenerInfo(), instance::setListenerInfo);
}
private void customizeContainer(C instance) {
if (this.containerCustomizer != null) {
this.containerCustomizer.configure(instance);
}
}
@Override
public C createContainer(TopicPartitionOffset... topicsAndPartitions) {
KafkaListenerEndpoint endpoint = new KafkaListenerEndpointAdapter() {
@Override
public TopicPartitionOffset[] getTopicPartitionsToAssign() {
return Arrays.copyOf(topicsAndPartitions, topicsAndPartitions.length);
}
};
C container = createContainerInstance(endpoint);
initializeContainer(container, endpoint);
customizeContainer(container);
return container;
}
@Override
public C createContainer(String... topics) {
KafkaListenerEndpoint endpoint = new KafkaListenerEndpointAdapter() {
@Override
public Collection<String> getTopics() {
return Arrays.asList(topics);
}
};
C container = createContainerInstance(endpoint);
initializeContainer(container, endpoint);
customizeContainer(container);
return container;
}
@Override
public C createContainer(Pattern topicPattern) {
KafkaListenerEndpoint endpoint = new KafkaListenerEndpointAdapter() {
@Override
public Pattern getTopicPattern() {
return topicPattern;
}
};
C container = createContainerInstance(endpoint);
initializeContainer(container, endpoint);
customizeContainer(container);
return container;
}
}
|
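
The concrete factory most applications use with this base class is ConcurrentKafkaListenerContainerFactory. A wiring sketch (broker address, group id, and bean names are assumptions) exercising a few of the setters defined above:

package com.example.config; // hypothetical package, for illustration only

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.DefaultErrorHandler;

@Configuration
public class ListenerFactoryConfig {

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());            // setConsumerFactory(...) above
        factory.setCommonErrorHandler(new DefaultErrorHandler()); // setCommonErrorHandler(...) above
        factory.setMissingTopicsFatal(false);                     // setMissingTopicsFatal(...) above
        factory.getContainerProperties().setPollTimeout(3000);    // properties template, copied per endpoint
        return factory;
    }

    @Bean
    public DefaultKafkaConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // assumed group
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new DefaultKafkaConsumerFactory<>(props);
    }
}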
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/AbstractKafkaListenerEndpoint.java | /*
* Copyright 2014-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.config.BeanExpressionContext;
import org.springframework.beans.factory.config.BeanExpressionResolver;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.context.expression.BeanFactoryResolver;
import org.springframework.core.log.LogAccessor;
import org.springframework.expression.BeanResolver;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.BatchMessageListener;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.listener.adapter.BatchToRecordAdapter;
import org.springframework.kafka.listener.adapter.FilteringBatchMessageListenerAdapter;
import org.springframework.kafka.listener.adapter.FilteringMessageListenerAdapter;
import org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter;
import org.springframework.kafka.listener.adapter.RecordFilterStrategy;
import org.springframework.kafka.listener.adapter.ReplyHeadersConfigurer;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.converter.MessageConverter;
import org.springframework.lang.Nullable;
import org.springframework.retry.RecoveryCallback;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
/**
* Base model for a Kafka listener endpoint.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
*
* @see MethodKafkaListenerEndpoint
*/
public abstract class AbstractKafkaListenerEndpoint<K, V>
implements KafkaListenerEndpoint, BeanFactoryAware, InitializingBean {
private final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass()));
private String id;
private String groupId;
private final Collection<String> topics = new ArrayList<>();
private Pattern topicPattern;
private final Collection<TopicPartitionOffset> topicPartitions = new ArrayList<>();
private BeanFactory beanFactory;
private BeanExpressionResolver resolver;
private BeanExpressionContext expressionContext;
private BeanResolver beanResolver;
private String group;
private RecordFilterStrategy<K, V> recordFilterStrategy;
private boolean ackDiscarded;
private RetryTemplate retryTemplate;
private RecoveryCallback<? extends Object> recoveryCallback;
private boolean statefulRetry;
private Boolean batchListener;
private KafkaTemplate<?, ?> replyTemplate;
private String clientIdPrefix;
private Integer concurrency;
private Boolean autoStartup;
private ReplyHeadersConfigurer replyHeadersConfigurer;
private Properties consumerProperties;
private boolean splitIterables = true;
private BatchToRecordAdapter<K, V> batchToRecordAdapter;
private byte[] listenerInfo;
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
if (beanFactory instanceof ConfigurableListableBeanFactory) {
this.resolver = ((ConfigurableListableBeanFactory) beanFactory).getBeanExpressionResolver();
this.expressionContext = new BeanExpressionContext((ConfigurableListableBeanFactory) beanFactory, null);
}
this.beanResolver = new BeanFactoryResolver(beanFactory);
}
@Nullable
protected BeanFactory getBeanFactory() {
return this.beanFactory;
}
@Nullable
protected BeanExpressionResolver getResolver() {
return this.resolver;
}
@Nullable
protected BeanExpressionContext getBeanExpressionContext() {
return this.expressionContext;
}
@Nullable
protected BeanResolver getBeanResolver() {
return this.beanResolver;
}
public void setId(String id) {
this.id = id;
}
@Nullable
@Override
public String getId() {
return this.id;
}
/**
* Set the group id to override the {@code group.id} property in the
* ContainerFactory.
* @param groupId the group id.
* @since 1.3
*/
public void setGroupId(String groupId) {
this.groupId = groupId;
}
@Nullable
@Override
public String getGroupId() {
return this.groupId;
}
/**
* Set the topics to use. Either these or 'topicPattern' or 'topicPartitions'
* should be provided, but not a mixture.
* @param topics to set.
* @see #setTopicPartitions(TopicPartitionOffset...)
* @see #setTopicPattern(Pattern)
*/
public void setTopics(String... topics) {
Assert.notNull(topics, "'topics' must not be null");
this.topics.clear();
this.topics.addAll(Arrays.asList(topics));
}
/**
* Return the topics for this endpoint.
* @return the topics for this endpoint.
*/
@Override
public Collection<String> getTopics() {
return Collections.unmodifiableCollection(this.topics);
}
/**
* Set the topicPartitions to use.
 * Either this or 'topics' or 'topicPattern'
* should be provided, but not a mixture.
* @param topicPartitions to set.
* @since 2.3
* @see #setTopics(String...)
* @see #setTopicPattern(Pattern)
*/
public void setTopicPartitions(TopicPartitionOffset... topicPartitions) {
Assert.notNull(topicPartitions, "'topicPartitions' must not be null");
this.topicPartitions.clear();
this.topicPartitions.addAll(Arrays.asList(topicPartitions));
}
/**
* Return the topicPartitions for this endpoint.
* @return the topicPartitions for this endpoint.
* @since 2.3
*/
@Nullable
@Override
public TopicPartitionOffset[] getTopicPartitionsToAssign() {
return this.topicPartitions.toArray(new TopicPartitionOffset[0]);
}
/**
* Set the topic pattern to use. Cannot be used with
* topics or topicPartitions.
* @param topicPattern the pattern
* @see #setTopicPartitions(TopicPartitionOffset...)
* @see #setTopics(String...)
*/
public void setTopicPattern(Pattern topicPattern) {
this.topicPattern = topicPattern;
}
/**
* Return the topicPattern for this endpoint.
* @return the topicPattern for this endpoint.
*/
@Nullable
@Override
public Pattern getTopicPattern() {
return this.topicPattern;
}
@Nullable
@Override
public String getGroup() {
return this.group;
}
/**
* Set the group for the corresponding listener container.
* @param group the group.
*/
public void setGroup(String group) {
this.group = group;
}
/**
* Return true if this endpoint creates a batch listener.
* @return true for a batch listener.
* @since 1.1
*/
public boolean isBatchListener() {
return this.batchListener == null ? false : this.batchListener;
}
/**
* Return the current batch listener flag for this endpoint, or null if not explicitly
* set.
* @return the batch listener flag.
* @since 2.8
*/
@Nullable
public Boolean getBatchListener() {
return this.batchListener;
}
/**
* Set to true if this endpoint should create a batch listener.
* @param batchListener true for a batch listener.
* @since 1.1
*/
public void setBatchListener(boolean batchListener) {
this.batchListener = batchListener;
}
/**
* Set the {@link KafkaTemplate} to use to send replies.
* @param replyTemplate the template.
* @since 2.0
*/
public void setReplyTemplate(KafkaTemplate<?, ?> replyTemplate) {
this.replyTemplate = replyTemplate;
}
@Nullable
protected KafkaTemplate<?, ?> getReplyTemplate() {
return this.replyTemplate;
}
@Nullable
protected RecordFilterStrategy<? super K, ? super V> getRecordFilterStrategy() {
return this.recordFilterStrategy;
}
/**
* Set a {@link RecordFilterStrategy} implementation.
* @param recordFilterStrategy the strategy implementation.
*/
@SuppressWarnings("unchecked")
public void setRecordFilterStrategy(RecordFilterStrategy<? super K, ? super V> recordFilterStrategy) {
this.recordFilterStrategy = (RecordFilterStrategy<K, V>) recordFilterStrategy;
}
protected boolean isAckDiscarded() {
return this.ackDiscarded;
}
/**
* Set to true to acknowledge records discarded by the
* {@link #setRecordFilterStrategy(RecordFilterStrategy) record filter strategy}, if one is in use.
* @param ackDiscarded true to ack discarded records.
*/
public void setAckDiscarded(boolean ackDiscarded) {
this.ackDiscarded = ackDiscarded;
}
@Deprecated
@Nullable
protected RetryTemplate getRetryTemplate() {
return this.retryTemplate;
}
/**
* Set a retryTemplate.
* @param retryTemplate the template.
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
public void setRetryTemplate(RetryTemplate retryTemplate) {
this.retryTemplate = retryTemplate;
}
/**
* Get the recovery callback.
* @return the recovery callback.
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
@Nullable
protected RecoveryCallback<?> getRecoveryCallback() {
return this.recoveryCallback;
}
/**
* Set a callback to be used with the {@link #setRetryTemplate(RetryTemplate)}.
* @param recoveryCallback the callback.
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
public void setRecoveryCallback(RecoveryCallback<? extends Object> recoveryCallback) {
this.recoveryCallback = recoveryCallback;
}
/**
* Return the stateful retry.
* @return the stateful retry.
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
protected boolean isStatefulRetry() {
return this.statefulRetry;
}
/**
* When using a {@link RetryTemplate}, set to true to enable stateful retry. Use in
* conjunction with a
* {@link org.springframework.kafka.listener.SeekToCurrentErrorHandler} when retry can
* take excessive time; each failure goes back to the broker, to keep the Consumer
* alive.
* @param statefulRetry true to enable stateful retry.
* @since 2.1.3
* @deprecated since 2.8 - use a suitably configured error handler instead.
*/
@Deprecated
public void setStatefulRetry(boolean statefulRetry) {
this.statefulRetry = statefulRetry;
}
@Nullable
@Override
public String getClientIdPrefix() {
return this.clientIdPrefix;
}
/**
* Set the client id prefix; overrides the client id in the consumer configuration
* properties.
* @param clientIdPrefix the prefix.
* @since 2.1.1
*/
public void setClientIdPrefix(String clientIdPrefix) {
this.clientIdPrefix = clientIdPrefix;
}
@Override
@Nullable
public Integer getConcurrency() {
return this.concurrency;
}
/**
* Set the concurrency for this endpoint's container.
* @param concurrency the concurrency.
* @since 2.2
*/
public void setConcurrency(Integer concurrency) {
this.concurrency = concurrency;
}
@Override
@Nullable
public Boolean getAutoStartup() {
return this.autoStartup;
}
/**
* Set the autoStartup for this endpoint's container.
* @param autoStartup the autoStartup.
* @since 2.2
*/
public void setAutoStartup(Boolean autoStartup) {
this.autoStartup = autoStartup;
}
/**
* Set a configurer which will be invoked when creating a reply message.
* @param replyHeadersConfigurer the configurer.
* @since 2.2
*/
public void setReplyHeadersConfigurer(ReplyHeadersConfigurer replyHeadersConfigurer) {
this.replyHeadersConfigurer = replyHeadersConfigurer;
}
@Override
@Nullable
public Properties getConsumerProperties() {
return this.consumerProperties;
}
/**
* Set the consumer properties that will be merged with the consumer properties
* provided by the consumer factory; properties here will supersede any with the same
* name(s) in the consumer factory.
* {@code group.id} and {@code client.id} are ignored.
* @param consumerProperties the properties.
* @since 2.1.4
* @see org.apache.kafka.clients.consumer.ConsumerConfig
* @see #setGroupId(String)
* @see #setClientIdPrefix(String)
*/
public void setConsumerProperties(Properties consumerProperties) {
this.consumerProperties = consumerProperties;
}
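// Illustrative sketch (not part of the original source): overriding a consumer
// property for this endpoint only; the value below supersedes the one from the
// consumer factory, while any group.id or client.id entries would be ignored.
//
// Properties props = new Properties();
// props.setProperty(org.apache.kafka.clients.consumer.ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10");
// endpoint.setConsumerProperties(props);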
@Override
public boolean isSplitIterables() {
return this.splitIterables;
}
/**
* Set to false to disable splitting {@link Iterable} reply values into separate
* records.
* @param splitIterables false to disable; default true.
* @since 2.3.5
*/
public void setSplitIterables(boolean splitIterables) {
this.splitIterables = splitIterables;
}
@Override
@Nullable
public byte[] getListenerInfo() {
return this.listenerInfo; // NOSONAR
}
/**
* Set the listener info to insert in the record header.
* @param listenerInfo the info.
* @since 2.8.4
*/
public void setListenerInfo(@Nullable byte[] listenerInfo) { // NOSONAR
this.listenerInfo = listenerInfo; // NOSONAR
}
@Nullable
protected BatchToRecordAdapter<K, V> getBatchToRecordAdapter() {
return this.batchToRecordAdapter;
}
/**
* Set a {@link BatchToRecordAdapter}.
* @param batchToRecordAdapter the adapter.
* @since 2.4.2
*/
public void setBatchToRecordAdapter(BatchToRecordAdapter<K, V> batchToRecordAdapter) {
this.batchToRecordAdapter = batchToRecordAdapter;
}
@Override
public void afterPropertiesSet() {
boolean topicsEmpty = getTopics().isEmpty();
boolean topicPartitionsEmpty = ObjectUtils.isEmpty(getTopicPartitionsToAssign());
if (!topicsEmpty && !topicPartitionsEmpty) {
throw new IllegalStateException("Topics or topicPartitions must be provided but not both for " + this);
}
if (this.topicPattern != null && (!topicsEmpty || !topicPartitionsEmpty)) {
throw new IllegalStateException("Only one of topics, topicPartitions or topicPattern must are allowed for "
+ this);
}
if (this.topicPattern == null && topicsEmpty && topicPartitionsEmpty) {
throw new IllegalStateException("At least one of topics, topicPartitions or topicPattern must be provided "
+ "for " + this);
}
}
@Override
public void setupListenerContainer(MessageListenerContainer listenerContainer,
@Nullable MessageConverter messageConverter) {
setupMessageListener(listenerContainer, messageConverter);
}
/**
* Create a {@link MessageListener} that is able to serve this endpoint for the
* specified container.
* @param container the {@link MessageListenerContainer} to create a {@link MessageListener}.
* @param messageConverter the message converter - may be null.
* @return a {@link MessageListener} instance.
*/
protected abstract MessagingMessageListenerAdapter<K, V> createMessageListener(MessageListenerContainer container,
@Nullable MessageConverter messageConverter);
@SuppressWarnings({ "unchecked", "deprecation" })
private void setupMessageListener(MessageListenerContainer container,
@Nullable MessageConverter messageConverter) {
MessagingMessageListenerAdapter<K, V> adapter = createMessageListener(container, messageConverter);
if (this.replyHeadersConfigurer != null) {
adapter.setReplyHeadersConfigurer(this.replyHeadersConfigurer);
}
adapter.setSplitIterables(this.splitIterables);
Object messageListener = adapter;
boolean isBatchListener = isBatchListener();
Assert.state(messageListener != null,
() -> "Endpoint [" + this + "] must provide a non null message listener");
Assert.state(this.retryTemplate == null || !isBatchListener,
"A 'RetryTemplate' is not supported with a batch listener; consider configuring the container "
+ "with a suitably configured 'SeekToCurrentBatchErrorHandler' instead");
if (this.retryTemplate != null) {
messageListener = new org.springframework.kafka.listener.adapter.RetryingMessageListenerAdapter<>(
(MessageListener<K, V>) messageListener,
this.retryTemplate, this.recoveryCallback, this.statefulRetry);
}
if (this.recordFilterStrategy != null) {
if (isBatchListener) {
if (((MessagingMessageListenerAdapter<K, V>) messageListener).isConsumerRecords()) {
this.logger.warn(() -> "Filter strategy ignored when consuming 'ConsumerRecords' instead of a List"
+ (this.id != null ? " id: " + this.id : ""));
}
else {
messageListener = new FilteringBatchMessageListenerAdapter<>(
(BatchMessageListener<K, V>) messageListener, this.recordFilterStrategy, this.ackDiscarded);
}
}
else {
messageListener = new FilteringMessageListenerAdapter<>((MessageListener<K, V>) messageListener,
this.recordFilterStrategy, this.ackDiscarded);
}
}
container.setupMessageListener(messageListener);
}
/**
* Return a description for this endpoint.
* <p>Available to subclasses, for inclusion in their {@code toString()} result.
* @return a description for this endpoint.
*/
protected StringBuilder getEndpointDescription() {
StringBuilder result = new StringBuilder();
return result.append(getClass().getSimpleName()).append("[").append(this.id).
append("] topics=").append(this.topics).
append("' | topicPartitions='").append(this.topicPartitions).
append("' | topicPattern='").append(this.topicPattern).append("'");
}
@Override
public String toString() {
return getEndpointDescription().toString();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/CompositeKafkaStreamsCustomizer.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.streams.KafkaStreams;
/**
* Composite {@link KafkaStreamsCustomizer} customizes {@link KafkaStreams} by delegating
* to a list of provided {@link KafkaStreamsCustomizer}.
*
* @author Nurettin Yilmaz
* @author Artem Bilan
*
* @since 2.1.5
*/
public class CompositeKafkaStreamsCustomizer implements KafkaStreamsCustomizer {
private final List<KafkaStreamsCustomizer> kafkaStreamsCustomizers = new ArrayList<>();
public CompositeKafkaStreamsCustomizer() {
}
public CompositeKafkaStreamsCustomizer(List<KafkaStreamsCustomizer> kafkaStreamsCustomizers) {
this.kafkaStreamsCustomizers.addAll(kafkaStreamsCustomizers);
}
@Override
public void customize(KafkaStreams kafkaStreams) {
this.kafkaStreamsCustomizers.forEach(kafkaStreamsCustomizer -> kafkaStreamsCustomizer.customize(kafkaStreams));
}
public void addKafkaStreamsCustomizers(List<KafkaStreamsCustomizer> customizers) {
this.kafkaStreamsCustomizers.addAll(customizers);
}
}
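/*
 * Usage sketch (illustrative, not part of the original source): composing two
 * hypothetical customizers and applying them through the composite.
 *
 * CompositeKafkaStreamsCustomizer composite = new CompositeKafkaStreamsCustomizer();
 * composite.addKafkaStreamsCustomizers(java.util.Arrays.asList(
 *         kafkaStreams -> kafkaStreams.setStateListener((newState, oldState) -> { }),
 *         kafkaStreams -> kafkaStreams.cleanUp()));
 * // typically passed to StreamsBuilderFactoryBean#setKafkaStreamsCustomizer(...)
 */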
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/CompositeKafkaStreamsInfrastructureCustomizer.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
/**
* Composite {@link KafkaStreamsInfrastructureCustomizer} customizes
* {@link org.apache.kafka.streams.KafkaStreams} by delegating to a list of provided
* {@link KafkaStreamsInfrastructureCustomizer}.
*
* @author Gary Russell
*
* @since 2.4.1
*/
public class CompositeKafkaStreamsInfrastructureCustomizer implements KafkaStreamsInfrastructureCustomizer {
private final List<KafkaStreamsInfrastructureCustomizer> infrastructureCustomizers = new ArrayList<>();
/**
* Construct an instance with the provided customizers.
* @param customizers the customizers.
*/
public CompositeKafkaStreamsInfrastructureCustomizer(KafkaStreamsInfrastructureCustomizer... customizers) {
this.infrastructureCustomizers.addAll(Arrays.asList(customizers));
}
/**
* Add customizers.
* @param customizers the customizers.
*/
public void addKafkaStreamsCustomizers(KafkaStreamsInfrastructureCustomizer... customizers) {
this.infrastructureCustomizers.addAll(Arrays.asList(customizers));
}
@Override
public void configureBuilder(StreamsBuilder builder) {
this.infrastructureCustomizers.forEach(cust -> cust.configureBuilder(builder));
}
@Override
public void configureTopology(Topology topology) {
this.infrastructureCustomizers.forEach(cust -> cust.configureTopology(topology));
}
}
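/*
 * Usage sketch (illustrative, not part of the original source): delegating
 * infrastructure customization to an anonymous customizer; the body is hypothetical.
 *
 * CompositeKafkaStreamsInfrastructureCustomizer composite =
 *         new CompositeKafkaStreamsInfrastructureCustomizer(
 *                 new KafkaStreamsInfrastructureCustomizer() {
 *                     @Override
 *                     public void configureTopology(Topology topology) {
 *                         System.out.println(topology.describe()); // e.g. dump the topology
 *                     }
 *                 });
 * // typically passed to StreamsBuilderFactoryBean#setInfrastructureCustomizer(...)
 */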
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/ConcurrentKafkaListenerContainerFactory.java | /*
* Copyright 2014-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.Collection;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.support.TopicPartitionOffset;
/**
* A {@link KafkaListenerContainerFactory} implementation to build a
* {@link ConcurrentMessageListenerContainer}.
* <p>
* This should be the default for most users and a good transition path for those who
* are used to building such container definitions manually.
*
* This factory is primarily for building containers for {@code KafkaListener} annotated
* methods but can also be used to create any container.
*
* Only containers for {@code KafkaListener} annotated methods are added to the
* {@code KafkaListenerEndpointRegistry}.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Stephane Nicoll
* @author Gary Russell
* @author Artem Bilan
* @author Murali Reddy
*/
public class ConcurrentKafkaListenerContainerFactory<K, V>
extends AbstractKafkaListenerContainerFactory<ConcurrentMessageListenerContainer<K, V>, K, V> {
private Integer concurrency;
/**
* Specify the container concurrency.
* @param concurrency the number of consumers to create.
* @see ConcurrentMessageListenerContainer#setConcurrency(int)
*/
public void setConcurrency(Integer concurrency) {
this.concurrency = concurrency;
}
@Override
protected ConcurrentMessageListenerContainer<K, V> createContainerInstance(KafkaListenerEndpoint endpoint) {
TopicPartitionOffset[] topicPartitions = endpoint.getTopicPartitionsToAssign();
if (topicPartitions != null && topicPartitions.length > 0) {
ContainerProperties properties = new ContainerProperties(topicPartitions);
return new ConcurrentMessageListenerContainer<>(getConsumerFactory(), properties);
}
else {
Collection<String> topics = endpoint.getTopics();
if (!topics.isEmpty()) { // NOSONAR
ContainerProperties properties = new ContainerProperties(topics.toArray(new String[0]));
return new ConcurrentMessageListenerContainer<>(getConsumerFactory(), properties);
}
else {
ContainerProperties properties = new ContainerProperties(endpoint.getTopicPattern()); // NOSONAR
return new ConcurrentMessageListenerContainer<>(getConsumerFactory(), properties);
}
}
}
@Override
protected void initializeContainer(ConcurrentMessageListenerContainer<K, V> instance,
KafkaListenerEndpoint endpoint) {
super.initializeContainer(instance, endpoint);
Integer conc = endpoint.getConcurrency();
if (conc != null) {
instance.setConcurrency(conc);
}
else if (this.concurrency != null) {
instance.setConcurrency(this.concurrency);
}
}
}
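/*
 * Usage sketch (illustrative, not part of the original source; the consumer
 * factory is assumed to be configured elsewhere):
 *
 * ConcurrentKafkaListenerContainerFactory<String, String> factory =
 *         new ConcurrentKafkaListenerContainerFactory<>();
 * factory.setConsumerFactory(consumerFactory);
 * factory.setConcurrency(3); // three child containers per listener
 * ConcurrentMessageListenerContainer<String, String> container =
 *         factory.createContainer("some-topic");
 */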
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/ContainerCustomizer.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
/**
* Called by the container factory after the container is created and configured.
*
* @param <K> the key type.
* @param <V> the value type.
* @param <C> the container type.
*
* @author Gary Russell
* @since 2.3.4
*
*/
@FunctionalInterface
public interface ContainerCustomizer<K, V, C extends AbstractMessageListenerContainer<K, V>> {
/**
* Configure the container.
* @param container the container.
*/
void configure(C container);
}
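/*
 * Usage sketch (illustrative, not part of the original source): since this is a
 * functional interface, a lambda can be registered on the container factory.
 *
 * factory.setContainerCustomizer(container ->
 *         container.getContainerProperties().setIdleEventInterval(30_000L));
 */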
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaListenerConfigUtils.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
/**
* Configuration constants for internal sharing across subpackages.
*
* @author Juergen Hoeller
* @author Gary Russell
*/
public abstract class KafkaListenerConfigUtils {
/**
* The bean name of the internally managed Kafka listener annotation processor.
*/
public static final String KAFKA_LISTENER_ANNOTATION_PROCESSOR_BEAN_NAME =
"org.springframework.kafka.config.internalKafkaListenerAnnotationProcessor";
/**
* The bean name of the internally managed Kafka listener endpoint registry.
*/
public static final String KAFKA_LISTENER_ENDPOINT_REGISTRY_BEAN_NAME =
"org.springframework.kafka.config.internalKafkaListenerEndpointRegistry";
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaListenerContainerFactory.java | /*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.regex.Pattern;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.TopicPartitionOffset;
/**
* Factory for {@link MessageListenerContainer}s.
*
* @param <C> the {@link MessageListenerContainer} implementation type.
*
* @author Stephane Nicoll
* @author Gary Russell
*
* @see KafkaListenerEndpoint
*/
public interface KafkaListenerContainerFactory<C extends MessageListenerContainer> {
/**
* Create a {@link MessageListenerContainer} for the given {@link KafkaListenerEndpoint}.
* Containers created using this method are added to the listener endpoint registry.
* @param endpoint the endpoint to configure
* @return the created container
*/
C createListenerContainer(KafkaListenerEndpoint endpoint);
/**
* Create and configure a container without a listener; used to create containers that
* are not used for KafkaListener annotations. Containers created using this method
* are not added to the listener endpoint registry.
* @param topicPartitions the topicPartitions to assign.
* @return the container.
* @since 2.3
*/
C createContainer(TopicPartitionOffset... topicPartitions);
/**
* Create and configure a container without a listener; used to create containers that
* are not used for KafkaListener annotations. Containers created using this method
* are not added to the listener endpoint registry.
* @param topics the topics.
* @return the container.
* @since 2.2
*/
C createContainer(String... topics);
/**
* Create and configure a container without a listener; used to create containers that
* are not used for KafkaListener annotations. Containers created using this method
* are not added to the listener endpoint registry.
* @param topicPattern the topicPattern.
* @return the container.
* @since 2.2
*/
C createContainer(Pattern topicPattern);
}
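/*
 * Usage sketch (illustrative, not part of the original source): creating an
 * unmanaged container, i.e. one not tied to a KafkaListener annotation and not
 * added to the endpoint registry; the group id is hypothetical.
 *
 * MessageListenerContainer container = factory.createContainer("events");
 * container.getContainerProperties().setGroupId("manual-group");
 * container.setupMessageListener((MessageListener<String, String>) record ->
 *         System.out.println(record.value()));
 * container.start();
 */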
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaListenerEndpoint.java | /*
* Copyright 2002-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.Collection;
import java.util.Properties;
import java.util.regex.Pattern;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.converter.MessageConverter;
import org.springframework.lang.Nullable;
/**
* Model for a Kafka listener endpoint. Can be used against a
* {@link org.springframework.kafka.annotation.KafkaListenerConfigurer
* KafkaListenerConfigurer} to register endpoints programmatically.
*
* @author Stephane Nicoll
* @author Gary Russell
*/
public interface KafkaListenerEndpoint {
/**
* Return the id of this endpoint.
* @return the id of this endpoint. The id can be further qualified
* when the endpoint is resolved against its actual listener
* container.
* @see KafkaListenerContainerFactory#createListenerContainer
*/
@Nullable
String getId();
/**
* Return the groupId of this endpoint - if present, overrides the
* {@code group.id} property of the consumer factory.
* @return the group id; may be null.
* @since 1.3
*/
@Nullable
String getGroupId();
/**
* Return the group of this endpoint or null if not in a group.
* @return the group of this endpoint or null if not in a group.
*/
@Nullable
String getGroup();
/**
* Return the topics for this endpoint.
* @return the topics for this endpoint.
*/
Collection<String> getTopics();
/**
* Return the topicPartitions for this endpoint.
* @return the topicPartitions for this endpoint.
* @since 2.3
*/
@Nullable
TopicPartitionOffset[] getTopicPartitionsToAssign();
/**
* Return the topicPattern for this endpoint.
* @return the topicPattern for this endpoint.
*/
@Nullable
Pattern getTopicPattern();
/**
* Return the client id prefix for the container; it will be suffixed by
* '-n' to provide a unique id when concurrency is used.
* @return the client id prefix.
* @since 2.1.1
*/
@Nullable
String getClientIdPrefix();
/**
* Return the concurrency for this endpoint's container.
* @return the concurrency.
* @since 2.2
*/
@Nullable
Integer getConcurrency();
/**
* Return the autoStartup for this endpoint's container.
* @return the autoStartup.
* @since 2.2
*/
@Nullable
Boolean getAutoStartup();
/**
* Get the consumer properties that will be merged with the consumer properties
* provided by the consumer factory; properties here will supersede any with the same
* name(s) in the consumer factory.
* {@code group.id} and {@code client.id} are ignored.
* @return the properties.
* @since 2.1.4
* @see org.apache.kafka.clients.consumer.ConsumerConfig
* @see #getGroupId()
* @see #getClientIdPrefix()
*/
@Nullable
default Properties getConsumerProperties() {
return null;
}
/**
* Setup the specified message listener container with the model
* defined by this endpoint.
* <p>This endpoint must provide the requested missing option(s) of
* the specified container to make it usable. Usually, this is about
* setting the {@code topics} and the {@code messageListener} to
* use but an implementation may override any default setting that
* was already set.
* @param listenerContainer the listener container to configure
* @param messageConverter the message converter - can be null
*/
void setupListenerContainer(MessageListenerContainer listenerContainer,
@Nullable MessageConverter messageConverter);
/**
* When true, {@link Iterable} return results will be split into discrete records.
* @return true to split.
* @since 2.3.5
*/
boolean isSplitIterables();
/**
* Get the listener info to insert in the record header.
* @return the info.
* @since 2.8.4
*/
@Nullable
default byte[] getListenerInfo() {
return null;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaListenerEndpointAdapter.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.Collection;
import java.util.Collections;
import java.util.regex.Pattern;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.support.converter.MessageConverter;
import org.springframework.lang.Nullable;
/**
* Adapter to avoid having to implement all methods.
*
* @author Gary Russell
* @since 2.2
*
*/
class KafkaListenerEndpointAdapter implements KafkaListenerEndpoint {
KafkaListenerEndpointAdapter() {
}
@Nullable
@Override
public String getId() {
return null;
}
@Nullable
@Override
public String getGroupId() {
return null;
}
@Nullable
@Override
public String getGroup() {
return null;
}
@Nullable
@Override
public Collection<String> getTopics() {
return Collections.emptyList();
}
@Nullable
@Override
public TopicPartitionOffset[] getTopicPartitionsToAssign() {
return new TopicPartitionOffset[0];
}
@Nullable
@Override
public Pattern getTopicPattern() {
return null;
}
@Nullable
@Override
public String getClientIdPrefix() {
return null;
}
@Nullable
@Override
public Integer getConcurrency() {
return null;
}
@Nullable
@Override
public Boolean getAutoStartup() { // NOSONAR
return null; // NOSONAR null check by caller
}
@Override
public void setupListenerContainer(MessageListenerContainer listenerContainer,
@Nullable MessageConverter messageConverter) {
}
@Override
public boolean isSplitIterables() {
return true;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaListenerEndpointRegistrar.java | /*
* Copyright 2014-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.lang.Nullable;
import org.springframework.messaging.handler.annotation.support.MessageHandlerMethodFactory;
import org.springframework.messaging.handler.invocation.HandlerMethodArgumentResolver;
import org.springframework.util.Assert;
import org.springframework.validation.Validator;
/**
* Helper bean for registering {@link KafkaListenerEndpoint} with
* a {@link KafkaListenerEndpointRegistry}.
*
* @author Stephane Nicoll
* @author Juergen Hoeller
* @author Artem Bilan
* @author Gary Russell
* @author Filip Halemba
*
* @see org.springframework.kafka.annotation.KafkaListenerConfigurer
*/
public class KafkaListenerEndpointRegistrar implements BeanFactoryAware, InitializingBean {
private final List<KafkaListenerEndpointDescriptor> endpointDescriptors = new ArrayList<>();
private List<HandlerMethodArgumentResolver> customMethodArgumentResolvers = new ArrayList<>();
private KafkaListenerEndpointRegistry endpointRegistry;
private MessageHandlerMethodFactory messageHandlerMethodFactory;
private KafkaListenerContainerFactory<?> containerFactory;
private String containerFactoryBeanName;
private BeanFactory beanFactory;
private boolean startImmediately;
private Validator validator;
/**
* Set the {@link KafkaListenerEndpointRegistry} instance to use.
* @param endpointRegistry the {@link KafkaListenerEndpointRegistry} instance to use.
*/
public void setEndpointRegistry(KafkaListenerEndpointRegistry endpointRegistry) {
this.endpointRegistry = endpointRegistry;
}
/**
* Return the {@link KafkaListenerEndpointRegistry} instance for this
* registrar, may be {@code null}.
* @return the {@link KafkaListenerEndpointRegistry} instance for this
* registrar, may be {@code null}.
*/
@Nullable
public KafkaListenerEndpointRegistry getEndpointRegistry() {
return this.endpointRegistry;
}
/**
* Return the list of {@link HandlerMethodArgumentResolver}.
* @return the list of {@link HandlerMethodArgumentResolver}.
* @since 2.4.2
*/
public List<HandlerMethodArgumentResolver> getCustomMethodArgumentResolvers() {
return Collections.unmodifiableList(this.customMethodArgumentResolvers);
}
/**
* Add custom method argument resolvers to the
* {@link org.springframework.kafka.annotation.KafkaListenerAnnotationBeanPostProcessor}.
* Defaults to an empty list.
* @param methodArgumentResolvers the methodArgumentResolvers to assign.
* @since 2.4.2
*/
public void setCustomMethodArgumentResolvers(HandlerMethodArgumentResolver... methodArgumentResolvers) {
this.customMethodArgumentResolvers = Arrays.asList(methodArgumentResolvers);
}
/**
* Set the {@link MessageHandlerMethodFactory} to use to configure the message
* listener responsible to serve an endpoint detected by this processor.
* <p>By default,
* {@link org.springframework.messaging.handler.annotation.support.DefaultMessageHandlerMethodFactory}
* is used and it can be configured further to support additional method arguments
* or to customize conversion and validation support. See
* {@link org.springframework.messaging.handler.annotation.support.DefaultMessageHandlerMethodFactory}
* javadoc for more details.
* @param kafkaHandlerMethodFactory the {@link MessageHandlerMethodFactory} instance.
*/
public void setMessageHandlerMethodFactory(MessageHandlerMethodFactory kafkaHandlerMethodFactory) {
Assert.isNull(this.validator,
"A validator cannot be provided with a custom message handler factory");
this.messageHandlerMethodFactory = kafkaHandlerMethodFactory;
}
/**
* Return the custom {@link MessageHandlerMethodFactory} to use, if any.
* @return the custom {@link MessageHandlerMethodFactory} to use, if any.
*/
@Nullable
public MessageHandlerMethodFactory getMessageHandlerMethodFactory() {
return this.messageHandlerMethodFactory;
}
/**
* Set the {@link KafkaListenerContainerFactory} to use in case a {@link KafkaListenerEndpoint}
* is registered with a {@code null} container factory.
* <p>Alternatively, the bean name of the {@link KafkaListenerContainerFactory} to use
* can be specified for a lazy lookup, see {@link #setContainerFactoryBeanName}.
* @param containerFactory the {@link KafkaListenerContainerFactory} instance.
*/
public void setContainerFactory(KafkaListenerContainerFactory<?> containerFactory) {
this.containerFactory = containerFactory;
}
/**
* Set the bean name of the {@link KafkaListenerContainerFactory} to use in case
* a {@link KafkaListenerEndpoint} is registered with a {@code null} container factory.
* Alternatively, the container factory instance can be registered directly:
* see {@link #setContainerFactory(KafkaListenerContainerFactory)}.
* @param containerFactoryBeanName the {@link KafkaListenerContainerFactory} bean name.
* @see #setBeanFactory
*/
public void setContainerFactoryBeanName(String containerFactoryBeanName) {
this.containerFactoryBeanName = containerFactoryBeanName;
}
/**
* A {@link BeanFactory} only needs to be available in conjunction with
* {@link #setContainerFactoryBeanName}.
* @param beanFactory the {@link BeanFactory} instance.
*/
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
/**
* Get the validator, if supplied.
* @return the validator.
* @since 2.2
*/
@Nullable
public Validator getValidator() {
return this.validator;
}
/**
* Set the validator to use if the default message handler factory is used.
* @param validator the validator.
* @since 2.2
*/
public void setValidator(Validator validator) {
Assert.isNull(this.messageHandlerMethodFactory,
"A validator cannot be provided with a custom message handler factory");
this.validator = validator;
}
@Override
public void afterPropertiesSet() {
registerAllEndpoints();
}
protected void registerAllEndpoints() {
synchronized (this.endpointDescriptors) {
for (KafkaListenerEndpointDescriptor descriptor : this.endpointDescriptors) {
if (descriptor.endpoint instanceof MultiMethodKafkaListenerEndpoint
&& this.validator != null) {
((MultiMethodKafkaListenerEndpoint) descriptor.endpoint).setValidator(this.validator);
}
this.endpointRegistry.registerListenerContainer(
descriptor.endpoint, resolveContainerFactory(descriptor));
}
this.startImmediately = true; // trigger immediate startup
}
}
private KafkaListenerContainerFactory<?> resolveContainerFactory(KafkaListenerEndpointDescriptor descriptor) {
if (descriptor.containerFactory != null) {
return descriptor.containerFactory;
}
else if (this.containerFactory != null) {
return this.containerFactory;
}
else if (this.containerFactoryBeanName != null) {
Assert.state(this.beanFactory != null, "BeanFactory must be set to obtain container factory by bean name");
this.containerFactory = this.beanFactory.getBean(
this.containerFactoryBeanName, KafkaListenerContainerFactory.class);
return this.containerFactory; // Consider changing this if live change of the factory is required
}
else {
throw new IllegalStateException("Could not resolve the " +
KafkaListenerContainerFactory.class.getSimpleName() + " to use for [" +
descriptor.endpoint + "] no factory was given and no default is set.");
}
}
/**
* Register a new {@link KafkaListenerEndpoint} alongside the
* {@link KafkaListenerContainerFactory} to use to create the underlying container.
* <p>The {@code factory} may be {@code null} if the default factory has to be
* used for that endpoint.
* @param endpoint the {@link KafkaListenerEndpoint} instance to register.
* @param factory the {@link KafkaListenerContainerFactory} to use.
*/
public void registerEndpoint(KafkaListenerEndpoint endpoint, @Nullable KafkaListenerContainerFactory<?> factory) {
Assert.notNull(endpoint, "Endpoint must be set");
Assert.hasText(endpoint.getId(), "Endpoint id must be set");
// Factory may be null, we defer the resolution right before actually creating the container
KafkaListenerEndpointDescriptor descriptor = new KafkaListenerEndpointDescriptor(endpoint, factory);
synchronized (this.endpointDescriptors) {
if (this.startImmediately) { // Register and start immediately
this.endpointRegistry.registerListenerContainer(descriptor.endpoint,
resolveContainerFactory(descriptor), true);
}
else {
this.endpointDescriptors.add(descriptor);
}
}
}
/**
* Register a new {@link KafkaListenerEndpoint} using the default
* {@link KafkaListenerContainerFactory} to create the underlying container.
* @param endpoint the {@link KafkaListenerEndpoint} instance to register.
* @see #setContainerFactory(KafkaListenerContainerFactory)
* @see #registerEndpoint(KafkaListenerEndpoint, KafkaListenerContainerFactory)
*/
public void registerEndpoint(KafkaListenerEndpoint endpoint) {
registerEndpoint(endpoint, null);
}
private static final class KafkaListenerEndpointDescriptor {
private final KafkaListenerEndpoint endpoint;
private final KafkaListenerContainerFactory<?> containerFactory;
private KafkaListenerEndpointDescriptor(KafkaListenerEndpoint endpoint,
@Nullable KafkaListenerContainerFactory<?> containerFactory) {
this.endpoint = endpoint;
this.containerFactory = containerFactory;
}
}
}
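/*
 * Usage sketch (illustrative, not part of the original source): endpoints are
 * usually registered through a KafkaListenerConfigurer; the endpoint setup
 * below is hypothetical and incomplete (bean, method and handler factory omitted).
 *
 * @Configuration
 * class MyConfigurer implements org.springframework.kafka.annotation.KafkaListenerConfigurer {
 *     @Override
 *     public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
 *         MethodKafkaListenerEndpoint<String, String> endpoint = new MethodKafkaListenerEndpoint<>();
 *         endpoint.setId("programmatic");
 *         endpoint.setTopics("events");
 *         // ... set bean, method and message handler method factory ...
 *         registrar.registerEndpoint(endpoint); // uses the default container factory
 *     }
 * }
 */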
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaListenerEndpointRegistry.java | /*
* Copyright 2014-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.SmartLifecycle;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.ContainerGroup;
import org.springframework.kafka.listener.ListenerContainerRegistry;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.EndpointHandlerMethod;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* Creates the necessary {@link MessageListenerContainer} instances for the
* registered {@linkplain KafkaListenerEndpoint endpoints}. Also manages the
* lifecycle of the listener containers, in particular within the lifecycle
* of the application context.
*
* <p>Contrary to {@link MessageListenerContainer}s created manually, listener
* containers managed by registry are not beans in the application context and
* are not candidates for autowiring. Use {@link #getListenerContainers()} if
* you need to access this registry's listener containers for management purposes.
* If you need to access to a specific message listener container, use
* {@link #getListenerContainer(String)} with the id of the endpoint.
*
* @author Stephane Nicoll
* @author Juergen Hoeller
* @author Artem Bilan
* @author Gary Russell
* @author Asi Bross
*
* @see KafkaListenerEndpoint
* @see MessageListenerContainer
* @see KafkaListenerContainerFactory
*/
public class KafkaListenerEndpointRegistry implements ListenerContainerRegistry, DisposableBean, SmartLifecycle,
ApplicationContextAware, ApplicationListener<ContextRefreshedEvent> {
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); //NOSONAR
private final Map<String, MessageListenerContainer> listenerContainers = new ConcurrentHashMap<>();
private int phase = AbstractMessageListenerContainer.DEFAULT_PHASE;
private ConfigurableApplicationContext applicationContext;
private boolean contextRefreshed;
private volatile boolean running;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
if (applicationContext instanceof ConfigurableApplicationContext) {
this.applicationContext = (ConfigurableApplicationContext) applicationContext;
}
}
/**
* Return the {@link MessageListenerContainer} with the specified id or
* {@code null} if no such container exists.
* @param id the id of the container
* @return the container or {@code null} if no container with that id exists
* @see KafkaListenerEndpoint#getId()
* @see #getListenerContainerIds()
*/
@Override
@Nullable
public MessageListenerContainer getListenerContainer(String id) {
Assert.hasText(id, "Container identifier must not be empty");
return this.listenerContainers.get(id);
}
/**
* Return the ids of the managed {@link MessageListenerContainer} instance(s).
* @return the ids.
* @see #getListenerContainer(String)
*/
@Override
public Set<String> getListenerContainerIds() {
return Collections.unmodifiableSet(this.listenerContainers.keySet());
}
/**
* Return the managed {@link MessageListenerContainer} instance(s).
* @return the managed {@link MessageListenerContainer} instance(s).
* @see #getAllListenerContainers()
*/
@Override
public Collection<MessageListenerContainer> getListenerContainers() {
return Collections.unmodifiableCollection(this.listenerContainers.values());
}
/**
* Return all {@link MessageListenerContainer} instances including those managed by
* this registry and those declared as beans in the application context.
* Prototype-scoped containers will be included. Lazy beans that have not yet been
* created will not be initialized by a call to this method.
* @return the {@link MessageListenerContainer} instance(s).
* @since 2.2.5
* @see #getListenerContainers()
*/
@Override
public Collection<MessageListenerContainer> getAllListenerContainers() {
List<MessageListenerContainer> containers = new ArrayList<>();
containers.addAll(getListenerContainers());
containers.addAll(this.applicationContext.getBeansOfType(MessageListenerContainer.class, true, false).values());
return containers;
}
/**
* Create a message listener container for the given {@link KafkaListenerEndpoint}.
* <p>This create the necessary infrastructure to honor that endpoint
* with regards to its configuration.
* @param endpoint the endpoint to add
* @param factory the listener factory to use
* @see #registerListenerContainer(KafkaListenerEndpoint, KafkaListenerContainerFactory, boolean)
*/
public void registerListenerContainer(KafkaListenerEndpoint endpoint, KafkaListenerContainerFactory<?> factory) {
registerListenerContainer(endpoint, factory, false);
}
/**
* Create a message listener container for the given {@link KafkaListenerEndpoint}.
* <p>This create the necessary infrastructure to honor that endpoint
* with regards to its configuration.
* <p>The {@code startImmediately} flag determines if the container should be
* started immediately.
* @param endpoint the endpoint to add.
* @param factory the {@link KafkaListenerContainerFactory} to use.
* @param startImmediately start the container immediately if necessary
* @see #getListenerContainers()
* @see #getListenerContainer(String)
*/
@SuppressWarnings("unchecked")
public void registerListenerContainer(KafkaListenerEndpoint endpoint, KafkaListenerContainerFactory<?> factory,
boolean startImmediately) {
Assert.notNull(endpoint, "Endpoint must not be null");
Assert.notNull(factory, "Factory must not be null");
String id = endpoint.getId();
Assert.hasText(id, "Endpoint id must not be empty");
synchronized (this.listenerContainers) {
Assert.state(!this.listenerContainers.containsKey(id),
"Another endpoint is already registered with id '" + id + "'");
MessageListenerContainer container = createListenerContainer(endpoint, factory);
this.listenerContainers.put(id, container);
ConfigurableApplicationContext appContext = this.applicationContext;
String groupName = endpoint.getGroup();
if (StringUtils.hasText(groupName) && appContext != null) {
List<MessageListenerContainer> containerGroup;
ContainerGroup group;
if (appContext.containsBean(groupName)) { // NOSONAR - hasText
containerGroup = appContext.getBean(groupName, List.class); // NOSONAR - hasText
group = appContext.getBean(groupName + ".group", ContainerGroup.class);
}
else {
containerGroup = new ArrayList<MessageListenerContainer>();
appContext.getBeanFactory().registerSingleton(groupName, containerGroup); // NOSONAR - hasText
group = new ContainerGroup(groupName);
appContext.getBeanFactory().registerSingleton(groupName + ".group", group);
}
containerGroup.add(container);
group.addContainers(container);
}
if (startImmediately) {
startIfNecessary(container);
}
}
}
/**
* Create and start a new {@link MessageListenerContainer} using the specified factory.
* @param endpoint the endpoint to create a {@link MessageListenerContainer}.
* @param factory the {@link KafkaListenerContainerFactory} to use.
* @return the {@link MessageListenerContainer}.
*/
protected MessageListenerContainer createListenerContainer(KafkaListenerEndpoint endpoint,
KafkaListenerContainerFactory<?> factory) {
if (endpoint instanceof MethodKafkaListenerEndpoint) {
MethodKafkaListenerEndpoint<?, ?> mkle = (MethodKafkaListenerEndpoint<?, ?>) endpoint;
Object bean = mkle.getBean();
if (bean instanceof EndpointHandlerMethod) {
EndpointHandlerMethod ehm = (EndpointHandlerMethod) bean;
ehm = new EndpointHandlerMethod(ehm.resolveBean(this.applicationContext), ehm.getMethodName());
mkle.setBean(ehm.resolveBean(this.applicationContext));
mkle.setMethod(ehm.getMethod());
}
}
MessageListenerContainer listenerContainer = factory.createListenerContainer(endpoint);
if (listenerContainer instanceof InitializingBean) {
try {
((InitializingBean) listenerContainer).afterPropertiesSet();
}
catch (Exception ex) {
throw new BeanInitializationException("Failed to initialize message listener container", ex);
}
}
int containerPhase = listenerContainer.getPhase();
if (listenerContainer.isAutoStartup() &&
containerPhase != AbstractMessageListenerContainer.DEFAULT_PHASE) { // a custom phase value
if (this.phase != AbstractMessageListenerContainer.DEFAULT_PHASE && this.phase != containerPhase) {
throw new IllegalStateException("Encountered phase mismatch between container "
+ "factory definitions: " + this.phase + " vs " + containerPhase);
}
this.phase = listenerContainer.getPhase();
}
return listenerContainer;
}
@Override
public void destroy() {
for (MessageListenerContainer listenerContainer : getListenerContainers()) {
listenerContainer.destroy();
}
}
// Delegating implementation of SmartLifecycle
@Override
public int getPhase() {
return this.phase;
}
@Override
public boolean isAutoStartup() {
return true;
}
@Override
public void start() {
for (MessageListenerContainer listenerContainer : getListenerContainers()) {
startIfNecessary(listenerContainer);
}
this.running = true;
}
@Override
public void stop() {
this.running = false;
for (MessageListenerContainer listenerContainer : getListenerContainers()) {
listenerContainer.stop();
}
}
@Override
public void stop(Runnable callback) {
this.running = false;
Collection<MessageListenerContainer> listenerContainersToStop = getListenerContainers();
if (listenerContainersToStop.size() > 0) {
AggregatingCallback aggregatingCallback = new AggregatingCallback(listenerContainersToStop.size(),
callback);
for (MessageListenerContainer listenerContainer : listenerContainersToStop) {
if (listenerContainer.isRunning()) {
listenerContainer.stop(aggregatingCallback);
}
else {
aggregatingCallback.run();
}
}
}
else {
callback.run();
}
}
@Override
public boolean isRunning() {
return this.running;
}
@Override
public void onApplicationEvent(ContextRefreshedEvent event) {
if (event.getApplicationContext().equals(this.applicationContext)) {
this.contextRefreshed = true;
}
}
/**
* Start the specified {@link MessageListenerContainer} if it should be started
* on startup.
* @param listenerContainer the listener container to start.
* @see MessageListenerContainer#isAutoStartup()
*/
private void startIfNecessary(MessageListenerContainer listenerContainer) {
if (this.contextRefreshed || listenerContainer.isAutoStartup()) {
listenerContainer.start();
}
}
private static final class AggregatingCallback implements Runnable {
private final AtomicInteger count;
private final Runnable finishCallback;
private AggregatingCallback(int count, Runnable finishCallback) {
this.count = new AtomicInteger(count);
this.finishCallback = finishCallback;
}
@Override
public void run() {
if (this.count.decrementAndGet() <= 0) {
this.finishCallback.run();
}
}
}
}
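/*
 * Usage sketch (illustrative, not part of the original source): looking up a
 * container by endpoint id for lifecycle management; "myListener" is a
 * hypothetical listener id.
 *
 * MessageListenerContainer container = registry.getListenerContainer("myListener");
 * if (container != null && container.isRunning()) {
 *     container.stop();
 * }
 */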
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaStreamsConfiguration.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.springframework.util.Assert;
/**
* Wrapper for {@link org.apache.kafka.streams.StreamsBuilder} properties. The framework
* looks for a bean of this type with name 'defaultKafkaStreamsConfig' and auto-declares a
* {@link StreamsBuilderFactoryBean} using it. The {@link Properties} class is too general
* for such activity.
*
* @author Gary Russell
* @since 2.2
*
*/
public class KafkaStreamsConfiguration {
private final Map<String, Object> configs;
private Properties properties;
public KafkaStreamsConfiguration(Map<String, Object> configs) {
Assert.notNull(configs, "Configuration map cannot be null");
this.configs = new HashMap<>(configs);
}
/**
* Return the configuration map as a {@link Properties}.
* @return the properties.
*/
public Properties asProperties() {
if (this.properties == null) {
Properties props = new Properties();
props.putAll(this.configs);
this.properties = props;
}
return this.properties;
}
}
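/*
 * Usage sketch (illustrative, not part of the original source): declaring the
 * 'defaultKafkaStreamsConfig' bean the framework looks for; the application id
 * and broker address are placeholders.
 *
 * @Bean(name = "defaultKafkaStreamsConfig")
 * KafkaStreamsConfiguration defaultKafkaStreamsConfig() {
 *     Map<String, Object> props = new HashMap<>();
 *     props.put(org.apache.kafka.streams.StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");
 *     props.put(org.apache.kafka.streams.StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
 *     return new KafkaStreamsConfiguration(props);
 * }
 */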
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaStreamsCustomizer.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import org.apache.kafka.streams.KafkaStreams;
/**
* Callback interface that can be used to configure {@link KafkaStreams} directly.
*
* @author Nurettin Yilmaz
*
* @since 2.1.5
*
* @see StreamsBuilderFactoryBean
*/
@FunctionalInterface
public interface KafkaStreamsCustomizer {
void customize(KafkaStreams kafkaStreams);
}
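/*
 * Usage sketch (illustrative, not part of the original source): being a
 * functional interface, this is commonly supplied as a lambda.
 *
 * KafkaStreamsCustomizer customizer = kafkaStreams ->
 *         System.out.println("Streams state: " + kafkaStreams.state());
 */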
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/config/KafkaStreamsInfrastructureCustomizer.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.config;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
/**
* A customizer for infrastructure components such as the {@code StreamsBuilder} and
* {@code Topology}. It can be provided to the {@link StreamsBuilderFactoryBean} which
* will apply the changes before creating the stream.
*
* @author Gary Russell
* @since 2.4.1
*
*/
public interface KafkaStreamsInfrastructureCustomizer {
/**
* Configure the builder.
* @param builder the builder.
*/
default void configureBuilder(StreamsBuilder builder) {
// no-op
}
/**
* Configure the topology.
* @param topology the topology
*/
default void configureTopology(Topology topology) {
// no-op
}
}
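/*
 * Usage sketch (illustrative, not part of the original source): overriding only
 * the hook that is needed, since both methods default to no-ops.
 *
 * KafkaStreamsInfrastructureCustomizer customizer = new KafkaStreamsInfrastructureCustomizer() {
 *     @Override
 *     public void configureTopology(Topology topology) {
 *         System.out.println(topology.describe()); // e.g. inspect the built topology
 *     }
 * };
 */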
|