index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/streams/messaging/package-info.java | /**
* Package for classes related to spring-messaging with Kafka Streams.
*/
package org.springframework.kafka.streams.messaging;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/AbstractKafkaHeaderMapper.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.header.Header;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.messaging.MessageHeaders;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.PatternMatchUtils;
/**
* Base for Kafka header mappers.
*
* @author Gary Russell
* @author Artem Bilan
*
* @since 2.1.3
*
*/
public abstract class AbstractKafkaHeaderMapper implements KafkaHeaderMapper {
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR
private final List<HeaderMatcher> matchers = new ArrayList<>();
private final Map<String, Boolean> rawMappedHeaders = new HashMap<>();
{
this.rawMappedHeaders.put(KafkaHeaders.LISTENER_INFO, true);
}
private boolean mapAllStringsOut;
private Charset charset = StandardCharsets.UTF_8;
public AbstractKafkaHeaderMapper(String... patterns) {
Assert.notNull(patterns, "'patterns' must not be null");
this.matchers.add(new NeverMatchHeaderMatcher(
KafkaHeaders.ACKNOWLEDGMENT,
KafkaHeaders.CONSUMER,
KafkaHeaders.MESSAGE_KEY,
KafkaHeaders.OFFSET,
KafkaHeaders.PARTITION_ID,
KafkaHeaders.RAW_DATA,
KafkaHeaders.RECEIVED_MESSAGE_KEY,
KafkaHeaders.RECEIVED_PARTITION_ID,
KafkaHeaders.RECEIVED_TIMESTAMP,
KafkaHeaders.RECEIVED_TOPIC,
KafkaHeaders.TIMESTAMP,
KafkaHeaders.TIMESTAMP_TYPE,
KafkaHeaders.BATCH_CONVERTED_HEADERS,
KafkaHeaders.NATIVE_HEADERS,
KafkaHeaders.TOPIC,
KafkaHeaders.DELIVERY_ATTEMPT,
KafkaHeaders.LISTENER_INFO,
KafkaHeaders.GROUP_ID));
for (String pattern : patterns) {
this.matchers.add(new SimplePatternBasedHeaderMatcher(pattern));
}
}
/**
* Subclasses can invoke this to add custom {@link HeaderMatcher}s.
* @param matchersToAdd the matchers to add.
* @since 2.3
*/
protected final void addMatchers(HeaderMatcher... matchersToAdd) {
Assert.notNull(matchersToAdd, "'matchersToAdd' cannot be null");
Assert.noNullElements(matchersToAdd, "'matchersToAdd' cannot have null elements");
Collections.addAll(this.matchers, matchersToAdd);
}
/**
* Set to true to map all {@code String} valued outbound headers to {@code byte[]}.
* To map to a {@code String} for inbound, there must be an entry in the rawMappedHeaders map.
* @param mapAllStringsOut true to map all strings.
* @since 2.2.5
* @see #setRawMappedHeaders(Map)
*/
public void setMapAllStringsOut(boolean mapAllStringsOut) {
this.mapAllStringsOut = mapAllStringsOut;
}
protected Charset getCharset() {
return this.charset;
}
/**
* Set the charset to use when mapping String-valued headers to/from byte[]. Default UTF-8.
* @param charset the charset.
* @since 2.2.5
* @see #setRawMappedHeaders(Map)
*/
public void setCharset(Charset charset) {
Assert.notNull(charset, "'charset' cannot be null");
this.charset = charset;
}
/**
* Set the headers to not perform any conversion on (except {@code String} to
* {@code byte[]} for outbound). Inbound headers that match will be mapped as
* {@code byte[]} unless the corresponding boolean in the map value is true,
* in which case it will be mapped as a String.
* @param rawMappedHeaders the header names to not convert and
* @since 2.2.5
* @see #setCharset(Charset)
* @see #setMapAllStringsOut(boolean)
*/
public void setRawMappedHeaders(Map<String, Boolean> rawMappedHeaders) {
if (!ObjectUtils.isEmpty(rawMappedHeaders)) {
this.rawMappedHeaders.clear();
this.rawMappedHeaders.putAll(rawMappedHeaders);
}
}
/**
* Add a raw mapped header.
* @param name the header name.
* @param toString convert to string on inbound when true.
* @since 2.7.1
* @see #setRawMappedHeaders(Map)
*/
public void addRawMappedHeader(String name, boolean toString) {
this.rawMappedHeaders.put(name, toString);
}
protected boolean matches(String header, Object value) {
if (matches(header)) {
if ((header.equals(MessageHeaders.REPLY_CHANNEL) || header.equals(MessageHeaders.ERROR_CHANNEL))
&& !(value instanceof String)) {
this.logger.debug(() -> "Cannot map " + header + " when type is [" + value.getClass()
+ "]; it must be a String");
return false;
}
return true;
}
return false;
}
protected boolean matches(String header) {
for (HeaderMatcher matcher : this.matchers) {
if (matcher.matchHeader(header)) {
return !matcher.isNegated();
}
}
this.logger.debug(() -> MessageFormat.format("headerName=[{0}] WILL NOT be mapped; matched no patterns",
header));
return false;
}
/**
* Check if the value is a String and convert to byte[], if so configured.
* @param key the header name.
* @param value the header value.
* @return the value to add.
* @since 2.2.5
*/
protected Object headerValueToAddOut(String key, Object value) {
Object valueToAdd = mapRawOut(key, value);
if (valueToAdd == null) {
valueToAdd = value;
}
return valueToAdd;
}
@Nullable
private byte[] mapRawOut(String header, Object value) {
if (this.mapAllStringsOut || this.rawMappedHeaders.containsKey(header)) {
if (value instanceof byte[]) {
return (byte[]) value;
}
else if (value instanceof String) {
return ((String) value).getBytes(this.charset);
}
}
return null;
}
/**
* Check if the header value should be mapped to a String, if so configured.
* @param header the header.
* @return the value to add.
*/
protected Object headerValueToAddIn(Header header) {
Object mapped = mapRawIn(header.key(), header.value());
if (mapped == null) {
mapped = header.value();
}
return mapped;
}
@Nullable
private String mapRawIn(String header, byte[] value) {
Boolean asString = this.rawMappedHeaders.get(header);
if (Boolean.TRUE.equals(asString)) {
return new String(value, this.charset);
}
return null;
}
/**
* A matcher for headers.
* @since 2.3
*/
protected interface HeaderMatcher {
/**
* Return true if the header matches.
* @param headerName the header name.
* @return true for a match.
*/
boolean matchHeader(String headerName);
/**
* Return true if this matcher is a negative matcher.
* @return true for a negative matcher.
*/
boolean isNegated();
}
/**
* A matcher that never matches a set of headers.
* @since 2.3
*/
protected static class NeverMatchHeaderMatcher implements HeaderMatcher {
private final Set<String> neverMatchHeaders;
protected NeverMatchHeaderMatcher(String... headers) {
this.neverMatchHeaders = Arrays.stream(headers)
.collect(Collectors.toSet());
}
@Override
public boolean matchHeader(String headerName) {
return this.neverMatchHeaders.contains(headerName);
}
@Override
public boolean isNegated() {
return true;
}
}
/**
* A pattern-based header matcher that matches if the specified
* header matches the specified simple pattern.
* <p> The {@code negate == true} state indicates if the matching should be treated as "not matched".
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
protected static class SimplePatternBasedHeaderMatcher implements HeaderMatcher {
private static final LogAccessor LOGGER =
new LogAccessor(LogFactory.getLog(SimplePatternBasedHeaderMatcher.class));
private final String pattern;
private final boolean negate;
protected SimplePatternBasedHeaderMatcher(String pattern) {
this(pattern.startsWith("!") ? pattern.substring(1) : pattern, pattern.startsWith("!"));
}
SimplePatternBasedHeaderMatcher(String pattern, boolean negate) {
Assert.notNull(pattern, "Pattern must no be null");
this.pattern = pattern.toLowerCase();
this.negate = negate;
}
@Override
public boolean matchHeader(String headerName) {
String header = headerName.toLowerCase();
if (PatternMatchUtils.simpleMatch(this.pattern, header)) {
LOGGER.debug(() ->
MessageFormat.format(
"headerName=[{0}] WILL " + (this.negate ? "NOT " : "")
+ "be mapped, matched pattern=" + (this.negate ? "!" : "") + "{1}",
headerName, this.pattern));
return true;
}
return false;
}
@Override
public boolean isNegated() {
return this.negate;
}
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/Acknowledgment.java | /*
* Copyright 2015-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
/**
* Handle for acknowledging the processing of a
* {@link org.apache.kafka.clients.consumer.ConsumerRecord}. Recipients can store the
* reference in asynchronous scenarios, but the internal state should be assumed transient
* (i.e. it cannot be serialized and deserialized later)
*
* @author Marius Bogoevici
* @author Gary Russell
*/
public interface Acknowledgment {

    /**
     * Invoked when the record or batch for which the acknowledgment has been created has
     * been processed. Calling this method implies that all the previous messages in the
     * partition have been processed already.
     */
    void acknowledge();

    /**
     * Negatively acknowledge the current record - discard remaining records from the poll
     * and re-seek all partitions so that this record will be redelivered after the sleep
     * time. Must be called on the consumer thread.
     * <p>
     * <b>When using group management,
     * {@code sleep + time spent processing the previous messages from the poll} must be
     * less than the consumer {@code max.poll.interval.ms} property, to avoid a
     * rebalance.</b>
     * <p>
     * The default implementation throws {@link UnsupportedOperationException}; containers
     * that support negative acknowledgment override it.
     * @param sleep the time to sleep.
     * @throws UnsupportedOperationException if not supported by the implementation.
     * @since 2.3
     */
    default void nack(long sleep) {
        throw new UnsupportedOperationException("nack(sleep) is not supported by this Acknowledgment");
    }

    /**
     * Negatively acknowledge the record at an index in a batch - commit the offset(s) of
     * records before the index and re-seek the partitions so that the record at the index
     * and subsequent records will be redelivered after the sleep time. Must be called on
     * the consumer thread.
     * <p>
     * <b>When using group management,
     * {@code sleep + time spent processing the records before the index} must be less
     * than the consumer {@code max.poll.interval.ms} property, to avoid a rebalance.</b>
     * <p>
     * The default implementation throws {@link UnsupportedOperationException}; containers
     * that support negative acknowledgment override it.
     * @param index the index of the failed record in the batch.
     * @param sleep the time to sleep.
     * @throws UnsupportedOperationException if not supported by the implementation.
     * @since 2.3
     */
    default void nack(int index, long sleep) {
        throw new UnsupportedOperationException("nack(index, sleep) is not supported by this Acknowledgment");
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/AllowDenyCollectionManager.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.function.Predicate;
import org.springframework.util.Assert;
/**
* Class for managing Allow / Deny collections and its predicates.
*
* @param <T> Collection generic type
* @author Tomaz Fernandes
* @since 28/12/20
*/
public final class AllowDenyCollectionManager<T> {

    private final Collection<T> allowList;

    private final Collection<T> denyList;

    private final Collection<Predicate<T>> predicates;

    /**
     * Construct an instance with the given allow and deny lists and the default
     * predicate: an object is allowed when it is not in the deny list and the allow
     * list is empty or contains it.
     * @param allowList the allowed objects; empty means "allow all not denied".
     * @param denyList the denied objects.
     */
    public AllowDenyCollectionManager(Collection<T> allowList, Collection<T> denyList) {
        // Delegate so this constructor gets the same null validation as the
        // three-argument one (previously it performed no checks).
        this(allowList, denyList, Collections.singletonList(defaultPredicate(allowList, denyList)));
    }

    /**
     * Construct an instance with the given allow and deny lists and custom predicates;
     * an object is allowed only if every predicate accepts it.
     * @param allowList the allowed objects.
     * @param denyList the denied objects.
     * @param predicates the predicates to apply.
     */
    public AllowDenyCollectionManager(Collection<T> allowList, Collection<T> denyList, Collection<Predicate<T>> predicates) {
        Assert.notNull(allowList, () -> "AllowList cannot be null");
        Assert.notNull(denyList, () -> "DenyList cannot be null");
        Assert.notNull(predicates, () -> "Predicates cannot be null");
        this.allowList = allowList;
        this.denyList = denyList;
        this.predicates = predicates;
    }

    // Static form so it can be used before 'this' is available in the delegating
    // constructor; the lambda only captures the collections, so validation still
    // happens before any lookup.
    private static <T> Predicate<T> defaultPredicate(Collection<T> allowList, Collection<T> denyList) {
        return objectToCheck -> !denyList.contains(objectToCheck)
                && (allowList.isEmpty() || allowList.contains(objectToCheck));
    }

    /**
     * Return the default predicate for the given lists: denied objects are rejected;
     * otherwise an empty allow list allows everything, a non-empty one only its members.
     * @param allowList the allowed objects.
     * @param denyList the denied objects.
     * @return the predicate.
     */
    public Predicate<T> getDefaultPredicate(Collection<T> allowList, Collection<T> denyList) {
        return defaultPredicate(allowList, denyList);
    }

    /**
     * Return true if all configured predicates accept the object.
     * @param objectToCheck the candidate.
     * @return true if allowed.
     */
    public boolean isAllowed(T objectToCheck) {
        return this.predicates
                .stream()
                .allMatch(predicate -> predicate.test(objectToCheck));
    }

    /**
     * Return true if all of the objects are allowed.
     * @param objects the candidates.
     * @return true if all are allowed.
     */
    public boolean areAllowed(T[] objects) {
        return Arrays.stream(objects)
                .allMatch(this::isAllowed);
    }

    /**
     * Factory method equivalent to {@link #AllowDenyCollectionManager(Collection, Collection)}.
     * @param allowList the allowed objects.
     * @param denyList the denied objects.
     * @param <T> the element type.
     * @return the manager.
     */
    public static <T> AllowDenyCollectionManager<T> createManagerFor(Collection<T> allowList, Collection<T> denyList) {
        return new AllowDenyCollectionManager<>(allowList, denyList);
    }

    /**
     * Factory method equivalent to
     * {@link #AllowDenyCollectionManager(Collection, Collection, Collection)}.
     * @param allowList the allowed objects.
     * @param denyList the denied objects.
     * @param predicates the predicates.
     * @param <T> the element type.
     * @return the manager.
     */
    public static <T> AllowDenyCollectionManager<T> createManagerFor(Collection<T> allowList, Collection<T> denyList, Collection<Predicate<T>> predicates) {
        return new AllowDenyCollectionManager<>(allowList, denyList, predicates);
    }

    /**
     * Return true when both lists are empty (everything is allowed by the default
     * predicate).
     * @return true if there are no restrictions.
     */
    public boolean hasNoRestrictions() {
        return this.allowList.isEmpty()
                && this.denyList.isEmpty();
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/CompositeProducerListener.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.util.Assert;
/**
* A {@link ProducerListener} that delegates to a collection of listeners.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
* @author Artem Bilan
*
* @since 2.1.6
*
*/
public class CompositeProducerListener<K, V> implements ProducerListener<K, V> {

    // Copy-on-write so callbacks can iterate safely while delegates are mutated.
    private final List<ProducerListener<K, V>> delegates = new CopyOnWriteArrayList<>();

    /**
     * Construct an instance that fans out callbacks to the supplied listeners.
     * @param delegates the listeners to delegate to.
     */
    @SafeVarargs
    @SuppressWarnings("varargs")
    public CompositeProducerListener(ProducerListener<K, V>... delegates) {
        setDelegates(delegates);
    }

    /**
     * Replace the current delegates with the supplied listeners.
     * @param delegates the listeners to delegate to; must not be null or contain nulls.
     */
    @SafeVarargs
    @SuppressWarnings("varargs")
    public final void setDelegates(ProducerListener<K, V>... delegates) {
        Assert.notNull(delegates, "'delegates' cannot be null");
        Assert.noNullElements(delegates, "'delegates' cannot contain null elements");
        List<ProducerListener<K, V>> replacements = Arrays.asList(delegates);
        this.delegates.clear();
        this.delegates.addAll(replacements);
    }

    /**
     * Return the current delegates (the live, thread-safe list).
     * @return the delegates.
     */
    protected List<ProducerListener<K, V>> getDelegates() {
        return this.delegates;
    }

    /**
     * Append a listener to the delegates.
     * @param delegate the listener to add.
     */
    public void addDelegate(ProducerListener<K, V> delegate) {
        this.delegates.add(delegate);
    }

    /**
     * Remove a listener from the delegates.
     * @param delegate the listener to remove.
     * @return true if it was present.
     */
    public boolean removeDelegate(ProducerListener<K, V> delegate) {
        return this.delegates.remove(delegate);
    }

    @Override
    public void onSuccess(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata) {
        for (ProducerListener<K, V> listener : this.delegates) {
            listener.onSuccess(producerRecord, recordMetadata);
        }
    }

    @Override
    public void onError(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata, Exception exception) {
        for (ProducerListener<K, V> listener : this.delegates) {
            listener.onError(producerRecord, recordMetadata, exception);
        }
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/DefaultKafkaHeaderMapper.java | /*
* Copyright 2017-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.lang.Nullable;
import org.springframework.messaging.MessageHeaders;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.MimeType;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.deser.std.StdNodeBasedDeserializer;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.node.TextNode;
import com.fasterxml.jackson.databind.type.TypeFactory;
/**
* Default header mapper for Apache Kafka.
* Most headers in {@link KafkaHeaders} are not mapped on outbound messages.
* The exceptions are correlation and reply headers for request/reply
* messaging.
* Header types are added to a special header {@link #JSON_TYPES}.
*
* @author Gary Russell
* @author Artem Bilan
*
* @since 1.3
*
*/
public class DefaultKafkaHeaderMapper extends AbstractKafkaHeaderMapper {

    private static final String JAVA_LANG_STRING = "java.lang.String";

    // JVM binary names for primitive array types; these carry no code so they are
    // always safe to deserialize.
    private static final Set<String> TRUSTED_ARRAY_TYPES =
            new HashSet<>(Arrays.asList(
                    "[B",
                    "[I",
                    "[J",
                    "[F",
                    "[D",
                    "[C"
            ));

    private static final List<String> DEFAULT_TRUSTED_PACKAGES =
            Arrays.asList(
                    "java.lang",
                    "java.net",
                    "java.util",
                    "org.springframework.util"
            );

    private static final List<String> DEFAULT_TO_STRING_CLASSES =
            Arrays.asList(
                    "org.springframework.util.MimeType",
                    "org.springframework.http.MediaType"
            );

    /**
     * Header name for java types of other headers.
     */
    public static final String JSON_TYPES = "spring_json_header_types";

    private final ObjectMapper objectMapper;

    private final Set<String> trustedPackages = new LinkedHashSet<>(DEFAULT_TRUSTED_PACKAGES);

    private final Set<String> toStringClasses = new LinkedHashSet<>(DEFAULT_TO_STRING_CLASSES);

    private boolean encodeStrings;

    /**
     * Construct an instance with the default object mapper and default header patterns
     * for outbound headers; all inbound headers are mapped. The default pattern list is
     * {@code "!id", "!timestamp" and "*"}. In addition, most of the headers in
     * {@link KafkaHeaders} are never mapped as headers since they represent data in
     * consumer/producer records.
     * @see #DefaultKafkaHeaderMapper(ObjectMapper)
     */
    public DefaultKafkaHeaderMapper() {
        this(JacksonUtils.enhancedObjectMapper());
    }

    /**
     * Construct an instance with the provided object mapper and default header patterns
     * for outbound headers; all inbound headers are mapped. The patterns are applied in
     * order, stopping on the first match (positive or negative). Patterns are negated by
     * preceding them with "!". The default pattern list is
     * {@code "!id", "!timestamp" and "*"}. In addition, most of the headers in
     * {@link KafkaHeaders} are never mapped as headers since they represent data in
     * consumer/producer records.
     * @param objectMapper the object mapper.
     * @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
     */
    public DefaultKafkaHeaderMapper(ObjectMapper objectMapper) {
        this(objectMapper,
                "!" + MessageHeaders.ID,
                "!" + MessageHeaders.TIMESTAMP,
                "*");
    }

    /**
     * Construct an instance with a default object mapper and the provided header patterns
     * for outbound headers; all inbound headers are mapped. The patterns are applied in
     * order, stopping on the first match (positive or negative). Patterns are negated by
     * preceding them with "!". The patterns will replace the default patterns; you
     * generally should not map the {@code "id" and "timestamp"} headers. Note:
     * most of the headers in {@link KafkaHeaders} are never mapped as headers since they
     * represent data in consumer/producer records.
     * @param patterns the patterns.
     * @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
     */
    public DefaultKafkaHeaderMapper(String... patterns) {
        // NOTE(review): unlike the no-arg constructor this uses a plain ObjectMapper
        // rather than JacksonUtils.enhancedObjectMapper() - confirm this asymmetry is
        // intentional before changing it.
        this(new ObjectMapper(), patterns);
    }

    /**
     * Construct an instance with the provided object mapper and the provided header
     * patterns for outbound headers; all inbound headers are mapped. The patterns are
     * applied in order, stopping on the first match (positive or negative). Patterns are
     * negated by preceding them with "!". The patterns will replace the default patterns;
     * you generally should not map the {@code "id" and "timestamp"} headers. Note: most
     * of the headers in {@link KafkaHeaders} are never mapped as headers since they
     * represent data in consumer/producer records.
     * @param objectMapper the object mapper.
     * @param patterns the patterns.
     * @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
     */
    public DefaultKafkaHeaderMapper(ObjectMapper objectMapper, String... patterns) {
        super(patterns);
        Assert.notNull(objectMapper, "'objectMapper' must not be null");
        Assert.noNullElements(patterns, "'patterns' must not have null elements");
        this.objectMapper = objectMapper;
        this.objectMapper
                .registerModule(new SimpleModule().addDeserializer(MimeType.class, new MimeTypeJsonDeserializer()));
    }

    /**
     * Return the object mapper.
     * @return the mapper.
     */
    protected ObjectMapper getObjectMapper() {
        return this.objectMapper;
    }

    /**
     * Provide direct access to the trusted packages set for subclasses.
     * @return the trusted packages.
     * @since 2.2
     */
    protected Set<String> getTrustedPackages() {
        return this.trustedPackages;
    }

    /**
     * Provide direct access to the toString() classes by subclasses.
     * @return the toString() classes.
     * @since 2.2
     */
    protected Set<String> getToStringClasses() {
        return this.toStringClasses;
    }

    protected boolean isEncodeStrings() {
        return this.encodeStrings;
    }

    /**
     * Set to true to encode String-valued headers as JSON ("..."), by default just the
     * raw String value is converted to a byte array using the configured charset. Set to
     * true if a consumer of the outbound record is using Spring for Apache Kafka version
     * less than 2.3
     * @param encodeStrings true to encode (default false).
     * @since 2.3
     */
    public void setEncodeStrings(boolean encodeStrings) {
        this.encodeStrings = encodeStrings;
    }

    /**
     * Add packages to the trusted packages list (default {@code java.util, java.lang}) used
     * when constructing objects from JSON.
     * If any of the supplied packages is {@code "*"}, all packages are trusted.
     * If a class for a non-trusted package is encountered, the header is returned to the
     * application with value of type {@link NonTrustedHeaderType}.
     * @param packagesToTrust the packages to trust.
     */
    public void addTrustedPackages(String... packagesToTrust) {
        if (packagesToTrust != null) {
            for (String trusted : packagesToTrust) {
                if ("*".equals(trusted)) {
                    // An empty set means "trust everything" - see trusted(String).
                    this.trustedPackages.clear();
                    break;
                }
                else {
                    this.trustedPackages.add(trusted);
                }
            }
        }
    }

    /**
     * Add class names that the outbound mapper should perform toString() operations on
     * before mapping.
     * @param classNames the class names.
     * @since 2.2
     */
    public void addToStringClasses(String... classNames) {
        this.toStringClasses.addAll(Arrays.asList(classNames));
    }

    @Override
    public void fromHeaders(MessageHeaders headers, Headers target) {
        final Map<String, String> jsonHeaders = new HashMap<>();
        final ObjectMapper headerObjectMapper = getObjectMapper();
        headers.forEach((key, rawValue) -> {
            if (matches(key, rawValue)) {
                Object valueToAdd = headerValueToAddOut(key, rawValue);
                if (valueToAdd instanceof byte[]) {
                    target.add(new RecordHeader(key, (byte[]) valueToAdd));
                }
                else {
                    try {
                        String className = valueToAdd.getClass().getName();
                        boolean encodeToJson = this.encodeStrings;
                        if (this.toStringClasses.contains(className)) {
                            // Configured to map via toString(); the receiver sees a String.
                            valueToAdd = valueToAdd.toString();
                            className = JAVA_LANG_STRING;
                            encodeToJson = true;
                        }
                        if (!encodeToJson && valueToAdd instanceof String) {
                            // Plain charset bytes, no JSON quoting.
                            target.add(new RecordHeader(key, ((String) valueToAdd).getBytes(getCharset())));
                            className = JAVA_LANG_STRING;
                        }
                        else {
                            target.add(new RecordHeader(key, headerObjectMapper.writeValueAsBytes(valueToAdd)));
                        }
                        // Record the type so toHeaders() can reconstruct the value.
                        jsonHeaders.put(key, className);
                    }
                    catch (Exception e) {
                        logger.debug(e, () -> "Could not map " + key + " with type " + rawValue.getClass().getName());
                    }
                }
            }
        });
        if (!jsonHeaders.isEmpty()) {
            try {
                target.add(new RecordHeader(JSON_TYPES, headerObjectMapper.writeValueAsBytes(jsonHeaders)));
            }
            catch (IllegalStateException | JsonProcessingException e) {
                logger.error(e, "Could not add json types header");
            }
        }
    }

    @Override
    public void toHeaders(Headers source, final Map<String, Object> headers) {
        final Map<String, String> jsonTypes = decodeJsonTypes(source);
        source.forEach(header -> {
            if (header.key().equals(KafkaHeaders.DELIVERY_ATTEMPT)) {
                // Written by the container as a 4-byte big-endian int.
                headers.put(header.key(), ByteBuffer.wrap(header.value()).getInt());
            }
            else if (header.key().equals(KafkaHeaders.LISTENER_INFO)) {
                headers.put(header.key(), new String(header.value(), getCharset()));
            }
            else if (!(header.key().equals(JSON_TYPES))) {
                if (jsonTypes != null && jsonTypes.containsKey(header.key())) {
                    String requestedType = jsonTypes.get(header.key());
                    populateJsonValueHeader(header, requestedType, headers);
                }
                else {
                    headers.put(header.key(), headerValueToAddIn(header));
                }
            }
        });
    }

    // Decode a single typed header; untrusted types are wrapped in
    // NonTrustedHeaderType rather than instantiated.
    private void populateJsonValueHeader(Header header, String requestedType, Map<String, Object> headers) {
        Class<?> type = Object.class;
        boolean trusted = false;
        try {
            trusted = trusted(requestedType);
            if (trusted) {
                type = ClassUtils.forName(requestedType, null);
            }
        }
        catch (Exception e) {
            logger.error(e, () -> "Could not load class for header: " + header.key());
        }
        if (String.class.equals(type) && (header.value().length == 0 || header.value()[0] != '"')) {
            // A String header that was not JSON-quoted (sent by a 2.3+ producer with
            // encodeStrings=false): just decode the raw bytes.
            headers.put(header.key(), new String(header.value(), getCharset()));
        }
        else {
            if (trusted) {
                try {
                    Object value = decodeValue(header, type);
                    headers.put(header.key(), value);
                }
                catch (IOException e) {
                    logger.error(e, () ->
                            "Could not decode json type: " + new String(header.value(), getCharset())
                                    + " for key: " + header.key());
                    // Fall back to the raw bytes so the header is not lost.
                    headers.put(header.key(), header.value());
                }
            }
            else {
                headers.put(header.key(), new NonTrustedHeaderType(header.value(), requestedType));
            }
        }
    }

    private Object decodeValue(Header h, Class<?> type) throws IOException, LinkageError {
        ObjectMapper headerObjectMapper = getObjectMapper();
        Object value = headerObjectMapper.readValue(h.value(), type);
        if (type.equals(NonTrustedHeaderType.class)) {
            // Upstream NTHT propagated; may be trusted here...
            NonTrustedHeaderType nth = (NonTrustedHeaderType) value;
            if (trusted(nth.getUntrustedType())) {
                try {
                    value = headerObjectMapper.readValue(nth.getHeaderValue(),
                            ClassUtils.forName(nth.getUntrustedType(), null));
                }
                catch (Exception e) {
                    logger.error(e, () -> "Could not decode header: " + nth);
                }
            }
        }
        return value;
    }

    @SuppressWarnings("unchecked")
    @Nullable
    private Map<String, String> decodeJsonTypes(Headers source) {
        Map<String, String> types = null;
        Header jsonTypes = source.lastHeader(JSON_TYPES);
        if (jsonTypes != null) {
            ObjectMapper headerObjectMapper = getObjectMapper();
            try {
                types = headerObjectMapper.readValue(jsonTypes.value(), Map.class);
            }
            catch (IOException e) {
                logger.error(e, () -> "Could not decode json types: "
                        + new String(jsonTypes.value(), getCharset()));
            }
        }
        return types;
    }

    /**
     * Return true if the type name is trusted for deserialization: the NTHT wrapper
     * itself, a primitive array type, or a class in (or under) a trusted package.
     * An empty trusted-packages set trusts everything.
     * @param requestedType the fully-qualified (or array binary) type name.
     * @return true if trusted.
     */
    protected boolean trusted(String requestedType) {
        if (requestedType.equals(NonTrustedHeaderType.class.getName())) {
            return true;
        }
        if (TRUSTED_ARRAY_TYPES.contains(requestedType)) {
            return true;
        }
        // For object arrays ("[Lcom.foo.Bar;") skip the "[L" prefix to get the element class.
        String type = requestedType.startsWith("[") ? requestedType.substring(2) : requestedType;
        if (!this.trustedPackages.isEmpty()) {
            int lastDot = type.lastIndexOf('.');
            if (lastDot < 0) {
                return false;
            }
            String packageName = type.substring(0, lastDot);
            for (String trustedPackage : this.trustedPackages) {
                if (packageName.equals(trustedPackage) || packageName.startsWith(trustedPackage + ".")) {
                    return true;
                }
            }
            return false;
        }
        return true;
    }

    /**
     * The {@link StdNodeBasedDeserializer} extension for {@link MimeType} deserialization.
     * It is presented here for backward compatibility when older producers send {@link MimeType}
     * headers as serialization version.
     */
    private class MimeTypeJsonDeserializer extends StdNodeBasedDeserializer<MimeType> {

        private static final long serialVersionUID = 1L;

        MimeTypeJsonDeserializer() {
            super(MimeType.class);
        }

        @Override
        public MimeType convert(JsonNode root, DeserializationContext ctxt) throws IOException {
            if (root instanceof TextNode) {
                // Modern form: a plain "type/subtype" string.
                return MimeType.valueOf(root.asText());
            }
            else {
                // Legacy object form with explicit type/subtype/parameters fields.
                JsonNode type = root.get("type");
                JsonNode subType = root.get("subtype");
                JsonNode parameters = root.get("parameters");
                Map<String, String> params =
                        DefaultKafkaHeaderMapper.this.objectMapper.readValue(parameters.traverse(),
                                TypeFactory.defaultInstance()
                                        .constructMapType(HashMap.class, String.class, String.class));
                return new MimeType(type.asText(), subType.asText(), params);
            }
        }

    }

    /**
     * Represents a header that could not be decoded due to an untrusted type.
     */
    public static class NonTrustedHeaderType {

        private byte[] headerValue;

        private String untrustedType;

        public NonTrustedHeaderType() {
        }

        NonTrustedHeaderType(byte[] headerValue, String untrustedType) { // NOSONAR
            this.headerValue = headerValue; // NOSONAR
            this.untrustedType = untrustedType;
        }

        public void setHeaderValue(byte[] headerValue) { // NOSONAR
            this.headerValue = headerValue; // NOSONAR array reference
        }

        public byte[] getHeaderValue() {
            return this.headerValue; // NOSONAR
        }

        public void setUntrustedType(String untrustedType) {
            this.untrustedType = untrustedType;
        }

        public String getUntrustedType() {
            return this.untrustedType;
        }

        @Override
        public String toString() {
            try {
                return "NonTrustedHeaderType [headerValue=" + new String(this.headerValue, StandardCharsets.UTF_8)
                        + ", untrustedType=" + this.untrustedType + "]";
            }
            catch (@SuppressWarnings("unused") Exception e) {
                return "NonTrustedHeaderType [headerValue=" + Arrays.toString(this.headerValue) + ", untrustedType="
                        + this.untrustedType + "]";
            }
        }

    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/EndpointHandlerMethod.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.lang.reflect.Method;
import java.util.Arrays;
import org.springframework.beans.factory.BeanCurrentlyInCreationException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.util.Assert;
import org.springframework.util.ReflectionUtils;
/**
 * Handler method for retrying endpoints. Holds either a resolved bean plus
 * {@link Method}, or a bean name / class plus method name to be resolved later
 * against a {@link BeanFactory}.
 *
 * @author Tomaz Fernandes
 * @author Gary Russell
 * @since 2.7
 *
 */
public class EndpointHandlerMethod {

	private final Object beanOrClass;

	private final String methodName;

	private Object bean;

	private Method method;

	/**
	 * Construct an instance from a bean name or class and a method name; the
	 * actual bean and {@link Method} are resolved lazily.
	 * @param beanOrClass the bean name (String) or bean class.
	 * @param methodName the name of the handler method.
	 */
	public EndpointHandlerMethod(Object beanOrClass, String methodName) {
		Assert.notNull(beanOrClass, () -> "No destination bean or class provided!");
		Assert.notNull(methodName, () -> "No method name for destination bean class provided!");
		this.beanOrClass = beanOrClass;
		this.methodName = methodName;
	}

	/**
	 * Construct an instance from an already-resolved bean and method.
	 * @param bean the bean instance.
	 * @param method the handler method.
	 */
	public EndpointHandlerMethod(Object bean, Method method) {
		Assert.notNull(bean, () -> "No bean for destination provided!");
		Assert.notNull(method, () -> "No method for destination bean class provided!");
		this.method = method;
		this.bean = bean;
		this.beanOrClass = bean.getClass();
		this.methodName = method.getName();
	}

	/**
	 * Return the method.
	 * @return the method.
	 */
	public Method getMethod() {
		if (this.beanOrClass instanceof Class) {
			return forClass((Class<?>) this.beanOrClass);
		}
		Assert.state(this.bean != null, "Bean must be resolved before accessing its method");
		if (this.bean instanceof EndpointHandlerMethod) {
			// resolveBean() stored "this" as a placeholder when the real bean was
			// still in creation; return a universally available no-arg method.
			try {
				return Object.class.getMethod("toString");
			}
			catch (NoSuchMethodException | SecurityException e) {
				// toString() always exists on Object, so this can only happen in a
				// broken environment (e.g. a restrictive SecurityManager); fail
				// loudly instead of silently swallowing the exception.
				throw new IllegalStateException("Unable to obtain Object#toString() fallback method", e);
			}
		}
		return forClass(this.bean.getClass());
	}

	/**
	 * Return the method name.
	 * @return the name.
	 * @since 2.8
	 */
	public String getMethodName() {
		Assert.state(this.methodName != null, "Unexpected call to getMethodName()");
		return this.methodName;
	}

	/**
	 * Resolve (and cache) the bean from the given factory. When the class has no
	 * bean definition, one is registered on the fly. If the bean is currently in
	 * creation, this instance itself is stored as a placeholder (see
	 * {@link #getMethod()}).
	 * @param beanFactory the factory to resolve against.
	 * @return the resolved bean.
	 */
	public Object resolveBean(BeanFactory beanFactory) {
		if (this.bean instanceof EndpointHandlerMethod) {
			// A previous resolution hit BeanCurrentlyInCreationException; expose the
			// original bean name / class instead of the placeholder.
			return ((EndpointHandlerMethod) this.bean).beanOrClass;
		}
		if (this.bean == null) {
			try {
				if (this.beanOrClass instanceof Class) {
					Class<?> clazz = (Class<?>) this.beanOrClass;
					try {
						this.bean = beanFactory.getBean(clazz);
					}
					catch (NoSuchBeanDefinitionException e) {
						// No bean of that type - register a fresh definition and use it.
						String beanName = clazz.getSimpleName() + "-handlerMethod";
						((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(beanName,
								new RootBeanDefinition(clazz));
						this.bean = beanFactory.getBean(beanName);
					}
				}
				else {
					String beanName = (String) this.beanOrClass;
					this.bean = beanFactory.getBean(beanName);
				}
			}
			catch (BeanCurrentlyInCreationException ex) {
				// Circular reference during container startup; remember this and
				// retry transparently via the placeholder logic above.
				this.bean = this;
			}
		}
		return this.bean;
	}

	// Locate (and cache) the first declared method with the configured name.
	// NOTE: with overloaded methods the choice among same-named methods is
	// whichever getDeclaredMethods() returns first.
	private Method forClass(Class<?> clazz) {
		if (this.method == null) {
			this.method = Arrays.stream(ReflectionUtils.getDeclaredMethods(clazz))
					.filter(mthd -> mthd.getName().equals(this.methodName))
					.findFirst()
					.orElseThrow(() -> new IllegalArgumentException(
							String.format("No method %s in class %s", this.methodName, clazz)));
		}
		return this.method;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/ExponentialBackOffWithMaxRetries.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import org.springframework.util.backoff.ExponentialBackOff;
/**
 * An {@link ExponentialBackOff} variant that is configured with a maximum number
 * of retries rather than a maximum elapsed time; the elapsed-time limit is
 * derived from the retry count and the interval settings.
 *
 * @author Gary Russell
 * @since 2.7.3
 *
 */
public class ExponentialBackOffWithMaxRetries extends ExponentialBackOff {

	private final int maxRetries;

	/**
	 * Construct an instance that will calculate the {@link #setMaxElapsedTime(long)} from
	 * the maxRetries.
	 * @param maxRetries the max retries.
	 */
	public ExponentialBackOffWithMaxRetries(int maxRetries) {
		this.maxRetries = maxRetries;
		calculateMaxElapsed();
	}

	/**
	 * Get the max retries.
	 * @return the max retries.
	 */
	public int getMaxRetries() {
		return this.maxRetries;
	}

	@Override
	public void setInitialInterval(long initialInterval) {
		super.setInitialInterval(initialInterval);
		calculateMaxElapsed();
	}

	@Override
	public void setMultiplier(double multiplier) {
		super.setMultiplier(multiplier);
		calculateMaxElapsed();
	}

	@Override
	public void setMaxInterval(long maxInterval) {
		super.setMaxInterval(maxInterval);
		calculateMaxElapsed();
	}

	/**
	 * Unsupported; the elapsed time limit is derived from the retry count.
	 * @throws IllegalStateException always.
	 */
	@Override
	public void setMaxElapsedTime(long maxElapsedTime) {
		throw new IllegalStateException("'maxElapsedTime' is calculated from the 'maxRetries' property");
	}

	// Recompute the elapsed-time budget as the sum of the first maxRetries
	// back-off intervals, each capped at getMaxInterval().
	private void calculateMaxElapsed() {
		long cap = getMaxInterval();
		long interval = Math.min(getInitialInterval(), cap);
		long total = interval;
		for (int remaining = this.maxRetries - 1; remaining > 0; remaining--) {
			interval = Math.min((long) (interval * getMultiplier()), cap);
			total += interval;
		}
		super.setMaxElapsedTime(total);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/JacksonMimeTypeModule.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.io.IOException;
import org.springframework.util.MimeType;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.module.SimpleModule;
/**
 * A {@link SimpleModule} extension for {@link MimeType} serialization.
 * Registering this module makes Jackson write {@link MimeType} values as plain
 * strings (e.g. {@code "application/json"}) instead of object trees.
 *
 * @author Artem Bilan
 *
 * @since 2.3
 */
public final class JacksonMimeTypeModule extends SimpleModule {

	private static final long serialVersionUID = 1L;

	public JacksonMimeTypeModule() {
		// Register the serializer eagerly; applies to MimeType and its subclasses
		// (such as Spring's MediaType).
		addSerializer(MimeType.class, new MimeTypeSerializer());
	}

	/**
	 * Simple {@link JsonSerializer} extension to represent a {@link MimeType} object in the
	 * target JSON as a plain string.
	 */
	private static final class MimeTypeSerializer extends JsonSerializer<MimeType> {

		MimeTypeSerializer() {
		}

		@Override
		public void serialize(MimeType value, JsonGenerator generator, SerializerProvider serializers)
				throws IOException {
			// MimeType.toString() produces the canonical "type/subtype;params" form.
			generator.writeString(value.toString());
		}

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/JacksonPresent.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import org.springframework.util.ClassUtils;
/**
 * The utility to check if Jackson JSON processor is present in the classpath.
 * The check is performed once at class-initialization time and cached.
 *
 * @author Artem Bilan
 * @author Gary Russell
 *
 * @since 1.3
 */
public final class JacksonPresent {

	private static final ClassLoader CLASS_LOADER = ClassUtils.getDefaultClassLoader(); // NOSONAR

	// Both jackson-databind and jackson-core must be present.
	private static final boolean JACKSON_2_PRESENT = // NOSONAR
			ClassUtils.isPresent("com.fasterxml.jackson.databind.ObjectMapper", CLASS_LOADER)
					&& ClassUtils.isPresent("com.fasterxml.jackson.core.JsonGenerator", CLASS_LOADER);

	private JacksonPresent() {
	}

	public static boolean isJackson2Present() {
		return JACKSON_2_PRESENT;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/JacksonUtils.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import org.springframework.util.ClassUtils;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.json.JsonMapper;
/**
 * The utilities for Jackson {@link ObjectMapper} instances.
 *
 * @author Artem Bilan
 *
 * @since 2.3
 */
public final class JacksonUtils {

	// Presence flags computed once at class-init; each guards registration of an
	// optional Jackson module so missing optional dependencies are tolerated.
	private static final boolean JDK8_MODULE_PRESENT =
			ClassUtils.isPresent("com.fasterxml.jackson.datatype.jdk8.Jdk8Module", null);

	private static final boolean JAVA_TIME_MODULE_PRESENT =
			ClassUtils.isPresent("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule", null);

	private static final boolean JODA_MODULE_PRESENT =
			ClassUtils.isPresent("com.fasterxml.jackson.datatype.joda.JodaModule", null);

	// Kotlin support requires both the Kotlin runtime and the Jackson Kotlin module.
	private static final boolean KOTLIN_MODULE_PRESENT =
			ClassUtils.isPresent("kotlin.Unit", null) &&
					ClassUtils.isPresent("com.fasterxml.jackson.module.kotlin.KotlinModule", null);

	/**
	 * Factory for {@link ObjectMapper} instances with registered well-known modules
	 * and disabled {@link MapperFeature#DEFAULT_VIEW_INCLUSION} and
	 * {@link DeserializationFeature#FAIL_ON_UNKNOWN_PROPERTIES} features.
	 * @return the {@link ObjectMapper} instance.
	 */
	public static ObjectMapper enhancedObjectMapper() {
		ObjectMapper objectMapper = JsonMapper.builder()
				.configure(MapperFeature.DEFAULT_VIEW_INCLUSION, false)
				.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
				.build();
		registerWellKnownModulesIfAvailable(objectMapper);
		return objectMapper;
	}

	/**
	 * Factory for {@link ObjectMapper} instances with registered well-known modules
	 * and disabled {@link MapperFeature#DEFAULT_VIEW_INCLUSION} and
	 * {@link DeserializationFeature#FAIL_ON_UNKNOWN_PROPERTIES} features.
	 * @param classLoader the {@link ClassLoader} for modules to register (ignored).
	 * @return the {@link ObjectMapper} instance.
	 * @deprecated since 2.7.5 in favor of {@link #enhancedObjectMapper()}
	 */
	@Deprecated
	public static ObjectMapper enhancedObjectMapper(ClassLoader classLoader) {
		return enhancedObjectMapper();
	}

	// Register each optional module only when its presence flag is set; the holder
	// classes below keep the optional classes from being loaded otherwise.
	private static void registerWellKnownModulesIfAvailable(ObjectMapper objectMapper) {
		objectMapper.registerModule(new JacksonMimeTypeModule());
		if (JDK8_MODULE_PRESENT) {
			objectMapper.registerModule(Jdk8ModuleProvider.MODULE);
		}
		if (JAVA_TIME_MODULE_PRESENT) {
			objectMapper.registerModule(JavaTimeModuleProvider.MODULE);
		}
		if (JODA_MODULE_PRESENT) {
			objectMapper.registerModule(JodaModuleProvider.MODULE);
		}
		if (KOTLIN_MODULE_PRESENT) {
			objectMapper.registerModule(KotlinModuleProvider.MODULE);
		}
	}

	private JacksonUtils() {
	}

	// Lazy-initialization holders: the optional module classes are referenced only
	// here, so they are loaded on first access, after the presence checks above
	// have confirmed they exist (avoids NoClassDefFoundError when absent).
	private static final class Jdk8ModuleProvider {

		static final com.fasterxml.jackson.databind.Module MODULE =
				new com.fasterxml.jackson.datatype.jdk8.Jdk8Module();

	}

	private static final class JavaTimeModuleProvider {

		static final com.fasterxml.jackson.databind.Module MODULE =
				new com.fasterxml.jackson.datatype.jsr310.JavaTimeModule();

	}

	private static final class JodaModuleProvider {

		static final com.fasterxml.jackson.databind.Module MODULE =
				new com.fasterxml.jackson.datatype.joda.JodaModule();

	}

	private static final class KotlinModuleProvider {

		@SuppressWarnings("deprecation")
		static final com.fasterxml.jackson.databind.Module MODULE =
				new com.fasterxml.jackson.module.kotlin.KotlinModule();

	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/JavaUtils.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import org.springframework.lang.Nullable;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
/**
 * Chained utility methods to simplify some Java repetitive code. Obtain a reference to
 * the singleton {@link #INSTANCE} and then chain calls to the utility methods.
 *
 * @author Gary Russell
 * @author Artem Bilan
 *
 * @since 2.3
 */
public final class JavaUtils {

	/**
	 * The singleton instance of this utility class.
	 */
	public static final JavaUtils INSTANCE = new JavaUtils();

	private JavaUtils() {
	}

	/**
	 * Invoke {@link Consumer#accept(Object)} with the value if the condition is true.
	 * @param condition the condition.
	 * @param value the value.
	 * @param consumer the consumer.
	 * @param <T> the value type.
	 * @return this.
	 */
	public <T> JavaUtils acceptIfCondition(boolean condition, T value, Consumer<T> consumer) {
		if (!condition) {
			return this;
		}
		consumer.accept(value);
		return this;
	}

	/**
	 * Invoke {@link Consumer#accept(Object)} with the value if it is not null.
	 * @param value the value.
	 * @param consumer the consumer.
	 * @param <T> the value type.
	 * @return this.
	 */
	public <T> JavaUtils acceptIfNotNull(@Nullable T value, Consumer<T> consumer) {
		if (value == null) {
			return this;
		}
		consumer.accept(value);
		return this;
	}

	/**
	 * Invoke {@link Consumer#accept(Object)} with the value if it contains text.
	 * @param value the value.
	 * @param consumer the consumer.
	 * @return this.
	 */
	public JavaUtils acceptIfHasText(String value, Consumer<String> consumer) {
		if (!StringUtils.hasText(value)) {
			return this;
		}
		consumer.accept(value);
		return this;
	}

	/**
	 * Invoke {@link Consumer#accept(Object)} with the list if it is not null or empty.
	 * @param value the value.
	 * @param consumer the consumer.
	 * @param <T> the value type.
	 * @return this.
	 */
	public <T> JavaUtils acceptIfNotEmpty(List<T> value, Consumer<List<T>> consumer) {
		if (CollectionUtils.isEmpty(value)) {
			return this;
		}
		consumer.accept(value);
		return this;
	}

	/**
	 * Invoke {@link Consumer#accept(Object)} with the array if it is not null or empty.
	 * @param value the value.
	 * @param consumer the consumer.
	 * @param <T> the value type.
	 * @return this.
	 */
	public <T> JavaUtils acceptIfNotEmpty(T[] value, Consumer<T[]> consumer) {
		if (ObjectUtils.isEmpty(value)) {
			return this;
		}
		consumer.accept(value);
		return this;
	}

	/**
	 * Invoke {@link BiConsumer#accept(Object, Object)} with the arguments if the
	 * condition is true.
	 * @param condition the condition.
	 * @param t1 the first consumer argument
	 * @param t2 the second consumer argument
	 * @param consumer the consumer.
	 * @param <T1> the first argument type.
	 * @param <T2> the second argument type.
	 * @return this.
	 */
	public <T1, T2> JavaUtils acceptIfCondition(boolean condition, T1 t1, T2 t2, BiConsumer<T1, T2> consumer) {
		if (!condition) {
			return this;
		}
		consumer.accept(t1, t2);
		return this;
	}

	/**
	 * Invoke {@link BiConsumer#accept(Object, Object)} with the arguments if the t2
	 * argument is not null.
	 * @param t1 the first argument
	 * @param t2 the second consumer argument
	 * @param consumer the consumer.
	 * @param <T1> the first argument type.
	 * @param <T2> the second argument type.
	 * @return this.
	 */
	public <T1, T2> JavaUtils acceptIfNotNull(T1 t1, T2 t2, BiConsumer<T1, T2> consumer) {
		if (t2 == null) {
			return this;
		}
		consumer.accept(t1, t2);
		return this;
	}

	/**
	 * Invoke {@link BiConsumer#accept(Object, Object)} with the arguments if the value
	 * argument contains text.
	 * @param t1 the first consumer argument.
	 * @param value the second consumer argument
	 * @param <T> the first argument type.
	 * @param consumer the consumer.
	 * @return this.
	 */
	public <T> JavaUtils acceptIfHasText(T t1, String value, BiConsumer<T, String> consumer) {
		if (!StringUtils.hasText(value)) {
			return this;
		}
		consumer.accept(t1, value);
		return this;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/KafkaHeaderMapper.java | /*
* Copyright 2017-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.Map;
import org.apache.kafka.common.header.Headers;
import org.springframework.messaging.MessageHeaders;
/**
 * Header mapper for Apache Kafka. Implementations translate between Spring's
 * abstracted {@link MessageHeaders} and Kafka's native record {@link Headers}.
 *
 * @author Gary Russell
 * @since 1.3
 *
 */
public interface KafkaHeaderMapper {

	/**
	 * Map from the given {@link MessageHeaders} to the specified target headers
	 * (outbound direction, when producing a record).
	 * @param headers the abstracted MessageHeaders.
	 * @param target the native target headers.
	 */
	void fromHeaders(MessageHeaders headers, Headers target);

	/**
	 * Map from the given native headers to the target map (inbound direction,
	 * when consuming a record); the map is typically used to build
	 * {@link MessageHeaders}.
	 * @param source the native source headers.
	 * @param target the map to populate with mapped header entries.
	 */
	void toHeaders(Headers source, Map<String, Object> target);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/KafkaHeaders.java | /*
* Copyright 2014-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
/**
 * The Kafka specific message headers constants.
 *
 * @author Artem Bilan
 * @author Marius Bogoevici
 * @author Gary Russell
 * @author Biju Kunjummen
 */
public abstract class KafkaHeaders {

	/**
	 * The prefix for Kafka headers.
	 */
	public static final String PREFIX = "kafka_";

	/**
	 * The prefix for Kafka headers containing 'received' values.
	 */
	public static final String RECEIVED = PREFIX + "received";

	/**
	 * The header containing the topic when sending data to Kafka.
	 */
	public static final String TOPIC = PREFIX + "topic";

	/**
	 * The header containing the message key when sending data to Kafka.
	 */
	public static final String MESSAGE_KEY = PREFIX + "messageKey";

	/**
	 * The header containing the topic partition when sending data to Kafka.
	 */
	public static final String PARTITION_ID = PREFIX + "partitionId";

	/**
	 * The header for the partition offset.
	 */
	public static final String OFFSET = PREFIX + "offset";

	/**
	 * The header containing the raw data received from Kafka ({@code ConsumerRecord} or
	 * {@code ConsumerRecords}). Usually used to enhance error messages.
	 */
	public static final String RAW_DATA = PREFIX + "data";

	/**
	 * The header containing the {@code RecordMetadata} object after successful send to the topic.
	 */
	public static final String RECORD_METADATA = PREFIX + "recordMetadata";

	/**
	 * The header for the {@link Acknowledgment}.
	 */
	public static final String ACKNOWLEDGMENT = PREFIX + "acknowledgment";

	/**
	 * The header for the {@code Consumer} object.
	 */
	public static final String CONSUMER = PREFIX + "consumer";

	/**
	 * The header containing the topic from which the message was received.
	 */
	public static final String RECEIVED_TOPIC = RECEIVED + "Topic";

	/**
	 * The header containing the message key for the received message.
	 */
	public static final String RECEIVED_MESSAGE_KEY = RECEIVED + "MessageKey";

	/**
	 * The header containing the topic partition for the received message.
	 */
	public static final String RECEIVED_PARTITION_ID = RECEIVED + "PartitionId";

	/**
	 * The header for holding the {@link org.apache.kafka.common.record.TimestampType type} of timestamp.
	 */
	public static final String TIMESTAMP_TYPE = PREFIX + "timestampType";

	/**
	 * The header for holding the timestamp of the producer record.
	 */
	public static final String TIMESTAMP = PREFIX + "timestamp";

	/**
	 * The header for holding the timestamp of the consumer record.
	 */
	public static final String RECEIVED_TIMESTAMP = PREFIX + "receivedTimestamp";

	/**
	 * The header for holding the native headers of the consumer record; only provided
	 * if no header mapper is present.
	 */
	public static final String NATIVE_HEADERS = PREFIX + "nativeHeaders";

	/**
	 * The header for a list of Maps of converted native Kafka headers. Used for batch
	 * listeners; the map at a particular list position corresponds to the data in the
	 * payload list position.
	 */
	public static final String BATCH_CONVERTED_HEADERS = PREFIX + "batchConvertedHeaders";

	/**
	 * The header containing information to correlate requests/replies.
	 * Type: byte[].
	 * @since 2.1.3
	 */
	public static final String CORRELATION_ID = PREFIX + "correlationId";

	/**
	 * The header containing the default reply topic.
	 * Type: byte[].
	 * @since 2.1.3
	 */
	public static final String REPLY_TOPIC = PREFIX + "replyTopic";

	/**
	 * The header containing a partition number on which to send the reply.
	 * Type: binary (int) in byte[].
	 * @since 2.1.3
	 */
	public static final String REPLY_PARTITION = PREFIX + "replyPartition";

	/**
	 * Exception class name for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_EXCEPTION_FQCN = PREFIX + "dlt-exception-fqcn";

	/**
	 * Exception cause class name for a record published to a dead-letter topic.
	 * @since 2.8
	 */
	public static final String DLT_EXCEPTION_CAUSE_FQCN = PREFIX + "dlt-exception-cause-fqcn";

	/**
	 * Exception stack trace for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_EXCEPTION_STACKTRACE = PREFIX + "dlt-exception-stacktrace";

	/**
	 * Exception message for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_EXCEPTION_MESSAGE = PREFIX + "dlt-exception-message";

	/**
	 * Exception stack trace for a record published to a dead-letter topic with a key
	 * deserialization exception.
	 * @since 2.7
	 */
	public static final String DLT_KEY_EXCEPTION_STACKTRACE = PREFIX + "dlt-key-exception-stacktrace";

	/**
	 * Exception message for a record published to a dead-letter topic with a key
	 * deserialization exception.
	 * @since 2.7
	 */
	public static final String DLT_KEY_EXCEPTION_MESSAGE = PREFIX + "dlt-key-exception-message";

	/**
	 * Exception class name for a record published to a dead-letter topic with a key
	 * deserialization exception.
	 * @since 2.7
	 */
	public static final String DLT_KEY_EXCEPTION_FQCN = PREFIX + "dlt-key-exception-fqcn";

	/**
	 * Original topic for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_ORIGINAL_TOPIC = PREFIX + "dlt-original-topic";

	/**
	 * Original partition for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_ORIGINAL_PARTITION = PREFIX + "dlt-original-partition";

	/**
	 * Original offset for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_ORIGINAL_OFFSET = PREFIX + "dlt-original-offset";

	/**
	 * Consumer group that failed to consume a record published to a dead-letter topic.
	 * @since 2.8
	 */
	public static final String DLT_ORIGINAL_CONSUMER_GROUP = PREFIX + "dlt-original-consumer-group";

	/**
	 * Original timestamp for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_ORIGINAL_TIMESTAMP = PREFIX + "dlt-original-timestamp";

	/**
	 * Original timestamp type for a record published to a dead-letter topic.
	 * @since 2.2
	 */
	public static final String DLT_ORIGINAL_TIMESTAMP_TYPE = PREFIX + "dlt-original-timestamp-type";

	/**
	 * For inbound messages, the container's {@code group.id} consumer property.
	 * @since 2.3
	 */
	public static final String GROUP_ID = PREFIX + "groupId";

	/**
	 * For inbound messages, when container retries are enabled the delivery attempt.
	 * @since 2.5
	 */
	public static final String DELIVERY_ATTEMPT = PREFIX + "deliveryAttempt";

	/**
	 * Exception class name for a record published to another topic.
	 * @since 2.2
	 */
	public static final String EXCEPTION_FQCN = PREFIX + "exception-fqcn";

	/**
	 * Exception cause class name for a record published to another topic.
	 * Previously this erroneously duplicated the {@link #EXCEPTION_FQCN} value
	 * ("exception-fqcn"), so the two headers collided; it now has its own name,
	 * consistent with {@link #DLT_EXCEPTION_CAUSE_FQCN}.
	 * @since 2.8
	 */
	public static final String EXCEPTION_CAUSE_FQCN = PREFIX + "exception-cause-fqcn";

	/**
	 * Exception stack trace for a record published to another topic.
	 * @since 2.2
	 */
	public static final String EXCEPTION_STACKTRACE = PREFIX + "exception-stacktrace";

	/**
	 * Exception message for a record published to another topic.
	 * @since 2.2
	 */
	public static final String EXCEPTION_MESSAGE = PREFIX + "exception-message";

	/**
	 * Exception stack trace for a record published to another topic with a key
	 * deserialization exception.
	 * @since 2.7
	 */
	public static final String KEY_EXCEPTION_STACKTRACE = PREFIX + "key-exception-stacktrace";

	/**
	 * Exception message for a record published to another topic with a key
	 * deserialization exception.
	 * @since 2.7
	 */
	public static final String KEY_EXCEPTION_MESSAGE = PREFIX + "key-exception-message";

	/**
	 * Exception class name for a record published to another topic with a key
	 * deserialization exception.
	 * @since 2.7
	 */
	public static final String KEY_EXCEPTION_FQCN = PREFIX + "key-exception-fqcn";

	/**
	 * Original topic for a record published to another topic.
	 * @since 2.2
	 */
	public static final String ORIGINAL_TOPIC = PREFIX + "original-topic";

	/**
	 * Original partition for a record published to another topic.
	 * @since 2.2
	 */
	public static final String ORIGINAL_PARTITION = PREFIX + "original-partition";

	/**
	 * Original offset for a record published to another topic.
	 * @since 2.2
	 */
	public static final String ORIGINAL_OFFSET = PREFIX + "original-offset";

	/**
	 * Consumer group that failed to consume a record published to another topic.
	 * Previously this erroneously duplicated the {@link #DLT_ORIGINAL_CONSUMER_GROUP}
	 * value ("dlt-original-consumer-group"); it now follows the non-DLT naming
	 * pattern of its siblings.
	 * @since 2.8
	 */
	public static final String ORIGINAL_CONSUMER_GROUP = PREFIX + "original-consumer-group";

	/**
	 * Original timestamp for a record published to another topic.
	 * @since 2.2
	 */
	public static final String ORIGINAL_TIMESTAMP = PREFIX + "original-timestamp";

	/**
	 * Original timestamp type for a record published to another topic.
	 * @since 2.2
	 */
	public static final String ORIGINAL_TIMESTAMP_TYPE = PREFIX + "original-timestamp-type";

	/**
	 * The header containing a list of conversion failures (for batch listeners only).
	 * Type: {@code List<ConversionException>}.
	 * @since 2.8
	 */
	public static final String CONVERSION_FAILURES = PREFIX + "conversionFailures";

	/**
	 * Arbitrary static information about the listener receiving this record.
	 * @since 2.8.4
	 */
	public static final String LISTENER_INFO = PREFIX + "listenerInfo";

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/KafkaNull.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
/**
 * This class represents NULL Kafka payload. Kafka cannot carry a literal null
 * through the Spring Messaging abstractions, so this singleton marker is used
 * as the payload instead and translated to/from a null record value.
 *
 * @author Dariusz Szablinski
 * @author Gary Russell
 * @since 1.0.3
 */
public final class KafkaNull {

	/**
	 * Instance of KafkaNull.
	 */
	public static final KafkaNull INSTANCE = new KafkaNull();

	// Singleton: no other instances may be created.
	private KafkaNull() {
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/KafkaStreamBrancher.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.Consumer;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Predicate;
/**
* Provides a method-chaining way to build {@link org.apache.kafka.streams.kstream.KStream#branch branches} in
* Kafka Streams processor topology.
* <p>
* Example of usage:
* <pre>
* {@code
* new KafkaStreamBrancher<String, String>()
* .branch((key, value) -> value.contains("A"), ks->ks.to("A"))
* .branch((key, value) -> value.contains("B"), ks->ks.to("B"))
* //default branch should not necessarily be defined in the end
* .defaultBranch(ks->ks.to("C"))
* .onTopOf(builder.stream("source"))
* }
* </pre>
*
* @param <K> Type of keys
* @param <V> Type of values
*
* @author Ivan Ponomarev
* @author Artem Bilan
*
* @since 2.2.4
*/
public final class KafkaStreamBrancher<K, V> {

	private final List<Predicate<? super K, ? super V>> predicateList = new ArrayList<>();

	private final List<Consumer<? super KStream<K, V>>> consumerList = new ArrayList<>();

	private Consumer<? super KStream<K, V>> defaultConsumer;

	/**
	 * Register a new branch: records matched by the predicate are routed to the
	 * {@code KStream} handled by the given consumer.
	 * @param predicate {@link Predicate} instance selecting records for this branch.
	 * @param consumer the consumer of this branch's {@code KStream}.
	 * @return {@code this} for method chaining.
	 */
	public KafkaStreamBrancher<K, V> branch(Predicate<? super K, ? super V> predicate,
			Consumer<? super KStream<K, V>> consumer) {

		this.predicateList.add(Objects.requireNonNull(predicate));
		this.consumerList.add(Objects.requireNonNull(consumer));
		return this;
	}

	/**
	 * Register the default branch. Records not dispatched to any other branch are
	 * directed to this stream. This method does not have to be the last call in
	 * the chain.
	 * @param consumer the consumer of the default branch's {@code KStream}.
	 * @return {@code this} for method chaining.
	 */
	public KafkaStreamBrancher<K, V> defaultBranch(Consumer<? super KStream<K, V>> consumer) {
		this.defaultConsumer = Objects.requireNonNull(consumer);
		return this;
	}

	/**
	 * Terminating method that builds the registered branches on top of the given
	 * {@code KStream}.
	 * @param stream the {@code KStream} to split.
	 * @return the provided stream.
	 */
	public KStream<K, V> onTopOf(KStream<K, V> stream) {
		if (this.defaultConsumer != null) {
			// The default branch is a catch-all predicate appended last.
			this.predicateList.add((key, value) -> true);
			this.consumerList.add(this.defaultConsumer);
		}
		@SuppressWarnings({ "unchecked", "rawtypes" })
		Predicate<? super K, ? super V>[] predicateArray = this.predicateList.toArray(new Predicate[0]);
		@SuppressWarnings("deprecation")
		KStream<K, V>[] branches = stream.branch(predicateArray);
		// branch() returns one stream per predicate, in registration order.
		int index = 0;
		for (Consumer<? super KStream<K, V>> branchConsumer : this.consumerList) {
			branchConsumer.accept(branches[index++]);
		}
		return stream;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/KafkaUtils.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.time.Duration;
import java.util.Collection;
import java.util.Map;
import java.util.function.Function;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.messaging.Message;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
* Utility methods.
*
* @author Gary Russell
*
* @since 2.2
*
*/
public final class KafkaUtils {

	private static Function<ProducerRecord<?, ?>, String> prFormatter = Object::toString;

	private static Function<ConsumerRecord<?, ?>, String> crFormatter =
			rec -> rec.topic() + "-" + rec.partition() + "@" + rec.offset();

	/**
	 * True if micrometer is on the class path.
	 */
	public static final boolean MICROMETER_PRESENT = ClassUtils.isPresent(
			"io.micrometer.core.instrument.MeterRegistry", KafkaUtils.class.getClassLoader());

	// Holds the group id of the consumer bound to the current thread, if any.
	private static final ThreadLocal<String> GROUP_IDS = new ThreadLocal<>();

	/**
	 * Return true if the method return type is {@link Message} or
	 * {@code Collection<Message<?>>}.
	 * @param method the method.
	 * @return true if it returns message(s).
	 */
	public static boolean returnTypeMessageOrCollectionOf(Method method) {
		Type returnType = method.getGenericReturnType();
		if (Message.class.equals(returnType)) {
			return true;
		}
		if (!(returnType instanceof ParameterizedType)) {
			return false;
		}
		ParameterizedType parameterized = (ParameterizedType) returnType;
		Type rawType = parameterized.getRawType();
		if (Message.class.equals(rawType)) {
			return true;
		}
		if (!Collection.class.equals(rawType)) {
			return false;
		}
		// Inspect the collection's element type: Message or Message<...>.
		Type elementType = parameterized.getActualTypeArguments()[0];
		if (Message.class.equals(elementType)) {
			return true;
		}
		return elementType instanceof ParameterizedType
				&& Message.class.equals(((ParameterizedType) elementType).getRawType());
	}

	/**
	 * Set the group id for the consumer bound to this thread.
	 * @param groupId the group id.
	 * @since 2.3
	 */
	public static void setConsumerGroupId(String groupId) {
		GROUP_IDS.set(groupId);
	}

	/**
	 * Get the group id for the consumer bound to this thread.
	 * @return the group id.
	 * @since 2.3
	 */
	public static String getConsumerGroupId() {
		return GROUP_IDS.get();
	}

	/**
	 * Clear the group id for the consumer bound to this thread.
	 * @since 2.3
	 */
	public static void clearConsumerGroupId() {
		GROUP_IDS.remove();
	}

	/**
	 * Return the timeout to use when sending records. If the
	 * {@link ProducerConfig#DELIVERY_TIMEOUT_MS_CONFIG} is not configured, or is not a
	 * number or a String that can be parsed as a long, the {@link ProducerConfig} default
	 * value (plus the buffer) is used.
	 * @param producerProps the producer properties.
	 * @param buffer a buffer to add to the configured
	 * {@link ProducerConfig#DELIVERY_TIMEOUT_MS_CONFIG} to prevent timing out before the
	 * Kafka producer.
	 * @param min a minimum value to apply after adding the buffer to the configured
	 * timeout.
	 * @return the timeout to use.
	 * @since 2.7
	 */
	public static Duration determineSendTimeout(Map<String, Object> producerProps, long buffer, long min) {
		Object configured = producerProps.get(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG);
		Long deliveryTimeout = null;
		if (configured instanceof Number) {
			deliveryTimeout = ((Number) configured).longValue();
		}
		else if (configured instanceof String) {
			try {
				deliveryTimeout = Long.parseLong((String) configured);
			}
			catch (@SuppressWarnings("unused") NumberFormatException ex) {
				// Unparsable value: fall back to the Kafka default below.
			}
		}
		if (deliveryTimeout == null) {
			deliveryTimeout = ((Integer) ProducerConfig.configDef().defaultValues()
					.get(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG)).longValue();
		}
		return Duration.ofMillis(Math.max(deliveryTimeout + buffer, min));
	}

	/**
	 * Set a formatter for logging {@link ConsumerRecord}s; default is
	 * {@code topic-partition@offset}.
	 * @param formatter a function to format the record as a String.
	 * @since 2.7.12
	 */
	public static void setConsumerRecordFormatter(Function<ConsumerRecord<?, ?>, String> formatter) {
		Assert.notNull(formatter, "'formatter' cannot be null");
		crFormatter = formatter;
	}

	/**
	 * Set a formatter for logging {@link ProducerRecord}s; default is
	 * {@link ProducerRecord#toString()}.
	 * @param formatter a function to format the record as a String.
	 * @since 2.7.12
	 */
	public static void setProducerRecordFormatter(Function<ProducerRecord<?, ?>, String> formatter) {
		Assert.notNull(formatter, "'formatter' cannot be null");
		prFormatter = formatter;
	}

	/**
	 * Format the {@link ConsumerRecord} for logging; default
	 * {@code topic-partition@offset}.
	 * @param record the record to format.
	 * @return the formatted String.
	 * @since 2.7.12
	 */
	public static String format(ConsumerRecord<?, ?> record) {
		return crFormatter.apply(record);
	}

	/**
	 * Format the {@link ConsumerRecord} for logging; default
	 * {@code topic-partition@offset}. Provided for backwards compatibility only.
	 * @param record the record to format.
	 * @param full use {@link ConsumerRecord#toString()}.
	 * @return the formatted String.
	 * @since 2.7.12
	 * @deprecated in favor of {@link #format(ConsumerRecord)}.
	 */
	@Deprecated
	public static String format(ConsumerRecord<?, ?> record, boolean full) {
		return full ? record.toString() : crFormatter.apply(record);
	}

	/**
	 * Format the {@link ProducerRecord} for logging; default
	 * {@link ProducerRecord}{@link #toString()}.
	 * @param record the record to format.
	 * @return the formatted String.
	 * @since 2.7.12
	 */
	public static String format(ProducerRecord<?, ?> record) {
		return prFormatter.apply(record);
	}

	private KafkaUtils() {
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/LogIfLevelEnabled.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.function.Supplier;
import org.springframework.core.log.LogAccessor;
import org.springframework.util.Assert;
/**
* Wrapper for a commons-logging Log supporting configurable
* logging levels.
*
* @author Gary Russell
* @since 2.1.2
*
*/
public final class LogIfLevelEnabled {

	private final LogAccessor logger;

	private final Level level;

	/**
	 * Construct an instance that emits messages at the given level.
	 * @param logger the logger to delegate to.
	 * @param level the level at which messages are logged.
	 */
	public LogIfLevelEnabled(LogAccessor logger, Level level) {
		Assert.notNull(logger, "'logger' cannot be null");
		Assert.notNull(level, "'level' cannot be null");
		this.logger = logger;
		this.level = level;
	}

	/**
	 * Logging levels.
	 */
	public enum Level {

		/**
		 * Fatal.
		 */
		FATAL,

		/**
		 * Error.
		 */
		ERROR,

		/**
		 * Warn.
		 */
		WARN,

		/**
		 * Info.
		 */
		INFO,

		/**
		 * Debug.
		 */
		DEBUG,

		/**
		 * Trace.
		 */
		TRACE

	}

	/**
	 * Log the supplied message at the configured level.
	 * @param messageSupplier the message supplier.
	 */
	public void log(Supplier<CharSequence> messageSupplier) {
		// Delegate so the level switch is implemented exactly once.
		log(messageSupplier, null);
	}

	/**
	 * Log the supplied message at the configured level, together with the throwable.
	 * @param messageSupplier the message supplier.
	 * @param thrown the throwable; may be null.
	 */
	public void log(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		switch (this.level) {
			case FATAL:
				fatal(messageSupplier, thrown);
				break;
			case ERROR:
				error(messageSupplier, thrown);
				break;
			case WARN:
				warn(messageSupplier, thrown);
				break;
			case INFO:
				info(messageSupplier, thrown);
				break;
			case DEBUG:
				debug(messageSupplier, thrown);
				break;
			case TRACE:
				trace(messageSupplier, thrown);
				break;
		}
	}

	// Each helper selects the LogAccessor overload based on whether a throwable
	// was provided.

	private void fatal(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		if (thrown != null) {
			this.logger.fatal(thrown, messageSupplier);
		}
		else {
			this.logger.fatal(messageSupplier);
		}
	}

	private void error(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		if (thrown != null) {
			this.logger.error(thrown, messageSupplier);
		}
		else {
			this.logger.error(messageSupplier);
		}
	}

	private void warn(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		if (thrown != null) {
			this.logger.warn(thrown, messageSupplier);
		}
		else {
			this.logger.warn(messageSupplier);
		}
	}

	private void info(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		if (thrown != null) {
			this.logger.info(thrown, messageSupplier);
		}
		else {
			this.logger.info(messageSupplier);
		}
	}

	private void debug(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		if (thrown != null) {
			this.logger.debug(thrown, messageSupplier);
		}
		else {
			this.logger.debug(messageSupplier);
		}
	}

	private void trace(Supplier<CharSequence> messageSupplier, Throwable thrown) {
		if (thrown != null) {
			this.logger.trace(thrown, messageSupplier);
		}
		else {
			this.logger.trace(messageSupplier);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/LoggingProducerListener.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.util.ObjectUtils;
/**
* The {@link ProducerListener} that logs exceptions thrown when sending messages.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Marius Bogoevici
* @author Gary Russell
*/
public class LoggingProducerListener<K, V> implements ProducerListener<K, V> {

	/**
	 * Default max content logged.
	 */
	public static final int DEFAULT_MAX_CONTENT_LOGGED = 100;

	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR

	private boolean includeContents = true;

	private int maxContentLogged = DEFAULT_MAX_CONTENT_LOGGED;

	/**
	 * Whether the log message should include the contents (key and payload).
	 * @param includeContents true if the contents of the message should be logged.
	 */
	public void setIncludeContents(boolean includeContents) {
		this.includeContents = includeContents;
	}

	/**
	 * The maximum amount of data to be logged for either key or value. As message sizes
	 * may vary and become fairly large, this allows limiting the amount of data sent
	 * to logs. Default {@value #DEFAULT_MAX_CONTENT_LOGGED}.
	 * @param maxContentLogged the maximum amount of data being logged.
	 */
	public void setMaxContentLogged(int maxContentLogged) {
		this.maxContentLogged = maxContentLogged;
	}

	@Override
	public void onError(ProducerRecord<K, V> record, @Nullable RecordMetadata recordMetadata, Exception exception) {
		this.logger.error(exception, () -> {
			// StringBuilder: the supplier is evaluated on a single thread, so the
			// synchronized StringBuffer is unnecessary overhead.
			StringBuilder logOutput = new StringBuilder();
			logOutput.append("Exception thrown when sending a message");
			if (this.includeContents) {
				logOutput.append(" with key='")
						.append(keyOrValue(record.key()))
						.append("'")
						.append(" and payload='")
						.append(keyOrValue(record.value()))
						.append("'");
			}
			logOutput.append(" to topic ").append(record.topic());
			if (record.partition() != null) {
				// Prefer the broker-assigned partition from the metadata when present.
				logOutput.append(" and partition ").append(recordMetadata != null
						? recordMetadata.partition()
						: record.partition());
			}
			logOutput.append(":");
			return logOutput.toString();
		});
	}

	// Render a key or value for logging: byte arrays are summarized by length,
	// everything else is truncated to maxContentLogged characters.
	private String keyOrValue(Object keyOrValue) {
		if (keyOrValue instanceof byte[]) {
			return "byte[" + ((byte[]) keyOrValue).length + "]";
		}
		else {
			return toDisplayString(ObjectUtils.nullSafeToString(keyOrValue), this.maxContentLogged);
		}
	}

	// Truncate to at most maxCharacters, appending an ellipsis when cut.
	private String toDisplayString(String original, int maxCharacters) {
		if (original.length() <= maxCharacters) {
			return original;
		}
		return original.substring(0, maxCharacters) + "...";
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/ProducerListener.java | /*
* Copyright 2015-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.lang.Nullable;
/**
* Listener for handling outbound Kafka messages. Exactly one of its methods will be invoked, depending on whether
* the write has been acknowledged or not.
*
* Its main goal is to provide a stateless singleton delegate for {@link org.apache.kafka.clients.producer.Callback}s,
* which, in all but the most trivial cases, requires creating a separate instance per message.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Marius Bogoevici
* @author Gary Russell
* @author Endika Gutiérrez
*
* @see org.apache.kafka.clients.producer.Callback
*/
public interface ProducerListener<K, V> {

	/**
	 * Invoked after the successful send of a message (that is, after it has been acknowledged by the broker).
	 * The default implementation is a no-op.
	 * @param producerRecord the actual sent record
	 * @param recordMetadata the result of the successful send operation
	 */
	default void onSuccess(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata) {
		// No-op by default; override to react to acknowledged sends.
	}

	/**
	 * Invoked after an attempt to send a message has failed.
	 * The default implementation is a no-op.
	 * @param producerRecord the failed record
	 * @param recordMetadata The metadata for the record that was sent (i.e. the partition
	 * and offset). If an error occurred, metadata will contain only valid topic and maybe
	 * the partition. If the partition is not provided in the ProducerRecord and an error
	 * occurs before partition is assigned, then the partition will be set to
	 * RecordMetadata.UNKNOWN_PARTITION.
	 * @param exception the exception thrown
	 * @since 2.6.2
	 */
	default void onError(ProducerRecord<K, V> producerRecord, @Nullable RecordMetadata recordMetadata,
			Exception exception) {
		// No-op by default; override to react to failed sends.
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/SendResult.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
/**
* Result for a ListenableFuture after a send.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
*
*/
public class SendResult<K, V> {

	private final ProducerRecord<K, V> producerRecord;

	private final RecordMetadata recordMetadata;

	/**
	 * Construct an instance pairing the record that was sent with the metadata
	 * returned by the broker.
	 * @param producerRecord the sent record.
	 * @param recordMetadata the resulting metadata.
	 */
	public SendResult(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata) {
		this.producerRecord = producerRecord;
		this.recordMetadata = recordMetadata;
	}

	/**
	 * Return the record that was sent.
	 * @return the record.
	 */
	public ProducerRecord<K, V> getProducerRecord() {
		return this.producerRecord;
	}

	/**
	 * Return the metadata returned by the broker for the send.
	 * @return the metadata.
	 */
	public RecordMetadata getRecordMetadata() {
		return this.recordMetadata;
	}

	@Override
	public String toString() {
		return "SendResult [producerRecord=" + this.producerRecord
				+ ", recordMetadata=" + this.recordMetadata + "]";
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/SimpleKafkaHeaderMapper.java | /*
* Copyright 2018-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.messaging.MessageHeaders;
/**
* A simple header mapper that maps headers directly; for outbound,
* only byte[] headers are mapped; for inbound, headers are mapped
* unchanged, as byte[]. Strings can also be mapped to/from byte.
* See {@link #setRawMappedHeaders(Map)}.
* Most headers in {@link KafkaHeaders} are not mapped on outbound messages.
* The exceptions are correlation and reply headers for request/reply
*
* @author Gary Russell
* @since 2.1.3
*
*/
public class SimpleKafkaHeaderMapper extends AbstractKafkaHeaderMapper {
private static final Set<String> NEVER;
static {
NEVER = new HashSet<>();
NEVER.add(KafkaHeaders.DELIVERY_ATTEMPT);
NEVER.add(KafkaHeaders.LISTENER_INFO);
}
/**
* Construct an instance with the default object mapper and default header patterns
* for outbound headers; all inbound headers are mapped. The default pattern list is
* {@code "!id", "!timestamp" and "*"}. In addition, most of the headers in
* {@link KafkaHeaders} are never mapped as headers since they represent data in
* consumer/producer records.
*/
public SimpleKafkaHeaderMapper() {
super("!" + MessageHeaders.ID,
"!" + MessageHeaders.TIMESTAMP,
"*");
}
/**
* Construct an instance with a default object mapper and the provided header patterns
* for outbound headers; all inbound headers are mapped. The patterns are applied in
* order, stopping on the first match (positive or negative). Patterns are negated by
* preceding them with "!". The patterns will replace the default patterns; you
* generally should not map the {@code "id" and "timestamp"} headers. Note:
* most of the headers in {@link KafkaHeaders} are never mapped as headers since they
* represent data in consumer/producer records.
* @param patterns the patterns.
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
public SimpleKafkaHeaderMapper(String... patterns) {
super(patterns);
}
@Override
public void fromHeaders(MessageHeaders headers, Headers target) {
headers.forEach((key, value) -> {
if (!NEVER.contains(key)) {
Object valueToAdd = headerValueToAddOut(key, value);
if (valueToAdd instanceof byte[] && matches(key, valueToAdd)) {
target.add(new RecordHeader(key, (byte[]) valueToAdd));
}
}
});
}
@Override
public void toHeaders(Headers source, Map<String, Object> target) {
source.forEach(header -> {
if (header.key().equals(KafkaHeaders.DELIVERY_ATTEMPT)) {
target.put(header.key(), ByteBuffer.wrap(header.value()).getInt());
}
else {
target.put(header.key(), headerValueToAddIn(header));
}
});
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/Suffixer.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.Collection;
import java.util.stream.Collectors;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* Utility class that suffixes strings.
*
* @author Tomaz Fernandes
* @since 2.7
*
*/
public class Suffixer {

	private final String suffix;

	/**
	 * Construct an instance that appends the given suffix.
	 * @param suffix the suffix; must not be null (may be empty).
	 */
	public Suffixer(String suffix) {
		Assert.notNull(suffix, "Suffix cannot be null");
		this.suffix = suffix;
	}

	/**
	 * Append the suffix to the source when both have text; otherwise return
	 * the source unchanged.
	 * @param source the string to suffix; may be null.
	 * @return the suffixed string, or the original source.
	 */
	public String maybeAddTo(String source) {
		if (!StringUtils.hasText(this.suffix) || !StringUtils.hasText(source)) {
			return source;
		}
		return source.concat(this.suffix);
	}

	/**
	 * Apply {@link #maybeAddTo(String)} to every element of the collection.
	 * @param sources the strings to suffix.
	 * @return a new collection of suffixed strings, or the original collection
	 * when the suffix has no text.
	 */
	public Collection<String> maybeAddTo(Collection<String> sources) {
		if (!StringUtils.hasText(this.suffix)) {
			return sources;
		}
		return sources.stream()
				.map(this::maybeAddTo)
				.collect(Collectors.toList());
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/TopicPartitionOffset.java | /*
* Copyright 2019-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
import java.util.Objects;
import org.apache.kafka.common.TopicPartition;
import org.springframework.lang.Nullable;
/**
* A configuration container to represent a topic name, partition number and, optionally,
* an offset for it. The offset can be:
* <ul>
* <li>{@code null} - do nothing;</li>
* <li>positive (including {@code 0}) - seek to EITHER the absolute offset within the
* partition or an offset relative to the current position for this consumer, depending
* on {@link #isRelativeToCurrent()}.
* </li>
* <li>negative - seek to EITHER the offset relative to the current last offset within
* the partition: {@code consumer.seekToEnd() + initialOffset} OR the relative to the
* current offset for this consumer (if any), depending on
* {@link #isRelativeToCurrent()}.</li>
* </ul>
* Offsets are applied when the container is {@code start()}ed.
* This class is used when manually assigning partitions and for deferred seek operations.
*
* @author Artem Bilan
* @author Gary Russell
*
* @since 2.3
*/
public class TopicPartitionOffset {
/**
* Enumeration for "special" seeks.
*/
public enum SeekPosition {
/**
* Seek to the beginning.
*/
BEGINNING,
/**
* Seek to the end.
*/
END,
/**
* Seek to the time stamp; if no records exist with a timestamp greater than or
* equal to the timestamp seek to the end.
*/
TIMESTAMP
}
private final TopicPartition topicPartition;
private final SeekPosition position;
private Long offset;
private boolean relativeToCurrent;
/**
* Construct an instance with no initial offset management.
* @param topic the topic.
* @param partition the partition.
*/
public TopicPartitionOffset(String topic, int partition) {
this(topic, partition, null, false);
}
/**
* Construct an instance with the provided initial offset with
* {@link #isRelativeToCurrent()} false.
* @param topic the topic.
* @param partition the partition.
* @param offset the offset.
* @see #TopicPartitionOffset(String, int, Long, boolean)
*/
public TopicPartitionOffset(String topic, int partition, Long offset) {
this(topic, partition, offset, false);
}
/**
* Construct an instance with the provided initial offset.
* @param topic the topic.
* @param partition the partition.
* @param offset the initial offset.
* @param relativeToCurrent true for the initial offset to be relative to
* the current consumer position, false for a positive initial offset to
* be absolute and a negative offset relative to the current end of the
* partition.
*/
public TopicPartitionOffset(String topic, int partition, Long offset, boolean relativeToCurrent) {
this.topicPartition = new TopicPartition(topic, partition);
this.offset = offset;
this.relativeToCurrent = relativeToCurrent;
this.position = null;
}
/**
* Construct an instance with the provided {@link SeekPosition}.
* @param topic the topic.
* @param partition the partition.
* @param position {@link SeekPosition}.
*/
public TopicPartitionOffset(String topic, int partition, SeekPosition position) {
this.topicPartition = new TopicPartition(topic, partition);
this.offset = null;
this.relativeToCurrent = false;
this.position = position;
}
/**
* Construct an instance with the provided {@link SeekPosition}.
* @param topic the topic.
* @param partition the partition.
* @param offset the offset from the seek position (or timestamp for
* {@link SeekPosition#TIMESTAMP}).
* @param position {@link SeekPosition}.
* @since 2.3
*/
public TopicPartitionOffset(String topic, int partition, Long offset, @Nullable SeekPosition position) {
this(new TopicPartition(topic, partition), offset, position);
}
/**
* Construct an instance with the provided {@link SeekPosition}.
* @param topicPartition the topic/partition.
* @param offset the offset from the seek position (or timestamp for
* {@link SeekPosition#TIMESTAMP}).
* @param position {@link SeekPosition}.
* @since 2.3
*/
public TopicPartitionOffset(TopicPartition topicPartition, Long offset, @Nullable SeekPosition position) {
this.topicPartition = topicPartition;
this.offset = offset;
this.relativeToCurrent = false;
this.position = position;
}
public TopicPartition getTopicPartition() {
return this.topicPartition;
}
public int getPartition() {
return this.topicPartition.partition();
}
public String getTopic() {
return this.topicPartition.topic();
}
public Long getOffset() {
return this.offset;
}
/**
* Set the offset.
* @param offset the offset.
* @since 2.5.5
*/
public void setOffset(Long offset) {
this.offset = offset;
}
public boolean isRelativeToCurrent() {
return this.relativeToCurrent;
}
/**
 * Set whether the offset is relative to the current position.
 * @param relativeToCurrent true for relative to current.
 * @since 2.5.5
 */
public void setRelativeToCurrent(boolean relativeToCurrent) {
    this.relativeToCurrent = relativeToCurrent;
}
/**
 * Return the {@link SeekPosition}; may be null when not constructed with one.
 * @return the position, or null.
 */
public SeekPosition getPosition() {
    return this.position;
}
@Override
public boolean equals(Object o) {
    // Only topicPartition and position participate in equality;
    // offset and relativeToCurrent do not.
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    TopicPartitionOffset other = (TopicPartitionOffset) o;
    return Objects.equals(other.topicPartition, this.topicPartition)
            && Objects.equals(other.position, this.position);
}
@Override
public int hashCode() {
    // Must be consistent with equals(), which compares only topicPartition and
    // position. Objects.hash() is null-safe: position may legitimately be null
    // (several constructors set it to null), so the previous
    // this.position.hashCode() call would throw a NullPointerException.
    return Objects.hash(this.topicPartition, this.position);
}
@Override
public String toString() {
    // Builds the same representation as before; position is only rendered
    // when present.
    StringBuilder sb = new StringBuilder("TopicPartitionOffset{")
            .append("topicPartition=").append(this.topicPartition)
            .append(", offset=").append(this.offset)
            .append(", relativeToCurrent=").append(this.relativeToCurrent);
    if (this.position != null) {
        sb.append(", position=").append(this.position.name());
    }
    return sb.append('}').toString();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/TransactionSupport.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support;
/**
* Utilities for supporting transactions.
*
* @author Gary Russell
* @since 1.3.7
*
*/
public final class TransactionSupport {

    // Thread-bound transactional id suffix; UPPER_SNAKE_CASE per constant convention.
    private static final ThreadLocal<String> TRANSACTION_ID_SUFFIX = new ThreadLocal<>(); // NOSONAR

    private TransactionSupport() {
        // utility class - no instances
    }

    /**
     * Bind the supplied transaction id suffix to the current thread.
     * @param suffix the suffix.
     */
    public static void setTransactionIdSuffix(String suffix) {
        TRANSACTION_ID_SUFFIX.set(suffix);
    }

    /**
     * Return the transaction id suffix bound to the current thread, if any.
     * @return the suffix, or null if none is bound.
     */
    public static String getTransactionIdSuffix() {
        return TRANSACTION_ID_SUFFIX.get();
    }

    /**
     * Remove the transaction id suffix bound to the current thread.
     */
    public static void clearTransactionIdSuffix() {
        TRANSACTION_ID_SUFFIX.remove();
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/package-info.java | /**
* Package for Kafka support.
*/
package org.springframework.kafka.support;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/BatchMessageConverter.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.lang.reflect.Type;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.NonNull;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
/**
* A Kafka-specific {@link Message} converter strategy.
*
* @author Gary Russell
* @since 1.1
*/
public interface BatchMessageConverter extends MessageConverter {

    /**
     * Convert a list of {@link ConsumerRecord} to a single {@link Message}.
     * @param records the records.
     * @param acknowledgment the acknowledgment; may be null.
     * @param consumer the consumer.
     * @param payloadType the required payload type.
     * @return the message (never null).
     */
    @NonNull
    Message<?> toMessage(List<ConsumerRecord<?, ?>> records, @Nullable Acknowledgment acknowledgment,
            Consumer<?, ?> consumer, Type payloadType);

    /**
     * Convert a message to one or more producer records.
     * @param message the message.
     * @param defaultTopic the default topic to use if no header found.
     * @return the producer records.
     */
    List<ProducerRecord<?, ?>> fromMessage(Message<?> message, String defaultTopic);

    /**
     * Return the per-record converter used by this batch converter, if configured,
     * or null. The default implementation returns null.
     * @return the converter or null.
     * @since 2.1.5
     */
    @Nullable
    default RecordMessageConverter getRecordMessageConverter() {
        return null;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/BatchMessagingMessageConverter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
import org.springframework.kafka.support.JacksonPresent;
import org.springframework.kafka.support.KafkaHeaderMapper;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaNull;
import org.springframework.kafka.support.serializer.SerializationUtils;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;
/**
* A Messaging {@link MessageConverter} implementation used with a batch
* message listener; the consumer record values are extracted into a collection in
* the message payload.
* <p>
* Populates {@link KafkaHeaders} based on the {@link ConsumerRecord} onto the returned message.
* Each header is a collection where the position in the collection matches the payload
* position.
* <p>
* If a {@link RecordMessageConverter} is provided, and the batch type is a {@link ParameterizedType}
* with a single generic type parameter, each record will be passed to the converter, thus supporting
* a method signature {@code List<Foo> foos}.
*
* @author Marius Bogoevici
* @author Gary Russell
* @author Dariusz Szablinski
* @author Biju Kunjummen
* @since 1.1
*/
public class BatchMessagingMessageConverter implements BatchMessageConverter {

    protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR

    // Optional converter applied to each individual record value when the
    // target payload type is a parameterized container (e.g. List<Foo>); may be null.
    private final RecordMessageConverter recordConverter;

    // Whether the MessageHeaders should generate an id header (default false).
    private boolean generateMessageId = false;

    // Whether the MessageHeaders should generate a timestamp header (default false).
    private boolean generateTimestamp = false;

    // Maps Kafka record headers to converted header maps; defaulted to a
    // DefaultKafkaHeaderMapper when Jackson 2 is present, otherwise null
    // (in which case the raw native headers are exposed instead).
    private KafkaHeaderMapper headerMapper;

    // When true, the raw ConsumerRecord list is added under KafkaHeaders.RAW_DATA.
    private boolean rawRecordHeader;

    /**
     * Create an instance that does not convert the record values.
     */
    public BatchMessagingMessageConverter() {
        this(null);
    }

    /**
     * Create an instance that converts record values using the supplied
     * converter.
     * @param recordConverter the converter; may be null.
     * @since 1.3.2
     */
    public BatchMessagingMessageConverter(RecordMessageConverter recordConverter) {
        this.recordConverter = recordConverter;
        // Only install a default header mapper when Jackson is on the classpath;
        // otherwise toMessage() passes the native headers through unmapped.
        if (JacksonPresent.isJackson2Present()) {
            this.headerMapper = new DefaultKafkaHeaderMapper();
        }
    }

    /**
     * Generate {@link Message} {@code ids} for produced messages. If set to {@code false},
     * will try to use a default value. By default set to {@code false}.
     * @param generateMessageId true if a message id should be generated
     */
    public void setGenerateMessageId(boolean generateMessageId) {
        this.generateMessageId = generateMessageId;
    }

    /**
     * Generate {@code timestamp} for produced messages. If set to {@code false}, -1 is
     * used instead. By default set to {@code false}.
     * @param generateTimestamp true if a timestamp should be generated
     */
    public void setGenerateTimestamp(boolean generateTimestamp) {
        this.generateTimestamp = generateTimestamp;
    }

    /**
     * Set the header mapper to map headers.
     * @param headerMapper the mapper.
     * @since 1.3
     */
    public void setHeaderMapper(KafkaHeaderMapper headerMapper) {
        this.headerMapper = headerMapper;
    }

    @Override
    public RecordMessageConverter getRecordMessageConverter() {
        return this.recordConverter;
    }

    /**
     * Set to true to add the raw {@code List<ConsumerRecord<?, ?>>} as a header
     * {@link KafkaHeaders#RAW_DATA}.
     * @param rawRecordHeader true to add the header.
     * @since 2.7
     */
    public void setRawRecordHeader(boolean rawRecordHeader) {
        this.rawRecordHeader = rawRecordHeader;
    }

    @Override // NOSONAR
    public Message<?> toMessage(List<ConsumerRecord<?, ?>> records, @Nullable Acknowledgment acknowledgment,
            Consumer<?, ?> consumer, Type type) {

        // Kafka-specific headers so id/timestamp generation can be suppressed.
        KafkaMessageHeaders kafkaMessageHeaders = new KafkaMessageHeaders(this.generateMessageId,
                this.generateTimestamp);
        Map<String, Object> rawHeaders = kafkaMessageHeaders.getRawHeaders();

        // Parallel lists: element i of each list corresponds to records.get(i).
        List<Object> payloads = new ArrayList<>();
        List<Object> keys = new ArrayList<>();
        List<String> topics = new ArrayList<>();
        List<Integer> partitions = new ArrayList<>();
        List<Long> offsets = new ArrayList<>();
        List<String> timestampTypes = new ArrayList<>();
        List<Long> timestamps = new ArrayList<>();
        List<Map<String, Object>> convertedHeaders = new ArrayList<>();
        List<Headers> natives = new ArrayList<>();
        List<ConsumerRecord<?, ?>> raws = new ArrayList<>();
        List<ConversionException> conversionFailures = new ArrayList<>();

        // Expose either mapped headers or the raw native headers, depending on
        // whether a header mapper is available.
        if (this.headerMapper != null) {
            rawHeaders.put(KafkaHeaders.BATCH_CONVERTED_HEADERS, convertedHeaders);
        }
        else {
            rawHeaders.put(KafkaHeaders.NATIVE_HEADERS, natives);
        }
        if (this.rawRecordHeader) {
            rawHeaders.put(KafkaHeaders.RAW_DATA, raws);
        }

        // Registers the standard per-record header lists (keys, topics, etc.)
        // plus acknowledgment/consumer headers (inherited from MessageConverter).
        commonHeaders(acknowledgment, consumer, rawHeaders, keys, topics, partitions, offsets, timestampTypes,
                timestamps);
        rawHeaders.put(KafkaHeaders.CONVERSION_FAILURES, conversionFailures);

        boolean logged = false;
        for (ConsumerRecord<?, ?> record : records) {
            payloads.add(obtainPayload(type, record, conversionFailures));
            keys.add(record.key());
            topics.add(record.topic());
            partitions.add(record.partition());
            offsets.add(record.offset());
            if (record.timestampType() != null) {
                timestampTypes.add(record.timestampType().name());
            }
            timestamps.add(record.timestamp());
            if (this.headerMapper != null && record.headers() != null) {
                Map<String, Object> converted = new HashMap<>();
                this.headerMapper.toHeaders(record.headers(), converted);
                convertedHeaders.add(converted);
            }
            else {
                // Log the missing-mapper situation only once per batch.
                if (!logged) {
                    this.logger.debug(() ->
                            "No header mapper is available; Jackson is required for the default mapper; "
                                    + "headers (if present) are not mapped but provided raw in "
                                    + KafkaHeaders.NATIVE_HEADERS);
                    logged = true;
                }
                natives.add(record.headers());
            }
            if (this.rawRecordHeader) {
                raws.add(record);
            }
        }
        return MessageBuilder.createMessage(payloads, kafkaMessageHeaders);
    }

    // Returns either the raw/extracted value or, when a record converter is
    // configured and the target type is a single-generic container, the
    // per-record converted payload.
    private Object obtainPayload(Type type, ConsumerRecord<?, ?> record, List<ConversionException> conversionFailures) {
        return this.recordConverter == null || !containerType(type)
                ? extractAndConvertValue(record, type)
                : convert(record, type, conversionFailures);
    }

    @Override
    public List<ProducerRecord<?, ?>> fromMessage(Message<?> message, String defaultTopic) {
        // Batch conversion is inbound-only; producing from a batch message is not supported.
        throw new UnsupportedOperationException();
    }

    /**
     * Subclasses can convert the value; by default, it's returned as provided by Kafka
     * unless a {@link RecordMessageConverter} has been provided.
     * @param record the record.
     * @param type the required type.
     * @return the value (a {@link KafkaNull} placeholder for null values).
     */
    protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type) {
        return record.value() == null ? KafkaNull.INSTANCE : record.value();
    }

    /**
     * Convert the record value. On success a null entry is appended to
     * {@code conversionFailures} so its indexes stay aligned with the payload list.
     * @param record the record.
     * @param type the type - must be a {@link ParameterizedType} with a single generic
     * type parameter.
     * @param conversionFailures Conversion failures.
     * @return the converted payload (null when the conversion failed and was recorded).
     */
    protected Object convert(ConsumerRecord<?, ?> record, Type type, List<ConversionException> conversionFailures) {
        try {
            Object payload = this.recordConverter
                    .toMessage(record, null, null, ((ParameterizedType) type).getActualTypeArguments()[0]).getPayload();
            conversionFailures.add(null);
            return payload;
        }
        catch (ConversionException ex) {
            // Failures can only be reported back (via headers) when the original
            // raw value is recoverable as bytes.
            byte[] original = null;
            if (record.value() instanceof byte[]) {
                original = (byte[]) record.value();
            }
            else if (record.value() instanceof Bytes) {
                original = ((Bytes) record.value()).get();
            }
            else if (record.value() instanceof String) {
                original = ((String) record.value()).getBytes(StandardCharsets.UTF_8);
            }
            if (original != null) {
                SerializationUtils.deserializationException(record.headers(), original, ex, false);
                conversionFailures.add(ex);
                return null;
            }
            throw new ConversionException("The batch converter can only report conversion failures to the listener "
                    + "if the record.value() is byte[], Bytes, or String", ex);
        }
    }

    /**
     * Return true if the type is a parameterized type with a single generic type
     * parameter.
     * @param type the type.
     * @return true if the conditions are met.
     */
    private boolean containerType(Type type) {
        return type instanceof ParameterizedType
                && ((ParameterizedType) type).getActualTypeArguments().length == 1;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/ByteArrayJsonMessageConverter.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import org.springframework.messaging.Message;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* JSON Message converter - {@code byte[]} on output, String, Bytes, or byte[] on input.
* Used in conjunction with Kafka
* {@code ByteArraySerializer/(ByteArrayDeserializer, BytesDeserializer, or StringDeserializer)}.
* More efficient than {@link StringJsonMessageConverter} because the
* {@code String<->byte[]} conversion is avoided.
*
* @author Gary Russell
* @since 2.3
*
*/
public class ByteArrayJsonMessageConverter extends JsonMessageConverter {

    /**
     * Construct an instance using the default object mapper.
     */
    public ByteArrayJsonMessageConverter() {
    }

    /**
     * Construct an instance using the supplied object mapper.
     * @param objectMapper the mapper.
     */
    public ByteArrayJsonMessageConverter(ObjectMapper objectMapper) {
        super(objectMapper);
    }

    /**
     * Serialize the payload to a JSON {@code byte[]}.
     */
    @Override
    protected Object convertPayload(Message<?> message) {
        try {
            return getObjectMapper().writeValueAsBytes(message.getPayload());
        }
        catch (JsonProcessingException ex) {
            throw new ConversionException("Failed to convert to JSON", message, ex);
        }
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/BytesJsonMessageConverter.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.messaging.Message;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* JSON Message converter - {@code Bytes} on output, String, Bytes, or byte[] on input.
* Used in conjunction with Kafka
* {@code BytesSerializer/(BytesDeserializer, ByteArrayDeserializer, or StringDeserializer)}.
* More efficient than {@link StringJsonMessageConverter} because the
* {@code String<->byte[]} conversion is avoided.
*
* @author Gary Russell
* @since 2.1.7
*
*/
public class BytesJsonMessageConverter extends JsonMessageConverter {

    /**
     * Construct an instance using the default object mapper.
     */
    public BytesJsonMessageConverter() {
    }

    /**
     * Construct an instance using the supplied object mapper.
     * @param objectMapper the mapper.
     */
    public BytesJsonMessageConverter(ObjectMapper objectMapper) {
        super(objectMapper);
    }

    /**
     * Serialize the payload to JSON wrapped in a {@link Bytes}.
     */
    @Override
    protected Object convertPayload(Message<?> message) {
        try {
            byte[] json = getObjectMapper().writeValueAsBytes(message.getPayload());
            return Bytes.wrap(json);
        }
        catch (JsonProcessingException ex) {
            throw new ConversionException("Failed to convert to JSON", message, ex);
        }
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/ConversionException.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
/**
* Exception for conversions.
*
* @author Gary Russell
*
*/
@SuppressWarnings("serial")
public class ConversionException extends KafkaException {
private transient ConsumerRecord<?, ?> record;
private transient List<ConsumerRecord<?, ?>> records = new ArrayList<>();
private transient Message<?> message;
/**
* Construct an instance with the provided properties.
* @param message A text message describing the reason.
* @param cause the cause.
*/
public ConversionException(String message, Throwable cause) {
super(message, cause);
this.record = null;
this.message = null;
}
/**
* Construct an instance with the provided properties.
* @param message A text message describing the reason.
* @param record the consumer record.
* @param cause the cause.
* @since 2.7.2
*/
public ConversionException(String message, ConsumerRecord<?, ?> record, Throwable cause) {
super(message, cause);
this.record = record;
this.message = null;
}
/**
* Construct an instance with the provided properties.
* @param message A text message describing the reason.
* @param records the consumer records.
* @param cause the cause.
* @since 2.7.2
*/
public ConversionException(String message, List<ConsumerRecord<?, ?>> records, Throwable cause) {
super(message, cause);
this.record = null;
this.records.addAll(records);
this.message = null;
}
/**
* Construct an instance with the provided properties.
* @param message A text message describing the reason.
* @param msg a {@link Message} converted from a consumer record.
* @param cause the cause.
* @since 2.7.2
*/
public ConversionException(String message, Message<?> msg, Throwable cause) {
super(message, cause);
this.record = null;
this.message = msg;
}
/**
* Return the consumer record, if available.
* @return the record.
* @since 2.7.2
*/
@Nullable
public ConsumerRecord<?, ?> getRecord() {
return this.record;
}
/**
* Return the consumer record, if available.
* @return the record.
* @since 2.7.2
*/
public List<ConsumerRecord<?, ?>> getRecords() {
return Collections.unmodifiableList(this.records);
}
/**
* Return the {@link Message}, if available.
* @return the message.
* @since 2.7.2
*/
@Nullable
public Message<?> getMsg() {
return this.message;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/JsonMessageConverter.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.io.IOException;
import java.lang.reflect.Type;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.kafka.support.JacksonUtils;
import org.springframework.kafka.support.KafkaNull;
import org.springframework.kafka.support.mapping.DefaultJackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper.TypePrecedence;
import org.springframework.messaging.Message;
import org.springframework.util.Assert;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.TypeFactory;
/**
* Base class for JSON message converters; on the consumer side, it can
* handle {@code byte[]}, {@link Bytes} and {@link String} record values.
* On the producer side, select a subclass that matches the corresponding
* Kafka Serializer.
*
* @author Gary Russell
* @since 2.3
*
*/
public class JsonMessageConverter extends MessagingMessageConverter {

    // Fallback target when neither headers nor an inferred type supply one.
    private static final JavaType OBJECT = TypeFactory.defaultInstance().constructType(Object.class);

    private final ObjectMapper objectMapper;

    private Jackson2JavaTypeMapper typeMapper = new DefaultJackson2JavaTypeMapper();

    /**
     * Construct an instance with an enhanced default {@link ObjectMapper}.
     */
    public JsonMessageConverter() {
        this(JacksonUtils.enhancedObjectMapper());
    }

    /**
     * Construct an instance with the supplied {@link ObjectMapper}.
     * @param objectMapper the mapper; must not be null.
     */
    public JsonMessageConverter(ObjectMapper objectMapper) {
        Assert.notNull(objectMapper, "'objectMapper' must not be null.");
        this.objectMapper = objectMapper;
    }

    /**
     * Return the type mapper.
     * @return the mapper.
     */
    public Jackson2JavaTypeMapper getTypeMapper() {
        return this.typeMapper;
    }

    /**
     * Set a customized type mapper.
     * @param typeMapper the type mapper.
     */
    public void setTypeMapper(Jackson2JavaTypeMapper typeMapper) {
        Assert.notNull(typeMapper, "'typeMapper' cannot be null");
        this.typeMapper = typeMapper;
    }

    /**
     * Return the object mapper.
     * @return the mapper.
     */
    protected ObjectMapper getObjectMapper() {
        return this.objectMapper;
    }

    @Override
    protected Headers initialRecordHeaders(Message<?> message) {
        // Seed the outbound record headers with type information for the payload.
        RecordHeaders headers = new RecordHeaders();
        this.typeMapper.fromClass(message.getPayload().getClass(), headers);
        return headers;
    }

    @Override
    protected Object convertPayload(Message<?> message) {
        throw new UnsupportedOperationException("Select a subclass that creates a ProducerRecord value "
                + "corresponding to the configured Kafka Serializer");
    }

    @Override
    protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type) {
        Object value = record.value();
        if (value == null) { // avoid a redundant second record.value() call
            return KafkaNull.INSTANCE;
        }
        JavaType javaType = determineJavaType(record, type);
        if (value instanceof Bytes) {
            value = ((Bytes) value).get();
        }
        if (value instanceof String) {
            try {
                return this.objectMapper.readValue((String) value, javaType);
            }
            catch (IOException e) {
                throw new ConversionException("Failed to convert from JSON", record, e);
            }
        }
        else if (value instanceof byte[]) {
            try {
                return this.objectMapper.readValue((byte[]) value, javaType);
            }
            catch (IOException e) {
                throw new ConversionException("Failed to convert from JSON", record, e);
            }
        }
        else {
            throw new IllegalStateException("Only String, Bytes, or byte[] supported");
        }
    }

    // Prefer the inferred type when the precedence is INFERRED and one was
    // supplied; otherwise consult the type headers; fall back to Object.
    private JavaType determineJavaType(ConsumerRecord<?, ?> record, Type type) {
        JavaType javaType = this.typeMapper.getTypePrecedence() == TypePrecedence.INFERRED && type != null
                ? TypeFactory.defaultInstance().constructType(type)
                : this.typeMapper.toJavaType(record.headers());
        if (javaType == null) { // no headers
            if (type != null) {
                javaType = TypeFactory.defaultInstance().constructType(type);
            }
            else {
                javaType = OBJECT;
            }
        }
        return javaType;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/KafkaMessageHeaders.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.util.Map;
import org.springframework.messaging.MessageHeaders;
/**
* Overload of message headers configurable for adding id and timestamp headers.
*
* @author Marius Bogoevici
* @author Gary Russell
* @since 1.1
*
*/
@SuppressWarnings("serial")
public class KafkaMessageHeaders extends MessageHeaders {
/**
* Construct headers with or without id and/or timestamp.
* @param generateId true to add an ID header.
* @param generateTimestamp true to add a timestamp header.
*/
KafkaMessageHeaders(boolean generateId, boolean generateTimestamp) {
super(null, generateId ? null : ID_VALUE_NONE, generateTimestamp ? null : -1L);
}
@Override
public Map<String, Object> getRawHeaders() { //NOSONAR - not useless, widening to public
return super.getRawHeaders();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/MappingJacksonParameterizedConverter.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.io.IOException;
import java.lang.reflect.Type;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.mapping.DefaultJackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper.TypePrecedence;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.messaging.converter.MappingJackson2MessageConverter;
import org.springframework.util.Assert;
import org.springframework.util.MimeType;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.type.TypeFactory;
/**
* Subclass of {@link MappingJackson2MessageConverter} that can handle parameterized
* (generic) types.
*
* @author Gary Russell
* @since 2.7.1
*
*/
public class MappingJacksonParameterizedConverter extends MappingJackson2MessageConverter {

    // Fallback target when no type can be determined from hint or headers.
    private static final JavaType OBJECT = TypeFactory.defaultInstance().constructType(Object.class);

    private Jackson2JavaTypeMapper typeMapper = new DefaultJackson2JavaTypeMapper();

    /**
     * Construct a {@code MappingJacksonParameterizedConverter} supporting
     * the {@code application/json} MIME type with {@code UTF-8} character set.
     */
    public MappingJacksonParameterizedConverter() {
    }

    /**
     * Construct a {@code MappingJacksonParameterizedConverter} supporting
     * one or more custom MIME types.
     * @param supportedMimeTypes the supported MIME types
     */
    public MappingJacksonParameterizedConverter(MimeType... supportedMimeTypes) {
        super(supportedMimeTypes);
    }

    /**
     * Return the type mapper.
     * @return the mapper.
     */
    public Jackson2JavaTypeMapper getTypeMapper() {
        return this.typeMapper;
    }

    /**
     * Set a customized type mapper.
     * @param typeMapper the type mapper.
     */
    public void setTypeMapper(Jackson2JavaTypeMapper typeMapper) {
        Assert.notNull(typeMapper, "'typeMapper' cannot be null");
        this.typeMapper = typeMapper;
    }

    @Override
    @Nullable
    protected Object convertFromInternal(Message<?> message, Class<?> targetClass, @Nullable Object conversionHint) {
        JavaType javaType = determineJavaType(message, conversionHint);
        Object value = message.getPayload();
        if (value instanceof Bytes) {
            value = ((Bytes) value).get();
        }
        if (value instanceof String) {
            try {
                return getObjectMapper().readValue((String) value, javaType);
            }
            catch (IOException e) {
                throw new ConversionException("Failed to convert from JSON", message, e);
            }
        }
        else if (value instanceof byte[]) {
            try {
                return getObjectMapper().readValue((byte[]) value, javaType);
            }
            catch (IOException e) {
                throw new ConversionException("Failed to convert from JSON", message, e);
            }
        }
        else {
            throw new IllegalStateException("Only String, Bytes, or byte[] supported");
        }
    }

    // Determine the Jackson target type: with a Type hint and native headers,
    // honor the mapper's precedence (idiomatic '==' for the enum comparison);
    // otherwise fall back to the hint, or Object.
    private JavaType determineJavaType(Message<?> message, @Nullable Object hint) {
        JavaType javaType = null;
        Type type = null;
        if (hint instanceof Type) {
            type = (Type) hint;
            Headers nativeHeaders = message.getHeaders().get(KafkaHeaders.NATIVE_HEADERS, Headers.class);
            if (nativeHeaders != null) {
                javaType = this.typeMapper.getTypePrecedence() == TypePrecedence.INFERRED
                        ? TypeFactory.defaultInstance().constructType(type)
                        : this.typeMapper.toJavaType(nativeHeaders);
            }
        }
        if (javaType == null) { // no headers
            if (type != null) {
                javaType = TypeFactory.defaultInstance().constructType(type);
            }
            else {
                javaType = OBJECT;
            }
        }
        return javaType;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/MessageConverter.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.JavaUtils;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.lang.Nullable;
/**
* A top level interface for message converters.
*
* @author Gary Russell
* @since 1.1
*
*/
public interface MessageConverter {

	/**
	 * Get the consumer group id bound to the current thread, if any.
	 * @return the group id, or null when none is bound.
	 */
	@Nullable
	static String getGroupId() {
		return KafkaUtils.getConsumerGroupId();
	}

	/**
	 * Populate the common Kafka consumer headers into the raw headers map;
	 * null-valued optional entries (key, group id, acknowledgment, consumer)
	 * are omitted.
	 * @param acknowledgment the acknowledgment.
	 * @param consumer the consumer.
	 * @param rawHeaders the raw headers map.
	 * @param theKey the key.
	 * @param topic the topic.
	 * @param partition the partition.
	 * @param offset the offset.
	 * @param timestampType the timestamp type.
	 * @param timestamp the timestamp.
	 */
	default void commonHeaders(Acknowledgment acknowledgment, Consumer<?, ?> consumer, Map<String, Object> rawHeaders,
			@Nullable Object theKey, Object topic, Object partition, Object offset,
			@Nullable Object timestampType, Object timestamp) {

		rawHeaders.put(KafkaHeaders.RECEIVED_TOPIC, topic);
		rawHeaders.put(KafkaHeaders.RECEIVED_PARTITION_ID, partition);
		rawHeaders.put(KafkaHeaders.OFFSET, offset);
		rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, timestampType);
		rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, timestamp);
		JavaUtils.INSTANCE
				.acceptIfNotNull(KafkaHeaders.RECEIVED_MESSAGE_KEY, theKey, rawHeaders::put)
				.acceptIfNotNull(KafkaHeaders.GROUP_ID, getGroupId(), rawHeaders::put)
				.acceptIfNotNull(KafkaHeaders.ACKNOWLEDGMENT, acknowledgment, rawHeaders::put)
				.acceptIfNotNull(KafkaHeaders.CONSUMER, consumer, rawHeaders::put);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/MessagingMessageConverter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.support.AbstractKafkaHeaderMapper;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
import org.springframework.kafka.support.JacksonPresent;
import org.springframework.kafka.support.KafkaHeaderMapper;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaNull;
import org.springframework.kafka.support.SimpleKafkaHeaderMapper;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.SmartMessageConverter;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
/**
* A Messaging {@link MessageConverter} implementation for a message listener that
* receives individual messages.
* <p>
* Populates {@link KafkaHeaders} based on the {@link ConsumerRecord} onto the returned
* message.
*
* @author Marius Bogoevici
* @author Gary Russell
* @author Dariusz Szablinski
* @author Biju Kunjummen
*/
public class MessagingMessageConverter implements RecordMessageConverter {
protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR
// Whether the created MessageHeaders should contain a generated message id; default off.
private boolean generateMessageId = false;
// Whether the created MessageHeaders should contain a generated timestamp; default off.
private boolean generateTimestamp = false;
// Maps Kafka record headers to/from spring-messaging headers; may be null (no mapping).
private KafkaHeaderMapper headerMapper;
// When true, the entire ConsumerRecord is added under KafkaHeaders.RAW_DATA.
private boolean rawRecordHeader;
// Optional payload converter applied after (inbound) / before (outbound) conversion.
private SmartMessageConverter messagingConverter;
/**
 * Create an instance; uses a {@link DefaultKafkaHeaderMapper} when Jackson 2 is on
 * the classpath, otherwise a {@link SimpleKafkaHeaderMapper}.
 */
public MessagingMessageConverter() {
if (JacksonPresent.isJackson2Present()) {
this.headerMapper = new DefaultKafkaHeaderMapper();
}
else {
this.headerMapper = new SimpleKafkaHeaderMapper();
}
}
/**
 * Generate {@link Message} {@code ids} for produced messages. If set to {@code false},
 * will try to use a default value. By default set to {@code false}.
 * @param generateMessageId true if a message id should be generated
 */
public void setGenerateMessageId(boolean generateMessageId) {
this.generateMessageId = generateMessageId;
}
/**
 * Generate {@code timestamp} for produced messages. If set to {@code false}, -1 is
 * used instead. By default set to {@code false}.
 * @param generateTimestamp true if a timestamp should be generated
 */
public void setGenerateTimestamp(boolean generateTimestamp) {
this.generateTimestamp = generateTimestamp;
}
/**
 * Set the header mapper to map headers.
 * @param headerMapper the mapper.
 * @since 1.3
 */
public void setHeaderMapper(KafkaHeaderMapper headerMapper) {
this.headerMapper = headerMapper;
}
/**
 * Set to true to add the raw {@link ConsumerRecord} as a header
 * {@link KafkaHeaders#RAW_DATA}.
 * @param rawRecordHeader true to add the header.
 * @since 2.7
 */
public void setRawRecordHeader(boolean rawRecordHeader) {
this.rawRecordHeader = rawRecordHeader;
}
/**
 * Return the configured {@link SmartMessageConverter}, if any.
 * @return the converter, or null when none has been set.
 */
protected org.springframework.messaging.converter.MessageConverter getMessagingConverter() {
return this.messagingConverter;
}
/**
 * Set a spring-messaging {@link SmartMessageConverter} to convert the record value to
 * the desired type. This will also cause the {@link MessageHeaders#CONTENT_TYPE} to
 * be converted to String when mapped inbound.
 * <p>
 * IMPORTANT: This converter's {@link #fromMessage(Message, String)} method is called
 * for outbound conversion to a {@link ProducerRecord} with the message payload in the
 * {@link ProducerRecord#value()} property.
 * {@link #toMessage(ConsumerRecord, Acknowledgment, Consumer, Type)} is called for
 * inbound conversion from {@link ConsumerRecord} with the payload being the
 * {@link ConsumerRecord#value()} property.
 * <p>
 * The {@link SmartMessageConverter#toMessage(Object, MessageHeaders)} method is
 * called to create a new outbound {@link Message} from the {@link Message} passed to
 * {@link #fromMessage(Message, String)}. Similarly, in
 * {@link #toMessage(ConsumerRecord, Acknowledgment, Consumer, Type)}, after this
 * converter has created a new {@link Message} from the {@link ConsumerRecord} the
 * {@link SmartMessageConverter#fromMessage(Message, Class)} method is called and then
 * the final inbound message is created with the newly converted payload.
 * <p>
 * In either case, if the {@link SmartMessageConverter} returns {@code null}, the
 * original message is used.
 * @param messagingConverter the converter.
 * @since 2.7.1
 */
public void setMessagingConverter(SmartMessageConverter messagingConverter) {
this.messagingConverter = messagingConverter;
// The SmartMessageConverter needs CONTENT_TYPE as a String, so ask the mapper to
// map that header raw (unconverted) in both directions.
if (messagingConverter != null && this.headerMapper instanceof AbstractKafkaHeaderMapper) {
((AbstractKafkaHeaderMapper) this.headerMapper).addRawMappedHeader(MessageHeaders.CONTENT_TYPE, true);
}
}
/**
 * Convert a {@link ConsumerRecord} into a spring-messaging {@link Message}: map the
 * record headers, add the common Kafka headers, extract/convert the value, and
 * optionally post-process the payload with the configured
 * {@link SmartMessageConverter}.
 */
@Override
public Message<?> toMessage(ConsumerRecord<?, ?> record, Acknowledgment acknowledgment, Consumer<?, ?> consumer,
Type type) {
KafkaMessageHeaders kafkaMessageHeaders = new KafkaMessageHeaders(this.generateMessageId,
this.generateTimestamp);
Map<String, Object> rawHeaders = kafkaMessageHeaders.getRawHeaders();
if (record.headers() != null) {
mapOrAddHeaders(record, rawHeaders);
}
String ttName = record.timestampType() != null ? record.timestampType().name() : null;
commonHeaders(acknowledgment, consumer, rawHeaders, record.key(), record.topic(), record.partition(),
record.offset(), ttName, record.timestamp());
if (this.rawRecordHeader) {
rawHeaders.put(KafkaHeaders.RAW_DATA, record);
}
Message<?> message = MessageBuilder.createMessage(extractAndConvertValue(record, type), kafkaMessageHeaders);
// KafkaNull payloads are never passed through the SmartMessageConverter.
if (this.messagingConverter != null && !message.getPayload().equals(KafkaNull.INSTANCE)) {
// Resolve the raw target class from the (possibly parameterized) listener type.
Class<?> clazz = type instanceof Class ? (Class<?>) type : type instanceof ParameterizedType
? (Class<?>) ((ParameterizedType) type).getRawType() : Object.class;
Object payload = this.messagingConverter.fromMessage(message, clazz, type);
// A null result means the converter declined; keep the original message.
if (payload != null) {
message = new GenericMessage<>(payload, message.getHeaders());
}
}
return message;
}
// Map the record headers via the header mapper when one is configured; otherwise
// expose them raw under NATIVE_HEADERS and surface the content type (if present)
// as a String header.
private void mapOrAddHeaders(ConsumerRecord<?, ?> record, Map<String, Object> rawHeaders) {
if (this.headerMapper != null) {
this.headerMapper.toHeaders(record.headers(), rawHeaders);
}
else {
this.logger.debug(() ->
"No header mapper is available; Jackson is required for the default mapper; "
+ "headers (if present) are not mapped but provided raw in "
+ KafkaHeaders.NATIVE_HEADERS);
rawHeaders.put(KafkaHeaders.NATIVE_HEADERS, record.headers());
Header contentType = record.headers().lastHeader(MessageHeaders.CONTENT_TYPE);
if (contentType != null) {
rawHeaders.put(MessageHeaders.CONTENT_TYPE,
new String(contentType.value(), StandardCharsets.UTF_8));
}
}
}
/**
 * Convert a {@link Message} into a {@link ProducerRecord}: optionally pre-convert
 * via the {@link SmartMessageConverter}, resolve topic/partition/key/timestamp from
 * the message headers, and map the remaining headers onto the record.
 * @throws IllegalStateException if the topic header is neither String nor byte[],
 * or if no topic header exists and {@code defaultTopic} is null.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public ProducerRecord<?, ?> fromMessage(Message<?> messageArg, String defaultTopic) {
Message<?> message = messageArg;
if (this.messagingConverter != null) {
Message<?> converted = this.messagingConverter.toMessage(message.getPayload(), message.getHeaders());
// A null result means the converter declined; keep the original message.
if (converted != null) {
message = converted;
}
}
MessageHeaders headers = message.getHeaders();
Object topicHeader = headers.get(KafkaHeaders.TOPIC);
String topic = null;
if (topicHeader instanceof byte[]) {
topic = new String(((byte[]) topicHeader), StandardCharsets.UTF_8);
}
else if (topicHeader instanceof String) {
topic = (String) topicHeader;
}
else if (topicHeader == null) {
Assert.state(defaultTopic != null, "With no topic header, a defaultTopic is required");
}
else {
throw new IllegalStateException(KafkaHeaders.TOPIC + " must be a String or byte[], not "
+ topicHeader.getClass());
}
Integer partition = headers.get(KafkaHeaders.PARTITION_ID, Integer.class);
Object key = headers.get(KafkaHeaders.MESSAGE_KEY);
Object payload = convertPayload(message);
Long timestamp = headers.get(KafkaHeaders.TIMESTAMP, Long.class);
Headers recordHeaders = initialRecordHeaders(message);
if (this.headerMapper != null) {
this.headerMapper.fromHeaders(headers, recordHeaders);
}
return new ProducerRecord(topic == null ? defaultTopic : topic, partition, timestamp, key, payload,
recordHeaders);
}
/**
 * Subclasses can populate additional headers before they are mapped.
 * @param message the message.
 * @return the headers
 * @since 2.1
 */
protected Headers initialRecordHeaders(Message<?> message) {
return new RecordHeaders();
}
/**
 * Subclasses can convert the payload; by default, it's sent unchanged to Kafka.
 * {@link KafkaNull} payloads are converted to {@code null} (tombstone records).
 * @param message the message.
 * @return the payload.
 */
protected Object convertPayload(Message<?> message) {
Object payload = message.getPayload();
if (payload instanceof KafkaNull) {
return null;
}
else {
return payload;
}
}
/**
 * Subclasses can convert the value; by default, it's returned as provided by Kafka
 * unless there is a {@link SmartMessageConverter} that can convert it. A null record
 * value is represented as {@link KafkaNull#INSTANCE}.
 * @param record the record.
 * @param type the required type.
 * @return the value.
 */
protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type) {
return record.value() == null ? KafkaNull.INSTANCE : record.value();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/ProjectingMessageConverter.java | /*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.core.ResolvableType;
import org.springframework.data.projection.MethodInterceptorFactory;
import org.springframework.data.projection.ProjectionFactory;
import org.springframework.data.projection.SpelAwareProxyProjectionFactory;
import org.springframework.data.web.JsonProjectingMethodInterceptorFactory;
import org.springframework.kafka.support.JacksonUtils;
import org.springframework.kafka.support.KafkaNull;
import org.springframework.messaging.Message;
import org.springframework.util.Assert;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
/**
* A {@link MessageConverter} implementation that uses a Spring Data
* {@link ProjectionFactory} to bind incoming messages to projection interfaces.
*
* @author Oliver Gierke
* @author Artem Bilan
* @author Gary Russell
*
* @since 2.1.1
*/
public class ProjectingMessageConverter extends MessagingMessageConverter {

	/** Creates proxy projections bound to the incoming JSON payload. */
	private final ProjectionFactory projectionFactory;

	/** Handles outbound conversion and any non-interface target types. */
	private final MessagingMessageConverter delegate;

	/**
	 * Create a new {@link ProjectingMessageConverter} using a
	 * {@link JacksonUtils#enhancedObjectMapper()} by default.
	 * @since 2.3
	 */
	public ProjectingMessageConverter() {
		this(JacksonUtils.enhancedObjectMapper());
	}

	/**
	 * Create a new {@link ProjectingMessageConverter} using the given {@link ObjectMapper}.
	 * @param mapper must not be {@literal null}.
	 */
	public ProjectingMessageConverter(ObjectMapper mapper) {
		this(mapper, new StringJsonMessageConverter());
	}

	/**
	 * Create a new {@link ProjectingMessageConverter} with the given delegate and a
	 * {@link JacksonUtils#enhancedObjectMapper()}.
	 * @param delegate the delegate converter for outbound and non-interfaces.
	 * @since 2.3
	 */
	public ProjectingMessageConverter(MessagingMessageConverter delegate) {
		this(JacksonUtils.enhancedObjectMapper(), delegate);
	}

	/**
	 * Create a new {@link ProjectingMessageConverter} using the given {@link ObjectMapper}.
	 * @param mapper must not be {@literal null}.
	 * @param delegate the delegate converter for outbound and non-interfaces.
	 * @since 2.3
	 */
	public ProjectingMessageConverter(ObjectMapper mapper, MessagingMessageConverter delegate) {
		Assert.notNull(mapper, "ObjectMapper must not be null");
		Assert.notNull(delegate, "'delegate' cannot be null");
		SpelAwareProxyProjectionFactory proxyFactory = new SpelAwareProxyProjectionFactory();
		proxyFactory.registerMethodInvokerFactory(
				new JsonProjectingMethodInterceptorFactory(new JacksonMappingProvider(mapper)));
		this.projectionFactory = proxyFactory;
		this.delegate = delegate;
	}

	@Override
	protected Object convertPayload(Message<?> message) {
		return this.delegate.convertPayload(message);
	}

	@Override
	protected Object extractAndConvertValue(ConsumerRecord<?, ?> record, Type type) {
		Object value = record.value();
		if (value == null) {
			return KafkaNull.INSTANCE;
		}
		Class<?> targetType = ResolvableType.forType(type).resolve(Object.class);
		if (targetType.isInterface()) {
			// The stream is closed underneath by the ObjectMapper#_readTreeAndClose()
			return this.projectionFactory.createProjection(targetType,
					new ByteArrayInputStream(getAsByteArray(value)));
		}
		// Non-interface targets are converted by the delegate.
		return this.delegate.extractAndConvertValue(record, type);
	}

	/**
	 * Return the given source value as byte array.
	 * @param source must not be {@literal null}.
	 * @return the source instance as byte array.
	 */
	private static byte[] getAsByteArray(Object source) {
		Assert.notNull(source, "Source must not be null");
		if (source instanceof byte[]) {
			return (byte[]) source;
		}
		if (source instanceof Bytes) {
			return ((Bytes) source).get();
		}
		if (source instanceof String) {
			return ((String) source).getBytes(StandardCharsets.UTF_8);
		}
		throw new ConversionException(String.format(
				"Unsupported payload type '%s'. Expected 'String', 'Bytes', or 'byte[]'",
				source.getClass()), null);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/RecordMessageConverter.java | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import java.lang.reflect.Type;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.lang.NonNull;
import org.springframework.messaging.Message;
/**
* A Kafka-specific {@link Message} converter strategy.
*
* @author Gary Russell
* @since 1.1
*/
public interface RecordMessageConverter extends MessageConverter {
/**
 * Convert a {@link ConsumerRecord} to a {@link Message}.
 * @param record the record.
 * @param acknowledgment the acknowledgment (may be null if not available).
 * @param consumer the consumer that received the record.
 * @param payloadType the required payload type (e.g. the inferred listener
 * method parameter type).
 * @return the message; never null.
 */
@NonNull
Message<?> toMessage(ConsumerRecord<?, ?> record, Acknowledgment acknowledgment, Consumer<?, ?> consumer,
Type payloadType);
/**
 * Convert a message to a producer record.
 * @param message the message.
 * @param defaultTopic the default topic to use if no topic header found.
 * @return the producer record.
 */
ProducerRecord<?, ?> fromMessage(Message<?> message, String defaultTopic);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/StringJsonMessageConverter.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.converter;
import org.springframework.messaging.Message;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* JSON Message converter - String on output, String, Bytes, or byte[] on input. Used in
* conjunction with Kafka
* {@code StringSerializer/(StringDeserializer, BytesDeserializer, or ByteArrayDeserializer)}.
* Consider using the ByteArrayJsonMessageConverter instead to avoid unnecessary
* {@code String->byte[]} conversion.
*
* @author Gary Russell
* @author Artem Bilan
* @author Dariusz Szablinski
*/
public class StringJsonMessageConverter extends JsonMessageConverter {

	/**
	 * Create an instance with a default {@link ObjectMapper}.
	 */
	public StringJsonMessageConverter() {
	}

	/**
	 * Create an instance with the supplied {@link ObjectMapper}.
	 * @param objectMapper the mapper.
	 */
	public StringJsonMessageConverter(ObjectMapper objectMapper) {
		super(objectMapper);
	}

	@Override
	protected Object convertPayload(Message<?> message) {
		Object payload = message.getPayload();
		try {
			// Serialize the payload as a JSON String for use with a StringSerializer.
			return getObjectMapper().writeValueAsString(payload);
		}
		catch (JsonProcessingException ex) {
			throw new ConversionException("Failed to convert to JSON", message, ex);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/converter/package-info.java | /**
* Package for Kafka converters.
*/
package org.springframework.kafka.support.converter;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/mapping/AbstractJavaTypeMapper.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.mapping;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.beans.factory.BeanClassLoaderAware;
import org.springframework.messaging.converter.MessageConversionException;
import org.springframework.util.ClassUtils;
/**
* Abstract type mapper.
*
* @author Mark Pollack
* @author Sam Nelson
* @author Andreas Asplund
* @author Gary Russell
* @author Elliot Kennedy
*
* @since 2.1
*/
public abstract class AbstractJavaTypeMapper implements BeanClassLoaderAware {

	/**
	 * Default header name for type information.
	 */
	public static final String DEFAULT_CLASSID_FIELD_NAME = "__TypeId__";

	/**
	 * Default header name for container object contents type information.
	 */
	public static final String DEFAULT_CONTENT_CLASSID_FIELD_NAME = "__ContentTypeId__";

	/**
	 * Default header name for map key type information.
	 */
	public static final String DEFAULT_KEY_CLASSID_FIELD_NAME = "__KeyTypeId__";

	/**
	 * Default header name for key type information.
	 */
	public static final String KEY_DEFAULT_CLASSID_FIELD_NAME = "__Key_TypeId__";

	/**
	 * Default header name for key container object contents type information.
	 */
	public static final String KEY_DEFAULT_CONTENT_CLASSID_FIELD_NAME = "__Key_ContentTypeId__";

	/**
	 * Default header name for key map key type information.
	 */
	public static final String KEY_DEFAULT_KEY_CLASSID_FIELD_NAME = "__Key_KeyTypeId__";

	// Token (class id) -> class, configured via setIdClassMapping().
	private final Map<String, Class<?>> idClassMapping = new ConcurrentHashMap<>();

	// Reverse view of idClassMapping: class -> UTF-8 bytes of the token, for headers.
	private final Map<Class<?>, byte[]> classIdMapping = new ConcurrentHashMap<>();

	private String classIdFieldName = DEFAULT_CLASSID_FIELD_NAME;

	private String contentClassIdFieldName = DEFAULT_CONTENT_CLASSID_FIELD_NAME;

	private String keyClassIdFieldName = DEFAULT_KEY_CLASSID_FIELD_NAME;

	private ClassLoader classLoader = ClassUtils.getDefaultClassLoader();

	/**
	 * Return the configured header name for type information.
	 * @return the header name.
	 */
	public String getClassIdFieldName() {
		return this.classIdFieldName;
	}

	/**
	 * Configure header name for type information.
	 * @param classIdFieldName the header name.
	 * @since 2.1.3
	 */
	public void setClassIdFieldName(String classIdFieldName) {
		this.classIdFieldName = classIdFieldName;
	}

	/**
	 * Return the configured header name for container contents type information.
	 * @return the header name.
	 */
	public String getContentClassIdFieldName() {
		return this.contentClassIdFieldName;
	}

	/**
	 * Configure header name for container object contents type information.
	 * @param contentClassIdFieldName the header name.
	 * @since 2.1.3
	 */
	public void setContentClassIdFieldName(String contentClassIdFieldName) {
		this.contentClassIdFieldName = contentClassIdFieldName;
	}

	/**
	 * Return the configured header name for map key type information.
	 * @return the header name.
	 */
	public String getKeyClassIdFieldName() {
		return this.keyClassIdFieldName;
	}

	/**
	 * Configure header name for map key type information.
	 * @param keyClassIdFieldName the header name.
	 * @since 2.1.3
	 */
	public void setKeyClassIdFieldName(String keyClassIdFieldName) {
		this.keyClassIdFieldName = keyClassIdFieldName;
	}

	/**
	 * Configure the token-to-class mapping; also (re)builds the reverse
	 * class-to-token mapping used when writing headers.
	 * @param idClassMapping the mapping to add.
	 */
	public void setIdClassMapping(Map<String, Class<?>> idClassMapping) {
		this.idClassMapping.putAll(idClassMapping);
		createReverseMap();
	}

	@Override
	public void setBeanClassLoader(ClassLoader classLoader) {
		this.classLoader = classLoader;
	}

	protected ClassLoader getClassLoader() {
		return this.classLoader;
	}

	/**
	 * Add a type header for the given class: the mapped token if one is configured,
	 * otherwise the fully qualified class name.
	 * @param headers the headers to add to.
	 * @param headerName the header name.
	 * @param clazz the class to record.
	 */
	protected void addHeader(Headers headers, String headerName, Class<?> clazz) {
		// Single lookup instead of containsKey + get; the map never holds null values.
		byte[] classId = this.classIdMapping.get(clazz);
		if (classId == null) {
			classId = clazz.getName().getBytes(StandardCharsets.UTF_8);
		}
		headers.add(new RecordHeader(headerName, classId));
	}

	/**
	 * Retrieve a required type header as a String.
	 * @param headers the headers.
	 * @param headerName the header name.
	 * @return the header value; never null.
	 * @throws MessageConversionException if the header is absent or has a null value.
	 */
	protected String retrieveHeader(Headers headers, String headerName) {
		String classId = retrieveHeaderAsString(headers, headerName);
		if (classId == null) {
			throw new MessageConversionException(
					"failed to convert Message content. Could not resolve " + headerName + " in header");
		}
		return classId;
	}

	/**
	 * Retrieve an optional type header as a String.
	 * @param headers the headers.
	 * @param headerName the header name.
	 * @return the last header's value decoded as UTF-8, or null when the header is
	 * absent or has a null value.
	 */
	protected String retrieveHeaderAsString(Headers headers, String headerName) {
		Header header = headers.lastHeader(headerName);
		if (header == null || header.value() == null) {
			return null;
		}
		return new String(header.value(), StandardCharsets.UTF_8);
	}

	// Rebuild the class -> token-bytes map from the token -> class map.
	private void createReverseMap() {
		this.classIdMapping.clear();
		this.idClassMapping.forEach((id, clazz) ->
				this.classIdMapping.put(clazz, id.getBytes(StandardCharsets.UTF_8)));
	}

	/**
	 * Return an unmodifiable view of the token-to-class mapping.
	 * @return the mapping.
	 */
	public Map<String, Class<?>> getIdClassMapping() {
		return Collections.unmodifiableMap(this.idClassMapping);
	}

	/**
	 * Configure the TypeMapper to use default key type class.
	 * @param isKey Use key type headers if true
	 * @since 2.1.3
	 */
	public void setUseForKey(boolean isKey) {
		if (isKey) {
			setClassIdFieldName(AbstractJavaTypeMapper.KEY_DEFAULT_CLASSID_FIELD_NAME);
			setContentClassIdFieldName(AbstractJavaTypeMapper.KEY_DEFAULT_CONTENT_CLASSID_FIELD_NAME);
			setKeyClassIdFieldName(AbstractJavaTypeMapper.KEY_DEFAULT_KEY_CLASSID_FIELD_NAME);
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/mapping/ClassMapper.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.mapping;
import org.apache.kafka.common.header.Headers;
/**
* Strategy for setting metadata on messages such that one can create the class
* that needs to be instantiated when receiving a message.
*
* @author Mark Pollack
* @author James Carr
* @author Gary Russell
*
* @since 2.1
*/
public interface ClassMapper {

// Write type metadata for the given class into the record headers so a consumer
// can determine which class to instantiate.
void fromClass(Class<?> clazz, Headers headers);

// Resolve the target class from the type metadata in the record headers.
Class<?> toClass(Headers headers);

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/mapping/DefaultJackson2JavaTypeMapper.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.mapping;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.kafka.common.header.Headers;
import org.springframework.messaging.converter.MessageConversionException;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.PatternMatchUtils;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.type.TypeFactory;
/**
* Jackson 2 type mapper.
*
* @author Mark Pollack
* @author Sam Nelson
* @author Andreas Asplund
* @author Artem Bilan
* @author Gary Russell
*
* @since 2.1
*/
public class DefaultJackson2JavaTypeMapper extends AbstractJavaTypeMapper
		implements Jackson2JavaTypeMapper {

	// Packages trusted for deserialization by default, in addition to classes
	// registered explicitly in the id/class mapping.
	private static final List<String> TRUSTED_PACKAGES =
			Arrays.asList(
				"java.util",
				"java.lang"
			);

	// Mutable set of trusted packages. An EMPTY set means "trust all packages"
	// (see addTrustedPackages: adding "*" clears the set).
	private final Set<String> trustedPackages = new LinkedHashSet<>(TRUSTED_PACKAGES);

	private volatile TypePrecedence typePrecedence = TypePrecedence.INFERRED;

	/**
	 * Return the precedence.
	 * @return the precedence.
	 * @see #setTypePrecedence(Jackson2JavaTypeMapper.TypePrecedence)
	 */
	@Override
	public TypePrecedence getTypePrecedence() {
		return this.typePrecedence;
	}

	@Override
	public void setTypePrecedence(TypePrecedence typePrecedence) {
		Assert.notNull(typePrecedence, "'typePrecedence' cannot be null");
		this.typePrecedence = typePrecedence;
	}

	/**
	 * Specify a set of packages to trust during deserialization.
	 * The asterisk ({@code *}) means trust all.
	 * @param packagesToTrust the trusted Java packages for deserialization
	 */
	@Override
	public void addTrustedPackages(String... packagesToTrust) {
		// An empty set already means "trust all", so there is nothing left to add.
		if (this.trustedPackages.size() == 0) {
			return;
		}
		if (packagesToTrust != null) {
			for (String trusted : packagesToTrust) {
				if ("*".equals(trusted)) {
					// "Trust all" is represented by clearing the set entirely.
					this.trustedPackages.clear();
					break;
				}
				else {
					this.trustedPackages.add(trusted);
				}
			}
		}
	}

	/**
	 * Resolve the target {@link JavaType} from the class id header(s), if present.
	 * For container types (other than arrays), the content class id header is also
	 * consulted; for map-like types, the key class id header as well.
	 * @param headers the headers.
	 * @return the resolved type, or {@code null} when no class id header is present.
	 */
	@Override
	public JavaType toJavaType(Headers headers) {
		String typeIdHeader = retrieveHeaderAsString(headers, getClassIdFieldName());
		if (typeIdHeader != null) {
			JavaType classType = getClassIdType(typeIdHeader);
			if (!classType.isContainerType() || classType.isArrayType()) {
				return classType;
			}
			JavaType contentClassType = getClassIdType(retrieveHeader(headers, getContentClassIdFieldName()));
			if (classType.getKeyType() == null) {
				// Collection-like container: only the content type is needed.
				return TypeFactory.defaultInstance()
						.constructCollectionLikeType(classType.getRawClass(), contentClassType);
			}
			JavaType keyClassType = getClassIdType(retrieveHeader(headers, getKeyClassIdFieldName()));
			return TypeFactory.defaultInstance()
					.constructMapLikeType(classType.getRawClass(), keyClassType, contentClassType);
		}
		return null;
	}

	/**
	 * Resolve a class id (either a mapped token or a fully qualified class name) to a
	 * {@link JavaType}, enforcing the trusted-packages check for unmapped names.
	 * @param classId the class id from the header.
	 * @return the resolved type.
	 */
	private JavaType getClassIdType(String classId) {
		if (getIdClassMapping().containsKey(classId)) {
			// Explicitly mapped ids bypass the trusted-package check.
			return TypeFactory.defaultInstance().constructType(getIdClassMapping().get(classId));
		}
		else {
			try {
				if (!isTrustedPackage(classId)) {
					throw new IllegalArgumentException("The class '" + classId
							+ "' is not in the trusted packages: "
							+ this.trustedPackages + ". "
							+ "If you believe this class is safe to deserialize, please provide its name. "
							+ "If the serialization is only done by a trusted source, you can also enable "
							+ "trust all (*).");
				}
				else {
					return TypeFactory.defaultInstance()
							.constructType(ClassUtils.forName(classId, getClassLoader()));
				}
			}
			catch (ClassNotFoundException e) {
				throw new MessageConversionException("failed to resolve class name. Class not found ["
						+ classId + "]", e);
			}
			catch (LinkageError e) {
				throw new MessageConversionException("failed to resolve class name. Linkage error ["
						+ classId + "]", e);
			}
		}
	}

	/**
	 * Check whether the package of the requested type matches one of the trusted
	 * package patterns. An empty trusted set means everything is trusted.
	 * @param requestedType the fully qualified class name from the header.
	 * @return true if trusted.
	 */
	private boolean isTrustedPackage(String requestedType) {
		if (!this.trustedPackages.isEmpty()) {
			// Strip the JVM array-of-objects prefix ("[L") so array class names
			// are checked against their element's package.
			String packageName = ClassUtils.getPackageName(requestedType).replaceFirst("\\[L", "");
			for (String trustedPackage : this.trustedPackages) {
				if (PatternMatchUtils.simpleMatch(trustedPackage, packageName)) {
					return true;
				}
			}
			return false;
		}
		return true;
	}

	/**
	 * Write the class id header(s) for the supplied type, replacing any existing ones.
	 * @param javaType the type.
	 * @param headers the headers.
	 */
	@Override
	public void fromJavaType(JavaType javaType, Headers headers) {
		String classIdFieldName = getClassIdFieldName();
		if (headers.lastHeader(classIdFieldName) != null) {
			// Avoid accumulating duplicate type headers on re-publish.
			removeHeaders(headers);
		}
		addHeader(headers, classIdFieldName, javaType.getRawClass());
		if (javaType.isContainerType() && !javaType.isArrayType()) {
			addHeader(headers, getContentClassIdFieldName(), javaType.getContentType().getRawClass());
		}
		if (javaType.getKeyType() != null) {
			addHeader(headers, getKeyClassIdFieldName(), javaType.getKeyType().getRawClass());
		}
	}

	@Override
	public void fromClass(Class<?> clazz, Headers headers) {
		fromJavaType(TypeFactory.defaultInstance().constructType(clazz), headers);
	}

	@Override
	public Class<?> toClass(Headers headers) {
		// NOTE(review): toJavaType() returns null when no class id header is present,
		// which makes this throw a NullPointerException — confirm that callers only
		// invoke this when the header is known to exist.
		return toJavaType(headers).getRawClass();
	}

	/**
	 * Remove all type information headers; best effort — failures are ignored.
	 * @param headers the headers.
	 */
	@Override
	public void removeHeaders(Headers headers) {
		try {
			headers.remove(getClassIdFieldName());
			headers.remove(getContentClassIdFieldName());
			headers.remove(getKeyClassIdFieldName());
		}
		catch (Exception e) { // NOSONAR
			// NOSONAR - deliberately swallowed; header removal is not critical.
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/mapping/Jackson2JavaTypeMapper.java | /*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.mapping;
import org.apache.kafka.common.header.Headers;
import com.fasterxml.jackson.databind.JavaType;
/**
* Strategy for setting metadata on messages such that one can create the class that needs
* to be instantiated when receiving a message.
*
* @author Mark Pollack
* @author James Carr
* @author Sam Nelson
* @author Andreas Asplund
* @author Gary Russell
*
* @since 2.1
*/
public interface Jackson2JavaTypeMapper extends ClassMapper {

	/**
	 * The precedence for type conversion - inferred from the method parameter or message
	 * headers. Only applies if both exist.
	 */
	enum TypePrecedence {

		/**
		 * The type is inferred from the destination method.
		 */
		INFERRED,

		/**
		 * The type is obtained from headers.
		 */
		TYPE_ID
	}

	/**
	 * Write type information for the supplied type into the headers.
	 * @param javaType the type.
	 * @param headers the headers.
	 */
	void fromJavaType(JavaType javaType, Headers headers);

	/**
	 * Determine the type from the type information in the headers, if present.
	 * @param headers the headers.
	 * @return the type.
	 */
	JavaType toJavaType(Headers headers);

	/**
	 * Return the precedence used when both an inferred type and type headers exist.
	 * @return the precedence.
	 * @see #setTypePrecedence(TypePrecedence)
	 */
	TypePrecedence getTypePrecedence();

	/**
	 * Set the precedence for evaluating type information in message properties.
	 * When using {@code @KafkaListener} at the method level, the framework attempts
	 * to determine the target type for payload conversion from the method signature.
	 * If so, this type is provided by the {@code MessagingMessageListenerAdapter}.
	 * <p> By default, if the type is concrete (not abstract, not an interface), this will
	 * be used ahead of type information provided in the {@code __TypeId__} and
	 * associated headers provided by the sender.
	 * <p> If you wish to force the use of the {@code __TypeId__} and associated headers
	 * (such as when the actual type is a subclass of the method argument type),
	 * set the precedence to {@link Jackson2JavaTypeMapper.TypePrecedence#TYPE_ID}.
	 * @param typePrecedence the precedence.
	 * @since 2.2
	 */
	default void setTypePrecedence(TypePrecedence typePrecedence) {
		throw new UnsupportedOperationException("This mapper does not support this method");
	}

	/**
	 * Add packages that are trusted for deserialization.
	 * @param packages the packages.
	 */
	void addTrustedPackages(String... packages);

	/**
	 * Remove the type information headers.
	 * @param headers the headers.
	 * @since 2.2
	 */
	default void removeHeaders(Headers headers) {
		// NOSONAR - intentionally a no-op by default.
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/mapping/package-info.java | /**
* Provides classes related to type mapping.
*/
package org.springframework.kafka.support.mapping; |
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/micrometer/MicrometerHolder.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.micrometer;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.lang.Nullable;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import io.micrometer.core.instrument.Timer.Builder;
import io.micrometer.core.instrument.Timer.Sample;
/**
* A wrapper for micrometer timers when available on the class path.
*
* @author Gary Russell
* @since 2.5
*
*/
public final class MicrometerHolder {

	private static final String NONE_EXCEPTION_METERS_KEY = "none";

	// One Timer per exception name; the "none" key holds the success timer.
	private final Map<String, Timer> meters = new ConcurrentHashMap<>();

	private final MeterRegistry registry;

	private final String timerName;

	private final String timerDesc;

	private final String name;

	private final Map<String, String> tags;

	/**
	 * Create an instance with the provided properties.
	 * @param context the application context from which to obtain the meter registry.
	 * @param name the value of the 'name' tag.
	 * @param timerName the timer name.
	 * @param timerDesc the timer description.
	 * @param tags additional tags.
	 */
	public MicrometerHolder(@Nullable ApplicationContext context, String name,
			String timerName, String timerDesc, Map<String, String> tags) {

		if (context == null) {
			throw new IllegalStateException("No micrometer registry present");
		}
		this.timerName = timerName;
		this.timerDesc = timerDesc;
		this.name = name;
		this.tags = tags;
		Map<String, MeterRegistry> candidates =
				filterRegistries(context.getBeansOfType(MeterRegistry.class, false, false), context);
		if (candidates.size() != 1) {
			throw new IllegalStateException("No micrometer registry present (or more than one and "
					+ "none marked @Primary)");
		}
		this.registry = candidates.values().iterator().next();
		buildTimer(NONE_EXCEPTION_METERS_KEY);
	}

	/**
	 * Narrow the candidate registries to the single @Primary one, when exactly one
	 * is marked primary; otherwise return the candidates unchanged.
	 */
	private Map<String, MeterRegistry> filterRegistries(Map<String, MeterRegistry> registries,
			ApplicationContext context) {

		if (registries.size() == 1) {
			return registries;
		}
		MeterRegistry soloPrimary = null;
		if (context instanceof ConfigurableApplicationContext) {
			BeanDefinitionRegistry definitions = (BeanDefinitionRegistry) ((ConfigurableApplicationContext) context)
					.getBeanFactory();
			for (Entry<String, MeterRegistry> candidate : registries.entrySet()) {
				if (definitions.getBeanDefinition(candidate.getKey()).isPrimary()) {
					if (soloPrimary != null) {
						// More than one primary - ambiguous, fall back to all candidates.
						soloPrimary = null;
						break;
					}
					soloPrimary = candidate.getValue();
				}
			}
		}
		return soloPrimary == null ? registries : Collections.singletonMap("primary", soloPrimary);
	}

	/**
	 * Start the timer.
	 * @return the sample.
	 */
	public Object start() {
		return Timer.start(this.registry);
	}

	/**
	 * Record success.
	 * @param sample the sample.
	 * @see #start()
	 */
	public void success(Object sample) {
		Timer successTimer = this.meters.get(NONE_EXCEPTION_METERS_KEY);
		if (successTimer != null) {
			((Sample) sample).stop(successTimer);
		}
	}

	/**
	 * Record failure.
	 * @param sample the sample.
	 * @param exception the exception name.
	 * @see #start()
	 */
	public void failure(Object sample, String exception) {
		Timer failureTimer = this.meters.get(exception);
		((Sample) sample).stop(failureTimer != null ? failureTimer : buildTimer(exception));
	}

	// Register (and cache) the timer tagged for the given exception name;
	// the reserved "none" key produces the success-tagged timer.
	private Timer buildTimer(String exception) {
		boolean success = NONE_EXCEPTION_METERS_KEY.equals(exception);
		Builder builder = Timer.builder(this.timerName)
				.description(this.timerDesc)
				.tag("name", this.name)
				.tag("result", success ? "success" : "failure")
				.tag("exception", exception);
		if (this.tags != null && !this.tags.isEmpty()) {
			this.tags.forEach(builder::tag);
		}
		Timer registered = builder.register(this.registry);
		this.meters.put(exception, registered);
		return registered;
	}

	/**
	 * Remove the timers.
	 */
	public void destroy() {
		for (Timer timer : this.meters.values()) {
			this.registry.remove(timer);
		}
		this.meters.clear();
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/micrometer/package-info.java | /**
* Provides classes to support Micrometer.
*/
package org.springframework.kafka.support.micrometer;
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DelegatingByTopicDeserializer.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;
/**
* A {@link Deserializer} that delegates to other deserializers based on the topic name.
*
* @author Gary Russell
* @since 2.8
*
*/
public class DelegatingByTopicDeserializer extends DelegatingByTopicSerialization<Deserializer<?>>
		implements Deserializer<Object> {

	/**
	 * Construct an instance that will be configured in {@link #configure(Map, boolean)}
	 * with consumer properties.
	 */
	public DelegatingByTopicDeserializer() {
	}

	/**
	 * Construct an instance with the supplied mapping of topic name patterns to delegate
	 * deserializers.
	 * @param delegates the map of delegates.
	 * @param defaultDelegate the default to use when no topic name match.
	 */
	public DelegatingByTopicDeserializer(Map<Pattern, Deserializer<?>> delegates, Deserializer<?> defaultDelegate) {
		super(delegates, defaultDelegate);
	}

	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		// Widens the protected base implementation to satisfy the Deserializer contract.
		super.configure(configs, isKey);
	}

	@Override
	protected Deserializer<?> configureDelegate(Map<String, ?> configs, boolean isKey, Deserializer<?> delegate) {
		delegate.configure(configs, isKey);
		return delegate;
	}

	@Override
	protected boolean isInstance(Object delegate) {
		return Deserializer.class.isInstance(delegate);
	}

	@Override
	public Object deserialize(String topic, byte[] data) {
		// Only the Headers-aware variant is supported for topic-based delegation.
		throw new UnsupportedOperationException();
	}

	@Override
	public Object deserialize(String topic, Headers headers, byte[] data) {
		Deserializer<?> delegate = findDelegate(topic);
		return delegate.deserialize(topic, headers, data);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DelegatingByTopicSerialization.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
/**
* Base class with common code for delegating by topic serialization.
*
* @param <T> the type.
*
* @author Gary Russell
* @since 2.8
*
*/
public abstract class DelegatingByTopicSerialization<T extends Closeable> implements Closeable {

	private static final String UNCHECKED = "unchecked";

	// NOTE(review): the log category is DelegatingDeserializer, not this class;
	// looks like a copy/paste leftover - confirm before changing.
	private static final LogAccessor LOGGER = new LogAccessor(DelegatingDeserializer.class);

	/**
	 * Name of the configuration property containing the serialization selector map for
	 * values with format {@code selector:class,...}.
	 */
	public static final String VALUE_SERIALIZATION_TOPIC_CONFIG = "spring.kafka.value.serialization.bytopic.config";

	/**
	 * Name of the configuration property containing the serialization topic pattern map for
	 * keys with format {@code pattern:class,...}.
	 */
	public static final String KEY_SERIALIZATION_TOPIC_CONFIG = "spring.kafka.key.serialization.bytopic.config";

	/**
	 * Name of the configuration property for the default delegate for values when no
	 * topic name pattern matches.
	 */
	public static final String VALUE_SERIALIZATION_TOPIC_DEFAULT = "spring.kafka.value.serialization.bytopic.default";

	/**
	 * Name of the configuration property for the default delegate for keys when no
	 * topic name pattern matches.
	 */
	public static final String KEY_SERIALIZATION_TOPIC_DEFAULT = "spring.kafka.key.serialization.bytopic.default";

	/**
	 * Set to false to make topic pattern matching case-insensitive.
	 * <p>NOTE(review): despite the property name ending in {@code case.insensitive},
	 * the configured value is interpreted as the case-SENSITIVE flag (see
	 * {@link #configure(Map, boolean)}) - confirm intended semantics before changing.
	 */
	public static final String CASE_SENSITIVE = "spring.kafka.value.serialization.bytopic.case.insensitive";

	// Topic-name pattern -> delegate (de)serializer.
	private final Map<Pattern, T> delegates = new ConcurrentHashMap<>();

	// Pattern strings already registered; prevents configuring the same pattern twice.
	private final Set<String> patterns = ConcurrentHashMap.newKeySet();

	// Used when no pattern matches the topic; may remain null.
	private T defaultDelegate;

	// true when this instance handles record keys (set in configure()).
	private boolean forKeys;

	// true (the default) = case-sensitive topic pattern matching.
	private boolean cased = true;

	/**
	 * Construct an instance that will be configured via {@link #configure(Map, boolean)}.
	 */
	public DelegatingByTopicSerialization() {
	}

	/**
	 * Construct an instance with the supplied delegates and default.
	 * @param delegates the map of topic patterns to delegates.
	 * @param defaultDelegate the delegate to use when no pattern matches.
	 */
	public DelegatingByTopicSerialization(Map<Pattern, T> delegates, T defaultDelegate) {
		Assert.notNull(delegates, "'delegates' cannot be null");
		Assert.notNull(defaultDelegate, "'defaultDelegate' cannot be null");
		this.delegates.putAll(delegates);
		delegates.keySet().forEach(pattern -> Assert.isTrue(this.patterns.add(pattern.pattern()),
				"Duplicate pattern: " + pattern.pattern()));
		this.defaultDelegate = defaultDelegate;
	}

	/**
	 * Set to false to make topic name matching case insensitive.
	 * @param caseSensitive false for case insensitive.
	 */
	public void setCaseSensitive(boolean caseSensitive) {
		this.cased = caseSensitive;
	}

	/**
	 * Configure this instance (and any pre-supplied delegates) from Kafka client
	 * properties: the case-sensitivity flag, the default delegate, and the
	 * pattern-to-delegate map (supplied either as a Map or as a
	 * {@code pattern:class,...} String).
	 * @param configs the configs.
	 * @param isKey true for key (de)serialization.
	 */
	@SuppressWarnings(UNCHECKED)
	protected void configure(Map<String, ?> configs, boolean isKey) {
		if (this.delegates.size() > 0) {
			this.delegates.values().forEach(delegate -> configureDelegate(configs, isKey, delegate));
		}
		this.forKeys = isKey;
		Object insensitive = configs.get(CASE_SENSITIVE);
		if (insensitive instanceof String) {
			this.cased = Boolean.parseBoolean((String) insensitive);
		}
		else if (insensitive instanceof Boolean) {
			this.cased = (Boolean) insensitive;
		}
		String configKey = defaultKey();
		if (configKey != null && configs.containsKey(configKey)) {
			buildDefault(configs, configKey, isKey, configs.get(configKey));
		}
		configKey = configKey();
		Object value = configs.get(configKey);
		if (value == null) {
			return;
		}
		else if (value instanceof Map) {
			processMap(configs, isKey, configKey, (Map<Object, Object>) value);
		}
		else if (value instanceof String) {
			this.delegates.putAll(createDelegates((String) value, configs, isKey));
		}
		else {
			throw new IllegalStateException(
					configKey + " must be a map or String, not " + value.getClass());
		}
	}

	// Property name holding the default delegate, depending on key/value mode.
	private String defaultKey() {
		return this.forKeys ? KEY_SERIALIZATION_TOPIC_DEFAULT : VALUE_SERIALIZATION_TOPIC_DEFAULT;
	}

	// Property name holding the pattern map, depending on key/value mode.
	private String configKey() {
		return this.forKeys ? KEY_SERIALIZATION_TOPIC_CONFIG : VALUE_SERIALIZATION_TOPIC_CONFIG;
	}

	// Build one delegate per map entry; keys may be Patterns or pattern Strings.
	private void processMap(Map<String, ?> configs, boolean isKey, String configKey, Map<Object, Object> value) {
		value.forEach((key, delegate) -> {
			Pattern pattern = obtainPattern(key);
			build(configs, isKey, configKey, delegate, pattern);
		});
	}

	/**
	 * Register a delegate for the pattern; the delegate may be an instance, a Class,
	 * or a class name. Silently skipped (debug-logged) if the pattern is already
	 * registered.
	 * @param configs the configs.
	 * @param isKey true for keys.
	 * @param configKey the property name (for the error message only).
	 * @param delegate the delegate instance, Class, or class name.
	 * @param pattern the topic pattern.
	 */
	@SuppressWarnings(UNCHECKED)
	protected void build(Map<String, ?> configs, boolean isKey, String configKey, Object delegate, Pattern pattern) {
		if (isInstance(delegate)) {
			if (!this.patterns.add(pattern.pattern())) {
				LOGGER.debug(() -> "Delegate already configured for " + pattern.pattern());
				return;
			}
			this.delegates.put(pattern, (T) delegate);
			configureDelegate(configs, isKey, (T) delegate);
		}
		else if (delegate instanceof Class) {
			instantiateAndConfigure(configs, isKey, this.delegates, pattern, (Class<?>) delegate);
		}
		else if (delegate instanceof String) {
			createInstanceAndConfigure(configs, isKey, this.delegates, pattern, (String) delegate);
		}
		else {
			throw new IllegalStateException(configKey
					+ " map entries must be Serializers or class names, not " + delegate.getClass());
		}
	}

	/**
	 * Set up the default delegate; accepts an instance, a Class, or a class name.
	 * @param configs the configs.
	 * @param configKey the property name (for the error message only).
	 * @param isKey true for keys.
	 * @param delegate the delegate instance, Class, or class name.
	 */
	@SuppressWarnings(UNCHECKED)
	protected void buildDefault(Map<String, ?> configs, String configKey, boolean isKey, Object delegate) {
		if (isInstance(delegate)) {
			this.defaultDelegate = configureDelegate(configs, isKey, (T) delegate);
		}
		else if (delegate instanceof Class) {
			this.defaultDelegate = instantiateAndConfigure(configs, isKey, this.delegates, null, (Class<?>) delegate);
		}
		else if (delegate instanceof String) {
			this.defaultDelegate = createInstanceAndConfigure(configs, isKey, this.delegates, null, (String) delegate);
		}
		else {
			throw new IllegalStateException(configKey
					+ " map entries must be Serializers or class names, not " + delegate.getClass());
		}
	}

	/**
	 * Configure the delegate.
	 *
	 * @param configs the configs.
	 * @param isKey true if this is for keys.
	 * @param delegate the delegate.
	 * @return the delegate.
	 */
	protected abstract T configureDelegate(Map<String, ?> configs, boolean isKey, T delegate);

	/**
	 * Return true if this object is an instance of T.
	 * @param delegate the delegate.
	 * @return true if a T.
	 */
	protected abstract boolean isInstance(Object delegate);

	// Parse a "pattern:class,pattern:class" string into a pattern->delegate map.
	private Map<Pattern, T> createDelegates(String mappings, Map<String, ?> configs, boolean isKey) {
		Map<Pattern, T> delegateMap = new HashMap<>();
		String[] array = StringUtils.commaDelimitedListToStringArray(mappings);
		for (String entry : array) {
			String[] split = entry.split(":");
			Assert.isTrue(split.length == 2, "Each comma-delimited selector entry must have exactly one ':'");
			createInstanceAndConfigure(configs, isKey, delegateMap, obtainPattern(split[0]), split[1]);
		}
		return delegateMap;
	}

	// Resolve the class name, then instantiate and configure the delegate.
	@Nullable
	private T createInstanceAndConfigure(Map<String, ?> configs, boolean isKey,
			Map<Pattern, T> delegates2, @Nullable Pattern pattern, String className) {
		try {
			Class<?> clazz = ClassUtils.forName(className.trim(), ClassUtils.getDefaultClassLoader());
			return instantiateAndConfigure(configs, isKey, delegates2, pattern, clazz);
		}
		catch (ClassNotFoundException | LinkageError e) {
			throw new IllegalArgumentException(e);
		}
	}

	// Coerce a map key into a Pattern, honoring the case-sensitivity setting for Strings.
	private Pattern obtainPattern(Object key) {
		if (key instanceof Pattern) {
			return (Pattern) key;
		}
		else if (key instanceof String) {
			if (this.cased) {
				return Pattern.compile(((String) key).trim());
			}
			else {
				return Pattern.compile(((String) key).trim(), Pattern.CASE_INSENSITIVE);
			}
		}
		else {
			throw new IllegalStateException("Map key must be a Pattern or a String, not a " + key.getClass());
		}
	}

	/**
	 * Instantiate the delegate class via its no-arg constructor, configure it, and
	 * (when a pattern is supplied) register it. Returns null when the pattern is
	 * already registered.
	 * @param configs the configs.
	 * @param isKey true for keys.
	 * @param delegates2 the map to register the delegate in.
	 * @param pattern the topic pattern, or null for the default delegate.
	 * @param clazz the delegate class.
	 * @return the delegate, or null if the pattern was a duplicate.
	 */
	protected T instantiateAndConfigure(Map<String, ?> configs, boolean isKey, Map<Pattern, T> delegates2,
			@Nullable Pattern pattern, Class<?> clazz) {

		if (pattern != null && !this.patterns.add(pattern.pattern())) {
			LOGGER.debug(() -> "Delegate already configured for " + pattern.pattern());
			return null;
		}
		try {
			@SuppressWarnings(UNCHECKED)
			T delegate = (T) clazz.getDeclaredConstructor().newInstance();
			configureDelegate(configs, isKey, delegate);
			if (pattern != null) {
				delegates2.put(pattern, delegate);
			}
			return delegate;
		}
		catch (Exception e) {
			throw new IllegalArgumentException(e);
		}
	}

	/**
	 * Add a delegate for the pattern.
	 * @param pattern the pattern.
	 * @param serializer the delegate.
	 */
	public void addDelegate(Pattern pattern, T serializer) {
		this.delegates.put(pattern, serializer);
	}

	/**
	 * Remove the delegate for the pattern.
	 * @param pattern the pattern.
	 * @return the removed delegate, or null if none was registered.
	 */
	@Nullable
	public T removeDelegate(Pattern pattern) {
		return this.delegates.remove(pattern);
	}

	/**
	 * Determine the delegate for the topic.
	 * @param topic the topic.
	 * @return the delegate.
	 */
	@SuppressWarnings(UNCHECKED)
	protected T findDelegate(String topic) {
		// First matching pattern wins (map iteration order is not guaranteed for
		// ConcurrentHashMap), then fall back to the default delegate.
		T delegate = null;
		for (Entry<Pattern, T> entry : this.delegates.entrySet()) {
			if (entry.getKey().matcher(topic).matches()) {
				delegate = entry.getValue();
				break;
			}
		}
		if (delegate == null) {
			delegate = this.defaultDelegate;
		}
		if (delegate == null) {
			throw new IllegalStateException(
					"No (de)serializer found for topic '" + topic + "'");
		}
		return delegate;
	}

	/**
	 * Close all delegates; individual failures are logged, not propagated.
	 */
	@Override
	public void close() {
		this.delegates.values().forEach(delegate -> {
			try {
				delegate.close();
			}
			catch (IOException ex) {
				LOGGER.error(ex, () -> "Failed to close " + delegate);
			}
		});
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DelegatingByTopicSerializer.java | /*
* Copyright 2019-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;
/**
* A {@link Serializer} that delegates to other serializers based on a topic pattern.
*
* @author Gary Russell
* @since 2.8
*
*/
public class DelegatingByTopicSerializer extends DelegatingByTopicSerialization<Serializer<?>>
		implements Serializer<Object> {

	/**
	 * Construct an instance that will be configured in {@link #configure(Map, boolean)}
	 * with producer properties {@link #VALUE_SERIALIZATION_TOPIC_CONFIG} and
	 * {@link #KEY_SERIALIZATION_TOPIC_CONFIG}.
	 */
	public DelegatingByTopicSerializer() {
	}

	/**
	 * Construct an instance with the supplied mapping of topic patterns to delegate
	 * serializers.
	 * @param delegates the map of delegates.
	 * @param defaultDelegate the default to use when no topic name match.
	 */
	public DelegatingByTopicSerializer(Map<Pattern, Serializer<?>> delegates, Serializer<?> defaultDelegate) {
		super(delegates, defaultDelegate);
	}

	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		// Widens the protected base implementation to satisfy the Serializer contract.
		super.configure(configs, isKey);
	}

	@Override
	protected Serializer<?> configureDelegate(Map<String, ?> configs, boolean isKey, Serializer<?> delegate) {
		delegate.configure(configs, isKey);
		return delegate;
	}

	@Override
	protected boolean isInstance(Object instance) {
		return Serializer.class.isInstance(instance);
	}

	@Override
	public byte[] serialize(String topic, Object data) {
		// Only the Headers-aware variant is supported for topic-based delegation.
		throw new UnsupportedOperationException();
	}

	@SuppressWarnings("unchecked")
	@Override
	public byte[] serialize(String topic, Headers headers, Object data) {
		if (data == null) {
			return null;
		}
		Serializer<Object> delegate = (Serializer<Object>) findDelegate(topic);
		return delegate.serialize(topic, headers, data);
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DelegatingByTypeSerializer.java | /*
* Copyright 2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.util.Assert;
/**
* Delegates to a serializer based on type.
*
* @author Gary Russell
* @since 2.7.9
*
*/
public class DelegatingByTypeSerializer implements Serializer<Object> {

	private static final String RAWTYPES = "rawtypes";

	// Ordered so that, in assignable mode, entries are checked in insertion order.
	@SuppressWarnings(RAWTYPES)
	private final Map<Class<?>, Serializer> delegates = new LinkedHashMap<>();

	private final boolean assignable;

	/**
	 * Construct an instance with the map of delegates; keys matched exactly.
	 * @param delegates the delegates.
	 */
	@SuppressWarnings(RAWTYPES)
	public DelegatingByTypeSerializer(Map<Class<?>, Serializer> delegates) {
		this(delegates, false);
	}

	/**
	 * Construct an instance with the map of delegates; keys matched exactly or if the
	 * target object is assignable to the key, depending on the assignable argument.
	 * If assignable, entries are checked in the natural entry order so an ordered map
	 * such as a {@link LinkedHashMap} is recommended.
	 * @param delegates the delegates.
	 * @param assignable whether the target is assignable to the key.
	 * @since 2.8.3
	 */
	@SuppressWarnings(RAWTYPES)
	public DelegatingByTypeSerializer(Map<Class<?>, Serializer> delegates, boolean assignable) {
		Assert.notNull(delegates, "'delegates' cannot be null");
		Assert.noNullElements(delegates.values(), "Serializers in delegates map cannot be null");
		this.delegates.putAll(delegates);
		this.assignable = assignable;
	}

	/**
	 * Returns true if {@link #findDelegate(Object, Map)} should consider assignability to
	 * the key rather than an exact match.
	 * @return true if assignable.
	 * @since 2.8.3
	 */
	protected boolean isAssignable() {
		return this.assignable;
	}

	@SuppressWarnings("unchecked")
	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		this.delegates.values().forEach(del -> del.configure(configs, isKey));
	}

	@SuppressWarnings({ RAWTYPES, "unchecked" })
	@Override
	public byte[] serialize(String topic, Object data) {
		if (data == null) {
			return null;
		}
		Serializer delegate = findDelegate(data, this.delegates);
		return delegate.serialize(topic, data);
	}

	@SuppressWarnings({ "unchecked", RAWTYPES })
	@Override
	public byte[] serialize(String topic, Headers headers, Object data) {
		if (data == null) {
			return null;
		}
		Serializer delegate = findDelegate(data, this.delegates);
		return delegate.serialize(topic, headers, data);
	}

	/**
	 * Determine the serializer for the data type.
	 * @param data the data.
	 * @param delegates the available delegates.
	 * @return the delegate.
	 * @throws SerializationException when there is no match.
	 * @since 2.8.3
	 */
	@SuppressWarnings(RAWTYPES)
	protected Serializer findDelegate(Object data, Map<Class<?>, Serializer> delegates) {
		if (!this.assignable) {
			// Exact type match only.
			Serializer delegate = delegates.get(data.getClass());
			if (delegate == null) {
				throw noMatchingDelegate(data, delegates);
			}
			return delegate;
		}
		// First entry (in map order) whose key the data's type is assignable to wins.
		// Consistently uses the supplied map (the original iterated this.delegates
		// here, silently ignoring the parameter in this branch).
		for (Entry<Class<?>, Serializer> entry : delegates.entrySet()) {
			if (entry.getKey().isAssignableFrom(data.getClass())) {
				return entry.getValue();
			}
		}
		throw noMatchingDelegate(data, delegates);
	}

	// Build the "no matching delegate" exception, listing the supported types.
	@SuppressWarnings(RAWTYPES)
	private static SerializationException noMatchingDelegate(Object data, Map<Class<?>, Serializer> delegates) {
		return new SerializationException("No matching delegate for type: " + data.getClass().getName()
				+ "; supported types: " + delegates.keySet().stream()
						.map(Class::getName)
						.collect(Collectors.toList()));
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DelegatingDeserializer.java | /*
* Copyright 2019-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
/**
* A {@link Deserializer} that delegates to other deserializers based on a serialization
* selector header. It is not necessary to configure standard deserializers supported by
* {@link Serdes}.
*
* @author Gary Russell
* @since 2.3
*
*/
public class DelegatingDeserializer implements Deserializer<Object> {

	// Selector value -> delegate; concurrent because trySerdes() can add entries
	// lazily while records are being deserialized.
	private final Map<String, Deserializer<? extends Object>> delegates = new ConcurrentHashMap<>();

	// Snapshot of the consumer configs, used to configure delegates created lazily.
	private final Map<String, Object> autoConfigs = new HashMap<>();

	private boolean forKeys;

	/**
	 * Construct an instance that will be configured in {@link #configure(Map, boolean)}
	 * with consumer properties
	 * {@link DelegatingSerializer#KEY_SERIALIZATION_SELECTOR_CONFIG} and
	 * {@link DelegatingSerializer#VALUE_SERIALIZATION_SELECTOR_CONFIG}.
	 */
	public DelegatingDeserializer() {
	}

	/**
	 * Construct an instance with the supplied mapping of selectors to delegate
	 * deserializers. The selector must be supplied in the
	 * {@link DelegatingSerializer#KEY_SERIALIZATION_SELECTOR_CONFIG} and
	 * {@link DelegatingSerializer#VALUE_SERIALIZATION_SELECTOR_CONFIG} headers. It is not
	 * necessary to configure standard deserializers supported by {@link Serdes}.
	 * @param delegates the map of delegates.
	 */
	public DelegatingDeserializer(Map<String, Deserializer<?>> delegates) {
		this.delegates.putAll(delegates);
	}

	@SuppressWarnings("unchecked")
	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		this.autoConfigs.putAll(configs);
		this.forKeys = isKey;
		String configKey = configKey();
		Object value = configs.get(configKey);
		if (value == null) {
			return;
		}
		if (value instanceof Map) {
			// Map values may be Deserializer instances, Class objects, or class names.
			((Map<String, Object>) value).forEach((selector, deser) -> {
				if (deser instanceof Deserializer) {
					this.delegates.put(selector, (Deserializer<?>) deser);
					((Deserializer<?>) deser).configure(configs, isKey);
				}
				else if (deser instanceof Class) {
					instantiateAndConfigure(configs, isKey, this.delegates, selector, (Class<?>) deser);
				}
				else if (deser instanceof String) {
					createInstanceAndConfigure(configs, isKey, this.delegates, selector, (String) deser);
				}
				else {
					throw new IllegalStateException(configKey
							+ " map entries must be Serializers or class names, not " + value.getClass());
				}
			});
		}
		else if (value instanceof String) {
			// Comma-delimited 'selector:className' pairs.
			this.delegates.putAll(createDelegates((String) value, configs, isKey));
		}
		else {
			throw new IllegalStateException(configKey + " must be a map or String, not " + value.getClass());
		}
	}

	private String configKey() {
		return this.forKeys
				? DelegatingSerializer.KEY_SERIALIZATION_SELECTOR_CONFIG
				: DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG;
	}

	/**
	 * Build a selector-to-deserializer map from comma-delimited
	 * {@code selector:className} pairs, instantiating and configuring each delegate.
	 * @param mappings the comma-delimited pairs.
	 * @param configs the configs to apply to each delegate.
	 * @param isKey true for keys.
	 * @return the map of delegates.
	 */
	protected static Map<String, Deserializer<?>> createDelegates(String mappings, Map<String, ?> configs,
			boolean isKey) {

		Map<String, Deserializer<?>> delegateMap = new HashMap<>();
		String[] array = StringUtils.commaDelimitedListToStringArray(mappings);
		for (String entry : array) {
			String[] split = entry.split(":");
			Assert.isTrue(split.length == 2, "Each comma-delimited selector entry must have exactly one ':'");
			createInstanceAndConfigure(configs, isKey, delegateMap, split[0], split[1]);
		}
		return delegateMap;
	}

	protected static void createInstanceAndConfigure(Map<String, ?> configs, boolean isKey,
			Map<String, Deserializer<?>> delegateMap, String selector, String className) {

		try {
			Class<?> clazz = ClassUtils.forName(className.trim(), ClassUtils.getDefaultClassLoader());
			instantiateAndConfigure(configs, isKey, delegateMap, selector, clazz);
		}
		catch (ClassNotFoundException | LinkageError e) {
			throw new IllegalArgumentException(e);
		}
	}

	protected static void instantiateAndConfigure(Map<String, ?> configs, boolean isKey,
			Map<String, Deserializer<?>> delegateMap, String selector, Class<?> clazz) {

		try {
			Deserializer<?> delegate = (Deserializer<?>) clazz.getDeclaredConstructor().newInstance();
			delegate.configure(configs, isKey);
			delegateMap.put(selector.trim(), delegate);
		}
		catch (Exception e) {
			throw new IllegalArgumentException(e);
		}
	}

	/**
	 * Add a delegate for the selector, replacing any existing one.
	 * @param selector the selector.
	 * @param deserializer the delegate.
	 */
	public void addDelegate(String selector, Deserializer<?> deserializer) {
		this.delegates.put(selector, deserializer);
	}

	/**
	 * Remove the delegate for the selector.
	 * @param selector the selector.
	 * @return the removed delegate, or null if none was present.
	 */
	@Nullable
	public Deserializer<?> removeDelegate(String selector) {
		return this.delegates.remove(selector);
	}

	@Override
	public Object deserialize(String topic, byte[] data) {
		throw new UnsupportedOperationException();
	}

	@Override
	public Object deserialize(String topic, Headers headers, byte[] data) {
		byte[] value = null;
		String selectorKey = selectorKey();
		Header header = headers.lastHeader(selectorKey);
		if (header != null) {
			value = header.value();
		}
		if (value == null) {
			throw new IllegalStateException("No '" + selectorKey + "' header present");
		}
		// Decode explicitly as UTF-8 rather than depending on the platform default
		// charset; strip quotes that JSON header converters may have added.
		String selector = new String(value, java.nio.charset.StandardCharsets.UTF_8).replace("\"", "");
		Deserializer<? extends Object> deserializer = this.delegates.get(selector);
		if (deserializer == null) {
			deserializer = trySerdes(selector);
		}
		if (deserializer == null) {
			// No delegate and no standard Serde for this selector: pass through raw bytes.
			return data;
		}
		else {
			return deserializer.deserialize(topic, headers, data);
		}
	}

	private String selectorKey() {
		return this.forKeys
				? DelegatingSerializer.KEY_SERIALIZATION_SELECTOR
				: DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR;
	}

	/*
	 * Package for testing.
	 */
	@Nullable
	Deserializer<? extends Object> trySerdes(String key) {
		try {
			Class<?> clazz = ClassUtils.forName(key, ClassUtils.getDefaultClassLoader());
			Serde<? extends Object> serdeFrom = Serdes.serdeFrom(clazz);
			Deserializer<? extends Object> deserializer = serdeFrom.deserializer();
			deserializer.configure(this.autoConfigs, this.forKeys);
			this.delegates.put(key, deserializer);
			return deserializer;
		}
		catch (IllegalStateException | ClassNotFoundException | LinkageError e) {
			// Cache a pass-through byte[] deserializer so the failed lookup is not
			// retried for every record with this selector.
			this.delegates.put(key, Serdes.serdeFrom(byte[].class).deserializer());
			return null;
		}
	}

	@Override
	public void close() {
		this.delegates.values().forEach(deser -> deser.close());
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DelegatingSerializer.java | /*
* Copyright 2019-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.core.log.LogAccessor;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
/**
* A {@link Serializer} that delegates to other serializers based on a serialization
* selector header. If the header is missing, and the type is supported by {@link Serdes}
* we will delegate to that serializer type.
*
* @author Gary Russell
* @since 2.3
*
*/
public class DelegatingSerializer implements Serializer<Object> {

	// Fixed: previously logged under DelegatingDeserializer's category.
	private static final LogAccessor LOGGER = new LogAccessor(DelegatingSerializer.class);

	/**
	 * Name of the header containing the serialization selector for values.
	 */
	public static final String VALUE_SERIALIZATION_SELECTOR = "spring.kafka.serialization.selector";

	/**
	 * Name of the header containing the serialization selector for keys.
	 */
	public static final String KEY_SERIALIZATION_SELECTOR = "spring.kafka.key.serialization.selector";

	/**
	 * Name of the configuration property containing the serialization selector map for
	 * values with format {@code selector:class,...}.
	 */
	public static final String VALUE_SERIALIZATION_SELECTOR_CONFIG = "spring.kafka.serialization.selector.config";

	/**
	 * Name of the configuration property containing the serialization selector map for
	 * keys with format {@code selector:class,...}.
	 */
	public static final String KEY_SERIALIZATION_SELECTOR_CONFIG = "spring.kafka.key.serialization.selector.config";

	// Selector value -> delegate; concurrent because trySerdes() can add entries
	// lazily while records are being serialized.
	private final Map<String, Serializer<?>> delegates = new ConcurrentHashMap<>();

	// Snapshot of the producer configs, used to configure delegates created lazily.
	private final Map<String, Object> autoConfigs = new HashMap<>();

	private boolean forKeys;

	/**
	 * Construct an instance that will be configured in {@link #configure(Map, boolean)}
	 * with producer properties {@link #VALUE_SERIALIZATION_SELECTOR_CONFIG} and
	 * {@link #KEY_SERIALIZATION_SELECTOR_CONFIG}.
	 */
	public DelegatingSerializer() {
	}

	/**
	 * Construct an instance with the supplied mapping of selectors to delegate
	 * serializers. The selector must be supplied in the
	 * {@link #KEY_SERIALIZATION_SELECTOR} and/or {@link #VALUE_SERIALIZATION_SELECTOR}
	 * headers. It is not necessary to configure standard serializers supported by
	 * {@link Serdes}.
	 * @param delegates the map of delegates.
	 */
	public DelegatingSerializer(Map<String, Serializer<?>> delegates) {
		this.delegates.putAll(delegates);
	}

	@SuppressWarnings("unchecked")
	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		this.autoConfigs.putAll(configs);
		this.forKeys = isKey;
		String configKey = configKey();
		Object value = configs.get(configKey);
		if (value == null) {
			return;
		}
		else if (value instanceof Map) {
			// Map values may be Serializer instances, Class objects, or class names.
			((Map<String, Object>) value).forEach((selector, serializer) -> {
				if (serializer instanceof Serializer) {
					this.delegates.put(selector, (Serializer<?>) serializer);
					((Serializer<?>) serializer).configure(configs, isKey);
				}
				else if (serializer instanceof Class) {
					instantiateAndConfigure(configs, isKey, this.delegates, selector, (Class<?>) serializer);
				}
				else if (serializer instanceof String) {
					createInstanceAndConfigure(configs, isKey, this.delegates, selector, (String) serializer);
				}
				else {
					throw new IllegalStateException(configKey
							+ " map entries must be Serializers or class names, not " + value.getClass());
				}
			});
		}
		else if (value instanceof String) {
			// Comma-delimited 'selector:className' pairs.
			this.delegates.putAll(createDelegates((String) value, configs, isKey));
		}
		else {
			throw new IllegalStateException(
					configKey + " must be a map or String, not " + value.getClass());
		}
	}

	private String configKey() {
		return this.forKeys ? KEY_SERIALIZATION_SELECTOR_CONFIG : VALUE_SERIALIZATION_SELECTOR_CONFIG;
	}

	/**
	 * Build a selector-to-serializer map from comma-delimited
	 * {@code selector:className} pairs, instantiating and configuring each delegate.
	 * @param mappings the comma-delimited pairs.
	 * @param configs the configs to apply to each delegate.
	 * @param isKey true for keys.
	 * @return the map of delegates.
	 */
	protected static Map<String, Serializer<?>> createDelegates(String mappings, Map<String, ?> configs,
			boolean isKey) {

		Map<String, Serializer<?>> delegateMap = new HashMap<>();
		String[] array = StringUtils.commaDelimitedListToStringArray(mappings);
		for (String entry : array) {
			String[] split = entry.split(":");
			Assert.isTrue(split.length == 2, "Each comma-delimited selector entry must have exactly one ':'");
			createInstanceAndConfigure(configs, isKey, delegateMap, split[0], split[1]);
		}
		return delegateMap;
	}

	protected static void createInstanceAndConfigure(Map<String, ?> configs, boolean isKey,
			Map<String, Serializer<?>> delegateMap, String selector, String className) {

		try {
			Class<?> clazz = ClassUtils.forName(className.trim(), ClassUtils.getDefaultClassLoader());
			instantiateAndConfigure(configs, isKey, delegateMap, selector, clazz);
		}
		catch (ClassNotFoundException | LinkageError e) {
			throw new IllegalArgumentException(e);
		}
	}

	protected static void instantiateAndConfigure(Map<String, ?> configs, boolean isKey,
			Map<String, Serializer<?>> delegateMap, String selector, Class<?> clazz) {

		try {
			Serializer<?> delegate = (Serializer<?>) clazz.getDeclaredConstructor().newInstance();
			delegate.configure(configs, isKey);
			delegateMap.put(selector.trim(), delegate);
		}
		catch (Exception e) {
			throw new IllegalArgumentException(e);
		}
	}

	/**
	 * Add a delegate for the selector, replacing any existing one.
	 * @param selector the selector.
	 * @param serializer the delegate.
	 */
	public void addDelegate(String selector, Serializer<?> serializer) {
		this.delegates.put(selector, serializer);
	}

	/**
	 * Remove the delegate for the selector.
	 * @param selector the selector.
	 * @return the removed delegate, or null if none was present.
	 */
	@Nullable
	public Serializer<?> removeDelegate(String selector) {
		return this.delegates.remove(selector);
	}

	@Override
	public byte[] serialize(String topic, Object data) {
		throw new UnsupportedOperationException();
	}

	@Override
	public byte[] serialize(String topic, Headers headers, Object data) {
		if (data == null) {
			return null;
		}
		byte[] value = null;
		String selectorKey = selectorKey();
		Header header = headers.lastHeader(selectorKey);
		if (header != null) {
			value = header.value();
		}
		if (value == null) {
			// No explicit selector; fall back to a standard Serde for the data's type
			// and record the chosen selector in the headers for the consumer side.
			value = trySerdes(data);
			if (value == null) {
				throw new IllegalStateException("No '" + selectorKey
						+ "' header present and type (" + data.getClass().getName()
						+ ") is not supported by Serdes");
			}
			try {
				headers.add(new RecordHeader(selectorKey, value));
			}
			catch (IllegalStateException e) {
				// Headers may be read-only (e.g. already appended); best effort only.
				LOGGER.debug(e, () -> "Could not set header for type " + data.getClass());
			}
		}
		// Decode explicitly as UTF-8 rather than depending on the platform default
		// charset; strip quotes that JSON header converters may have added.
		String selector = new String(value, java.nio.charset.StandardCharsets.UTF_8).replace("\"", "");
		@SuppressWarnings("unchecked")
		Serializer<Object> serializer = (Serializer<Object>) this.delegates.get(selector);
		if (serializer == null) {
			throw new IllegalStateException(
					"No serializer found for '" + selectorKey + "' header with value '" + selector + "'");
		}
		return serializer.serialize(topic, headers, data);
	}

	private String selectorKey() {
		return this.forKeys ? KEY_SERIALIZATION_SELECTOR : VALUE_SERIALIZATION_SELECTOR;
	}

	/*
	 * Package for testing.
	 */
	@Nullable
	byte[] trySerdes(Object data) {
		try {
			Serde<? extends Object> serdeFrom = Serdes.serdeFrom(data.getClass());
			Serializer<?> serializer = serdeFrom.serializer();
			serializer.configure(this.autoConfigs, this.forKeys);
			String key = data.getClass().getName();
			this.delegates.put(key, serializer);
			// Encode explicitly as UTF-8 to match the header decoding.
			return key.getBytes(java.nio.charset.StandardCharsets.UTF_8);
		}
		catch (IllegalStateException e) {
			// Serdes.serdeFrom() throws ISE for unsupported types.
			return null;
		}
	}

	@Override
	public void close() {
		this.delegates.values().forEach(ser -> ser.close());
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/DeserializationException.java | /*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import org.apache.kafka.common.header.Headers;
import org.springframework.kafka.KafkaException;
import org.springframework.lang.Nullable;
/**
* Exception returned in the consumer record value or key when a deserialization failure
* occurs.
*
* @author Gary Russell
* @author Artem Bilan
*
* @since 2.2
*
*/
@SuppressWarnings("serial")
public class DeserializationException extends KafkaException {

	@Nullable
	private transient Headers headers;

	private final byte[] data;

	private final boolean isKey;

	/**
	 * Construct an instance with the provided properties.
	 * @param message the message.
	 * @param data the data (value or key).
	 * @param isKey true if the exception occurred while deserializing the key.
	 * @param cause the cause.
	 */
	public DeserializationException(String message, byte[] data, boolean isKey, Throwable cause) { // NOSONAR array reference
		super(message, cause);
		this.data = data; // NOSONAR array reference
		this.isKey = isKey;
	}

	/**
	 * Return the raw bytes that could not be deserialized (value or key).
	 * @return the data.
	 */
	public byte[] getData() {
		return this.data; // NOSONAR array reference
	}

	/**
	 * Whether the failure occurred while deserializing the key; when false, the
	 * value failed instead.
	 * @return true for the key.
	 */
	public boolean isKey() {
		return this.isKey;
	}

	/**
	 * Return the record headers, if they have been set.
	 * @return the headers.
	 */
	@Nullable
	public Headers getHeaders() {
		return this.headers;
	}

	/**
	 * Set the record headers; transient, so not part of serialized state.
	 * @param headers the headers.
	 */
	public void setHeaders(@Nullable Headers headers) {
		this.headers = headers;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/ErrorHandlingDeserializer.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import java.util.function.Function;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
* Delegating key/value deserializer that catches exceptions, returning them
* in the headers as serialized java objects.
*
* @param <T> class of the entity, representing messages
*
* @author Gary Russell
* @author Artem Bilan
* @author Victor Perez Rey
*
* @since 2.2
*
*/
public class ErrorHandlingDeserializer<T> implements Deserializer<T> {

	/**
	 * Header name for deserialization exceptions.
	 * @deprecated in favor of {@link SerializationUtils#DESERIALIZER_EXCEPTION_HEADER_PREFIX}.
	 */
	@Deprecated
	public static final String KEY_DESERIALIZER_EXCEPTION_HEADER_PREFIX =
			SerializationUtils.DESERIALIZER_EXCEPTION_HEADER_PREFIX;

	/**
	 * Header name for deserialization exceptions.
	 * @deprecated in favor of {@link SerializationUtils#KEY_DESERIALIZER_EXCEPTION_HEADER}.
	 */
	@Deprecated
	public static final String KEY_DESERIALIZER_EXCEPTION_HEADER = SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER;

	/**
	 * Header name for deserialization exceptions.
	 * @deprecated in favor of {@link SerializationUtils#VALUE_DESERIALIZER_EXCEPTION_HEADER}.
	 */
	@Deprecated
	public static final String VALUE_DESERIALIZER_EXCEPTION_HEADER = SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER;

	/**
	 * Supplier for a T when deserialization fails.
	 */
	public static final String KEY_FUNCTION = "spring.deserializer.key.function";

	/**
	 * Supplier for a T when deserialization fails.
	 */
	public static final String VALUE_FUNCTION = "spring.deserializer.value.function";

	/**
	 * Property name for the delegate key deserializer.
	 */
	public static final String KEY_DESERIALIZER_CLASS = "spring.deserializer.key.delegate.class";

	/**
	 * Property name for the delegate value deserializer.
	 */
	public static final String VALUE_DESERIALIZER_CLASS = "spring.deserializer.value.delegate.class";

	// The deserializer doing the real work; failures are intercepted here.
	private Deserializer<T> delegate;

	private boolean isForKey;

	// Optional fallback invoked when the delegate throws.
	private Function<FailedDeserializationInfo, T> failedDeserializationFunction;

	public ErrorHandlingDeserializer() {
	}

	public ErrorHandlingDeserializer(Deserializer<T> delegate) {
		this.delegate = setupDelegate(delegate);
	}

	/**
	 * Provide an alternative supplying mechanism when deserialization fails.
	 * @param failedDeserializationFunction the {@link Function} to use.
	 * @since 2.2.8
	 */
	public void setFailedDeserializationFunction(Function<FailedDeserializationInfo, T> failedDeserializationFunction) {
		this.failedDeserializationFunction = failedDeserializationFunction;
	}

	public boolean isForKey() {
		return this.isForKey;
	}

	/**
	 * Set to true if this deserializer is to be used as a key deserializer when
	 * configuring outside of Kafka.
	 * @param isKey true for a key deserializer, false otherwise.
	 * @since 2.2.3
	 */
	public void setForKey(boolean isKey) {
		this.isForKey = isKey;
	}

	/**
	 * Fluent variant of {@link #setForKey(boolean)}.
	 * @param isKey true for a key deserializer, false otherwise.
	 * @return this
	 * @since 2.2.3
	 */
	public ErrorHandlingDeserializer<T> keyDeserializer(boolean isKey) {
		this.isForKey = isKey;
		return this;
	}

	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		// A delegate supplied via constructor wins over one named in the configs.
		if (this.delegate == null) {
			String delegateProperty = isKey ? KEY_DESERIALIZER_CLASS : VALUE_DESERIALIZER_CLASS;
			setupDelegate(configs, delegateProperty);
		}
		Assert.state(this.delegate != null, "No delegate deserializer configured");
		this.delegate.configure(configs, isKey);
		this.isForKey = isKey;
		// Likewise, a function set programmatically wins over one named in the configs.
		if (this.failedDeserializationFunction == null) {
			String functionProperty = isKey ? KEY_FUNCTION : VALUE_FUNCTION;
			setupFunction(configs, functionProperty);
		}
	}

	public void setupDelegate(Map<String, ?> configs, String configKey) {
		if (!configs.containsKey(configKey)) {
			return;
		}
		try {
			Object value = configs.get(configKey);
			Class<?> clazz;
			if (value instanceof Class) {
				clazz = (Class<?>) value;
			}
			else {
				clazz = ClassUtils.forName((String) value, null);
			}
			this.delegate = setupDelegate(clazz.getDeclaredConstructor().newInstance());
		}
		catch (Exception ex) {
			throw new IllegalStateException(ex);
		}
	}

	@SuppressWarnings("unchecked")
	private Deserializer<T> setupDelegate(Object delegate) {
		Assert.isInstanceOf(Deserializer.class, delegate, "'delegate' must be a 'Deserializer', not a ");
		return (Deserializer<T>) delegate;
	}

	@SuppressWarnings("unchecked")
	private void setupFunction(Map<String, ?> configs, String configKey) {
		if (!configs.containsKey(configKey)) {
			return;
		}
		try {
			Object value = configs.get(configKey);
			Class<?> clazz;
			if (value instanceof Class) {
				clazz = (Class<?>) value;
			}
			else {
				clazz = ClassUtils.forName((String) value, null);
			}
			Assert.isTrue(Function.class.isAssignableFrom(clazz), "'function' must be a 'Function ', not a "
					+ clazz.getName());
			this.failedDeserializationFunction = (Function<FailedDeserializationInfo, T>)
					clazz.getDeclaredConstructor().newInstance();
		}
		catch (Exception ex) {
			throw new IllegalStateException(ex);
		}
	}

	@Override
	public T deserialize(String topic, byte[] data) {
		try {
			return this.delegate.deserialize(topic, data);
		}
		catch (Exception ex) {
			return recoverFromSupplier(topic, null, data, ex);
		}
	}

	@Override
	public T deserialize(String topic, Headers headers, byte[] data) {
		try {
			// Clear any stale exception header from a previous failed attempt; kept
			// inside the try so read-only headers are also routed to recovery.
			headers.remove(this.isForKey
					? SerializationUtils.KEY_DESERIALIZER_EXCEPTION_HEADER
					: SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER);
			return this.delegate.deserialize(topic, headers, data);
		}
		catch (Exception ex) {
			// Record the failure in the headers for downstream error handlers.
			SerializationUtils.deserializationException(headers, data, ex, this.isForKey);
			return recoverFromSupplier(topic, headers, data, ex);
		}
	}

	// Delegate to the failure function when one is present; otherwise yield null so the
	// record is still delivered (with the exception header) rather than poisoning the poll.
	private T recoverFromSupplier(String topic, Headers headers, byte[] data, Exception exception) {
		if (this.failedDeserializationFunction == null) {
			return null;
		}
		return this.failedDeserializationFunction.apply(
				new FailedDeserializationInfo(topic, headers, data, this.isForKey, exception));
	}

	@Override
	public void close() {
		if (this.delegate != null) {
			this.delegate.close();
		}
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/FailedDeserializationInfo.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Arrays;
import org.apache.kafka.common.header.Headers;
/**
* Class containing all the contextual information around a deserialization error.
*
* @author Victor Perez Rey
* @author Artem Bilan
*
* @since 2.2.8
*/
public class FailedDeserializationInfo {

	private final String topic;

	private final Headers headers;

	private final byte[] data;

	private final boolean isForKey;

	private final Exception exception;

	/**
	 * Construct an instance with the contextual information.
	 * @param topic topic associated with the data.
	 * @param headers headers associated with the record; may be empty.
	 * @param data serialized bytes; may be null.
	 * @param isForKey true for a key deserializer, false otherwise.
	 * @param exception exception causing the deserialization error.
	 */
	public FailedDeserializationInfo(String topic, Headers headers, byte[] data, boolean isForKey,
			Exception exception) {

		this.topic = topic;
		this.headers = headers;
		// Defensive copy, tolerating null data (the Javadoc contract allows it;
		// the previous unconditional copy threw NPE for null).
		this.data = data == null ? null : Arrays.copyOf(data, data.length);
		this.isForKey = isForKey;
		this.exception = exception;
	}

	public String getTopic() {
		return this.topic;
	}

	public Headers getHeaders() {
		return this.headers;
	}

	/**
	 * Return a copy of the serialized bytes, or null if none were available.
	 * @return the data.
	 */
	public byte[] getData() {
		return this.data == null ? null : Arrays.copyOf(this.data, this.data.length);
	}

	public boolean isForKey() {
		return this.isForKey;
	}

	public Exception getException() {
		return this.exception;
	}

	@Override
	public String toString() {
		return "FailedDeserializationInfo{" +
				"topic='" + this.topic + '\'' +
				", headers=" + this.headers +
				", data=" + Arrays.toString(this.data) +
				", isForKey=" + this.isForKey +
				", exception=" + this.exception +
				'}';
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/JsonDeserializer.java | /*
* Copyright 2015-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;
import org.springframework.core.ResolvableType;
import org.springframework.kafka.support.JacksonUtils;
import org.springframework.kafka.support.mapping.AbstractJavaTypeMapper;
import org.springframework.kafka.support.mapping.DefaultJackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper.TypePrecedence;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.type.TypeFactory;
/**
* Generic {@link org.apache.kafka.common.serialization.Deserializer Deserializer} for
* receiving JSON from Kafka and return Java objects.
* <p>
* IMPORTANT: Configuration must be done completely with property setters or via
* {@link #configure(Map, boolean)}, not a mixture. If any setters have been called,
* {@link #configure(Map, boolean)} will be a no-op.
*
* @param <T> class of the entity, representing messages
*
* @author Igor Stepanov
* @author Artem Bilan
* @author Gary Russell
* @author Yanming Zhou
* @author Elliot Kennedy
* @author Torsten Schleede
* @author Ivan Ponomarev
*/
public class JsonDeserializer<T> implements Deserializer<T> {
/**
* Kafka config property for the default key type if no header.
*/
public static final String KEY_DEFAULT_TYPE = "spring.json.key.default.type";
/**
* Kafka config property for the default value type if no header.
*/
public static final String VALUE_DEFAULT_TYPE = "spring.json.value.default.type";
/**
* Kafka config property for trusted deserialization packages.
*/
public static final String TRUSTED_PACKAGES = "spring.json.trusted.packages";
/**
* Kafka config property to add type mappings to the type mapper:
* 'foo=com.Foo,bar=com.Bar'.
*/
public static final String TYPE_MAPPINGS = JsonSerializer.TYPE_MAPPINGS;
/**
* Kafka config property for removing type headers (default true).
*/
public static final String REMOVE_TYPE_INFO_HEADERS = "spring.json.remove.type.headers";
/**
* Kafka config property for using type headers (default true).
* @since 2.2.3
*/
public static final String USE_TYPE_INFO_HEADERS = "spring.json.use.type.headers";
/**
* A method name to determine the {@link JavaType} to deserialize the key to:
* 'com.Foo.deserialize'. See {@link JsonTypeResolver#resolveType} for the signature.
*/
public static final String KEY_TYPE_METHOD = "spring.json.key.type.method";
/**
* A method name to determine the {@link JavaType} to deserialize the value to:
* 'com.Foo.deserialize'. See {@link JsonTypeResolver#resolveType} for the signature.
*/
public static final String VALUE_TYPE_METHOD = "spring.json.value.type.method";
private static final Set<String> OUR_KEYS = new HashSet<>();
static {
OUR_KEYS.add(KEY_DEFAULT_TYPE);
OUR_KEYS.add(VALUE_DEFAULT_TYPE);
OUR_KEYS.add(TRUSTED_PACKAGES);
OUR_KEYS.add(TYPE_MAPPINGS);
OUR_KEYS.add(REMOVE_TYPE_INFO_HEADERS);
OUR_KEYS.add(USE_TYPE_INFO_HEADERS);
OUR_KEYS.add(KEY_TYPE_METHOD);
OUR_KEYS.add(VALUE_TYPE_METHOD);
}
protected final ObjectMapper objectMapper; // NOSONAR
protected JavaType targetType; // NOSONAR
protected Jackson2JavaTypeMapper typeMapper = new DefaultJackson2JavaTypeMapper(); // NOSONAR
private ObjectReader reader;
private boolean typeMapperExplicitlySet = false;
private boolean removeTypeHeaders = true;
private boolean useTypeHeaders = true;
private JsonTypeResolver typeResolver;
private boolean setterCalled;
private boolean configured;
/**
 * Construct an instance with a default {@link ObjectMapper}; the target type, if
 * any, is resolved from the generic type argument of a subclass.
 */
public JsonDeserializer() {
	this((Class<T>) null, true);
}

/**
 * Construct an instance with the provided {@link ObjectMapper}.
 * @param objectMapper a custom object mapper.
 */
public JsonDeserializer(ObjectMapper objectMapper) {
	this((Class<T>) null, objectMapper, true);
}

/**
 * Construct an instance with the provided target type, and a default
 * {@link ObjectMapper}.
 * @param targetType the target type to use if no type info headers are present.
 */
public JsonDeserializer(@Nullable Class<? super T> targetType) {
	this(targetType, true);
}

/**
 * Construct an instance with the provided target type, and a default {@link ObjectMapper}.
 * @param targetType the target type reference to use if no type info headers are present.
 * @since 2.3
 */
public JsonDeserializer(@Nullable TypeReference<? super T> targetType) {
	this(targetType, true);
}

/**
 * Construct an instance with the provided target type, and a default {@link ObjectMapper}.
 * @param targetType the target java type to use if no type info headers are present.
 * @since 2.3
 */
public JsonDeserializer(@Nullable JavaType targetType) {
	this(targetType, true);
}

/**
 * Construct an instance with the provided target type, and
 * useHeadersIfPresent with a default {@link ObjectMapper}.
 * @param targetType the target type.
 * @param useHeadersIfPresent true to use headers if present and fall back to target
 * type if not.
 * @since 2.2
 */
public JsonDeserializer(@Nullable Class<? super T> targetType, boolean useHeadersIfPresent) {
	this(targetType, JacksonUtils.enhancedObjectMapper(), useHeadersIfPresent);
}

/**
 * Construct an instance with the provided target type, and
 * useHeadersIfPresent with a default {@link ObjectMapper}.
 * @param targetType the target type reference.
 * @param useHeadersIfPresent true to use headers if present and fall back to target
 * type if not.
 * @since 2.3
 */
public JsonDeserializer(TypeReference<? super T> targetType, boolean useHeadersIfPresent) {
	this(targetType, JacksonUtils.enhancedObjectMapper(), useHeadersIfPresent);
}

/**
 * Construct an instance with the provided target type, and
 * useHeadersIfPresent with a default {@link ObjectMapper}.
 * @param targetType the target java type.
 * @param useHeadersIfPresent true to use headers if present and fall back to target
 * type if not.
 * @since 2.3
 */
public JsonDeserializer(JavaType targetType, boolean useHeadersIfPresent) {
	this(targetType, JacksonUtils.enhancedObjectMapper(), useHeadersIfPresent);
}

/**
 * Construct an instance with the provided target type, and {@link ObjectMapper}.
 * @param targetType the target type to use if no type info headers are present.
 * @param objectMapper the mapper.
 */
public JsonDeserializer(Class<? super T> targetType, ObjectMapper objectMapper) {
	this(targetType, objectMapper, true);
}

/**
 * Construct an instance with the provided target type, and {@link ObjectMapper}.
 * @param targetType the target type reference to use if no type info headers are present.
 * @param objectMapper the mapper.
 */
public JsonDeserializer(TypeReference<? super T> targetType, ObjectMapper objectMapper) {
	this(targetType, objectMapper, true);
}

/**
 * Construct an instance with the provided target type, and {@link ObjectMapper}.
 * @param targetType the target java type to use if no type info headers are present.
 * @param objectMapper the mapper.
 */
public JsonDeserializer(JavaType targetType, ObjectMapper objectMapper) {
	this(targetType, objectMapper, true);
}

/**
 * Construct an instance with the provided target type, {@link ObjectMapper} and
 * useHeadersIfPresent. If {@code targetType} is null, an attempt is made to resolve
 * it from the generic type argument of a subclass.
 * @param targetType the target type.
 * @param objectMapper the mapper.
 * @param useHeadersIfPresent true to use headers if present and fall back to target
 * type if not.
 * @since 2.2
 */
public JsonDeserializer(@Nullable Class<? super T> targetType, ObjectMapper objectMapper,
		boolean useHeadersIfPresent) {

	Assert.notNull(objectMapper, "'objectMapper' must not be null.");
	this.objectMapper = objectMapper;
	JavaType javaType = null;
	if (targetType == null) {
		// Fall back to the generic type argument declared by a concrete subclass, if any.
		Class<?> genericType = ResolvableType.forClass(getClass()).getSuperType().resolveGeneric(0);
		if (genericType != null) {
			javaType = TypeFactory.defaultInstance().constructType(genericType);
		}
	}
	else {
		javaType = TypeFactory.defaultInstance().constructType(targetType);
	}
	initialize(javaType, useHeadersIfPresent);
}

/**
 * Construct an instance with the provided target type, {@link ObjectMapper} and
 * useHeadersIfPresent.
 * @param targetType the target type reference.
 * @param objectMapper the mapper.
 * @param useHeadersIfPresent true to use headers if present and fall back to target
 * type if not.
 * @since 2.3
 */
public JsonDeserializer(TypeReference<? super T> targetType, ObjectMapper objectMapper,
		boolean useHeadersIfPresent) {

	this(targetType != null ? TypeFactory.defaultInstance().constructType(targetType) : null,
			objectMapper, useHeadersIfPresent);
}

/**
 * Construct an instance with the provided target type, {@link ObjectMapper} and
 * useHeadersIfPresent.
 * @param targetType the target java type.
 * @param objectMapper the mapper.
 * @param useHeadersIfPresent true to use headers if present and fall back to target
 * type if not.
 * @since 2.3
 */
public JsonDeserializer(@Nullable JavaType targetType, ObjectMapper objectMapper,
		boolean useHeadersIfPresent) {

	Assert.notNull(objectMapper, "'objectMapper' must not be null.");
	this.objectMapper = objectMapper;
	initialize(targetType, useHeadersIfPresent);
}
/**
 * Return the configured type mapper.
 * @return the type mapper.
 */
public Jackson2JavaTypeMapper getTypeMapper() {
	return this.typeMapper;
}

/**
 * Set a customized type mapper. If the mapper is an {@link AbstractJavaTypeMapper},
 * any class mappings configured in the mapper will be added to the trusted packages.
 * @param typeMapper the type mapper.
 * @since 2.1
 */
public void setTypeMapper(Jackson2JavaTypeMapper typeMapper) {
	Assert.notNull(typeMapper, "'typeMapper' cannot be null");
	this.typeMapper = typeMapper;
	this.typeMapperExplicitlySet = true;
	if (typeMapper instanceof AbstractJavaTypeMapper) {
		// Classes the user explicitly mapped are implicitly trusted for deserialization.
		addMappingsToTrusted(((AbstractJavaTypeMapper) typeMapper).getIdClassMapping());
	}
	this.setterCalled = true;
}
/**
 * Configure the default Jackson2JavaTypeMapper to use key type headers.
 * @param isKey Use key type headers if true
 * @since 2.1.3
 */
public void setUseTypeMapperForKey(boolean isKey) {
	doSetUseTypeMapperForKey(isKey);
	this.setterCalled = true;
}

/**
 * Apply the key/value designation to the default type mapper, unless the user
 * supplied a custom mapper (in which case that mapper's own setting wins).
 */
private void doSetUseTypeMapperForKey(boolean isKey) {
	Jackson2JavaTypeMapper mapper = getTypeMapper();
	if (!this.typeMapperExplicitlySet && mapper instanceof AbstractJavaTypeMapper) {
		((AbstractJavaTypeMapper) mapper).setUseForKey(isKey);
	}
}
/**
 * Set to false to retain type information headers after deserialization.
 * Default true.
 * @param removeTypeHeaders true to remove headers.
 * @since 2.2
 */
public void setRemoveTypeHeaders(boolean removeTypeHeaders) {
	this.removeTypeHeaders = removeTypeHeaders;
	this.setterCalled = true;
}

/**
 * Set to false to ignore type information in headers and use the configured
 * target type instead.
 * Only applies if the preconfigured type mapper is used.
 * Default true.
 * @param useTypeHeaders false to ignore type headers.
 * @since 2.2.8
 */
public void setUseTypeHeaders(boolean useTypeHeaders) {
	if (!this.typeMapperExplicitlySet) {
		this.useTypeHeaders = useTypeHeaders;
		// Re-sync the mapper's type precedence with the new flag; empty map means
		// "no config properties to consider".
		setUpTypePrecedence(Collections.emptyMap());
	}
	this.setterCalled = true;
}

/**
 * Set a {@link BiFunction} that receives the data to be deserialized and the headers
 * and returns a JavaType.
 * @param typeFunction the function.
 * @since 2.5
 */
public void setTypeFunction(BiFunction<byte[], Headers, JavaType> typeFunction) {
	// Adapt the BiFunction to the richer JsonTypeResolver contract (topic is ignored).
	this.typeResolver = (topic, data, headers) -> typeFunction.apply(data, headers);
	this.setterCalled = true;
}

/**
 * Set a {@link JsonTypeResolver} that receives the data to be deserialized and the headers
 * and returns a JavaType.
 * @param typeResolver the resolver.
 * @since 2.5.3
 */
public void setTypeResolver(JsonTypeResolver typeResolver) {
	this.typeResolver = typeResolver;
	this.setterCalled = true;
}
/**
 * Configure this deserializer from Kafka configuration properties. A no-op if
 * already configured; fails fast when both property setters and configuration
 * properties were used.
 * @param configs the configuration map.
 * @param isKey true when configured as a key deserializer.
 */
@Override
public synchronized void configure(Map<String, ?> configs, boolean isKey) {
	if (this.configured) {
		return;
	}
	Assert.state(!this.setterCalled || !configsHasOurKeys(configs),
			"JsonDeserializer must be configured with property setters, or via configuration properties; not both");
	doSetUseTypeMapperForKey(isKey);
	// Order matters: precedence must be set before the target type is (re)initialized.
	setUpTypePrecedence(configs);
	setupTarget(configs, isKey);
	if (configs.containsKey(TRUSTED_PACKAGES)
			&& configs.get(TRUSTED_PACKAGES) instanceof String) {
		this.typeMapper.addTrustedPackages(
				StringUtils.delimitedListToStringArray((String) configs.get(TRUSTED_PACKAGES), ",", " \r\n\f\t"));
	}
	if (configs.containsKey(TYPE_MAPPINGS) && !this.typeMapperExplicitlySet
			&& this.typeMapper instanceof AbstractJavaTypeMapper) {
		((AbstractJavaTypeMapper) this.typeMapper).setIdClassMapping(createMappings(configs));
	}
	if (configs.containsKey(REMOVE_TYPE_INFO_HEADERS)) {
		this.removeTypeHeaders = Boolean.parseBoolean(configs.get(REMOVE_TYPE_INFO_HEADERS).toString());
	}
	setUpTypeMethod(configs, isKey);
	this.configured = true;
}
/**
 * Determine whether the supplied configuration map contains any of the
 * {@code spring.json.*} properties owned by this deserializer.
 * @param configs the consumer configuration map.
 * @return true if at least one of our configuration keys is present.
 */
private boolean configsHasOurKeys(Map<String, ?> configs) {
	// Not disjoint == the two sets share at least one key; replaces a manual
	// contains-loop with the standard-library equivalent.
	return !Collections.disjoint(configs.keySet(), OUR_KEYS);
}
/**
 * Parse the {@link JsonSerializer#TYPE_MAPPINGS} property into id-to-class mappings
 * and add the mapped classes' packages to the trusted packages.
 * @param configs the configuration map (must contain the type mappings property).
 * @return the parsed mappings.
 */
private Map<String, Class<?>> createMappings(Map<String, ?> configs) {
	Map<String, Class<?>> mappings =
			JsonSerializer.createMappings(configs.get(JsonSerializer.TYPE_MAPPINGS).toString());
	addMappingsToTrusted(mappings);
	return mappings;
}
/**
 * Install a type-resolving method from the key or value type-method property,
 * when the appropriate property is present.
 */
private void setUpTypeMethod(Map<String, ?> configs, boolean isKey) {
	String property = isKey ? KEY_TYPE_METHOD : VALUE_TYPE_METHOD;
	if (configs.containsKey(property)) {
		setUpTypeResolver((String) configs.get(property));
	}
}
/**
 * Build a type resolver from a 'com.Foo.method' property value. Falls back to a
 * two-argument (data, headers) method when no three-argument (topic, data, headers)
 * method exists.
 */
private void setUpTypeResolver(String method) {
	try {
		this.typeResolver = buildTypeResolver(method);
	}
	catch (IllegalStateException e) {
		if (e.getCause() instanceof NoSuchMethodException) {
			// Legacy signature: (byte[], Headers) without the topic argument.
			this.typeResolver = (topic, data, headers) ->
					(JavaType) SerializationUtils.propertyToMethodInvokingFunction(
							method, byte[].class, getClass().getClassLoader()).apply(data, headers);
			return;
		}
		throw e;
	}
}

/**
 * Align the default type mapper's precedence (headers vs. inferred type) with the
 * useTypeHeaders flag, possibly overridden by the USE_TYPE_INFO_HEADERS property.
 * No-op when a custom type mapper was supplied.
 */
private void setUpTypePrecedence(Map<String, ?> configs) {
	if (!this.typeMapperExplicitlySet) {
		if (configs.containsKey(USE_TYPE_INFO_HEADERS)) {
			this.useTypeHeaders = Boolean.parseBoolean(configs.get(USE_TYPE_INFO_HEADERS).toString());
		}
		this.typeMapper.setTypePrecedence(this.useTypeHeaders ? TypePrecedence.TYPE_ID : TypePrecedence.INFERRED);
	}
}

/**
 * Resolve and install the default target type from the key or value default-type
 * property, when present.
 */
private void setupTarget(Map<String, ?> configs, boolean isKey) {
	try {
		JavaType javaType = null;
		if (isKey && configs.containsKey(KEY_DEFAULT_TYPE)) {
			javaType = setupTargetType(configs, KEY_DEFAULT_TYPE);
		}
		else if (!isKey && configs.containsKey(VALUE_DEFAULT_TYPE)) {
			javaType = setupTargetType(configs, VALUE_DEFAULT_TYPE);
		}
		if (javaType != null) {
			// Preserve the already-established precedence when re-initializing.
			initialize(javaType, TypePrecedence.TYPE_ID.equals(this.typeMapper.getTypePrecedence()));
		}
	}
	catch (ClassNotFoundException | LinkageError e) {
		throw new IllegalStateException(e);
	}
}

/**
 * Install the target type, build the cached reader for it, trust its package, and
 * set the mapper's type precedence.
 * @param type the target type; may be null only when useHeadersIfPresent is true.
 * @param useHeadersIfPresent true to prefer type headers over the target type.
 */
private void initialize(@Nullable JavaType type, boolean useHeadersIfPresent) {
	this.targetType = type;
	this.useTypeHeaders = useHeadersIfPresent;
	Assert.isTrue(this.targetType != null || useHeadersIfPresent,
			"'targetType' cannot be null if 'useHeadersIfPresent' is false");
	if (this.targetType != null) {
		this.reader = this.objectMapper.readerFor(this.targetType);
	}
	addTargetPackageToTrusted();
	this.typeMapper.setTypePrecedence(useHeadersIfPresent ? TypePrecedence.TYPE_ID : TypePrecedence.INFERRED);
}
/**
 * Construct a {@link JavaType} from a default-type config value, which may be a
 * {@link Class} or a fully-qualified class name {@link String}.
 * @param configs the configuration map.
 * @param key the property key to read.
 * @return the constructed type.
 * @throws ClassNotFoundException if a class name cannot be loaded.
 * @throws IllegalStateException if the value is neither Class nor String.
 */
private JavaType setupTargetType(Map<String, ?> configs, String key) throws ClassNotFoundException, LinkageError {
	// Hoist the repeated map lookup (the original read configs.get(key) four times).
	Object value = configs.get(key);
	if (value instanceof Class) {
		return TypeFactory.defaultInstance().constructType((Class<?>) value);
	}
	else if (value instanceof String) {
		return TypeFactory.defaultInstance()
				.constructType(ClassUtils.forName((String) value, null));
	}
	else {
		throw new IllegalStateException(key + " must be Class or String");
	}
}
/**
 * Add trusted packages for deserialization.
 * @param packages the packages.
 * @since 2.1
 */
public synchronized void addTrustedPackages(String... packages) {
	doAddTrustedPackages(packages);
	this.setterCalled = true;
}
/**
 * Trust the package (and its subpackages) of every class in the supplied type-id
 * mappings; for array types, the component type's package is used.
 * @param mappings the id-to-class mappings.
 */
private void addMappingsToTrusted(Map<String, Class<?>> mappings) {
	mappings.values().forEach(clazz -> {
		Class<?> toInspect = clazz.isArray() ? clazz.getComponentType() : clazz;
		Package pkg = toInspect.getPackage();
		// getPackage() returns null for classes in the default package (and for
		// primitives); there is no package to trust then, so skip instead of NPE.
		if (pkg != null) {
			String packageName = pkg.getName();
			doAddTrustedPackages(packageName);
			doAddTrustedPackages(packageName + ".*");
		}
	});
}
/**
 * Trust the configured target type's package (and its subpackages), if a target
 * type is set.
 */
private void addTargetPackageToTrusted() {
	String packageName = getTargetPackageName();
	if (packageName == null) {
		return;
	}
	doAddTrustedPackages(packageName);
	doAddTrustedPackages(packageName + ".*");
}
/**
 * Return the package name of the target type's raw class, or null when no target
 * type is configured.
 */
private String getTargetPackageName() {
	if (this.targetType != null) {
		// Strip the JVM array-type prefix ("[L") so array target types yield the
		// component package name; NOTE(review): assumes at most one-dimensional
		// object arrays — confirm for nested array types.
		return ClassUtils.getPackageName(this.targetType.getRawClass()).replaceFirst("\\[L", "");
	}
	return null;
}

/**
 * Delegate trusted-package registration to the type mapper.
 */
private void doAddTrustedPackages(String... packages) {
	this.typeMapper.addTrustedPackages(packages);
}
/**
 * Deserialize using, in order of precedence: the type resolver, type info headers
 * (when the mapper's precedence is TYPE_ID), then the configured default type.
 * @param topic the topic.
 * @param headers the record headers.
 * @param data the serialized data; null yields null.
 * @return the deserialized object, or null for null input.
 */
@Override
public T deserialize(String topic, Headers headers, byte[] data) {
	if (data == null) {
		return null;
	}
	ObjectReader deserReader = null;
	JavaType javaType = null;
	if (this.typeResolver != null) {
		javaType = this.typeResolver.resolveType(topic, data, headers);
	}
	if (javaType == null && this.typeMapper.getTypePrecedence().equals(TypePrecedence.TYPE_ID)) {
		javaType = this.typeMapper.toJavaType(headers);
	}
	if (javaType != null) {
		// A per-record reader for the resolved type; the cached default reader is
		// only used when no type could be resolved.
		deserReader = this.objectMapper.readerFor(javaType);
	}
	if (this.removeTypeHeaders) {
		this.typeMapper.removeHeaders(headers);
	}
	if (deserReader == null) {
		deserReader = this.reader;
	}
	Assert.state(deserReader != null, "No type information in headers and no default type provided");
	try {
		return deserReader.readValue(data);
	}
	catch (IOException e) {
		throw new SerializationException("Can't deserialize data [" + Arrays.toString(data) +
				"] from topic [" + topic + "]", e);
	}
}

/**
 * Deserialize without headers, using the type resolver (called with null headers)
 * if set, otherwise the configured default type.
 * @param topic the topic.
 * @param data the serialized data; null yields null.
 * @return the deserialized object, or null for null input.
 */
@Override
public T deserialize(String topic, @Nullable byte[] data) {
	if (data == null) {
		return null;
	}
	ObjectReader localReader = this.reader;
	if (this.typeResolver != null) {
		JavaType javaType = this.typeResolver.resolveType(topic, data, null);
		if (javaType != null) {
			localReader = this.objectMapper.readerFor(javaType);
		}
	}
	Assert.state(localReader != null, "No headers available and no default type provided");
	try {
		return localReader.readValue(data);
	}
	catch (IOException e) {
		throw new SerializationException("Can't deserialize data [" + Arrays.toString(data) +
				"] from topic [" + topic + "]", e);
	}
}
/**
 * No resources to release; this deserializer holds no open handles.
 */
@Override
public void close() {
	// No-op
}
/**
 * Copies this deserializer with same configuration, except new target type is used.
 * @param newTargetType type used for when type headers are missing, not null
 * @param <X> new deserialization result type
 * @return new instance of deserializer with type changes
 * @since 2.6
 */
public <X> JsonDeserializer<X> copyWithType(Class<? super X> newTargetType) {
	return copyWithType(this.objectMapper.constructType(newTargetType));
}

/**
 * Copies this deserializer with same configuration, except new target type reference is used.
 * @param newTargetType type reference used for when type headers are missing, not null
 * @param <X> new deserialization result type
 * @return new instance of deserializer with type changes
 * @since 2.6
 */
public <X> JsonDeserializer<X> copyWithType(TypeReference<? super X> newTargetType) {
	return copyWithType(this.objectMapper.constructType(newTargetType.getType()));
}

/**
 * Copies this deserializer with same configuration, except new target java type is used.
 * @param newTargetType java type used for when type headers are missing, not null
 * @param <X> new deserialization result type
 * @return new instance of deserializer with type changes
 * @since 2.6
 */
public <X> JsonDeserializer<X> copyWithType(JavaType newTargetType) {
	JsonDeserializer<X> result = new JsonDeserializer<>(newTargetType, this.objectMapper, this.useTypeHeaders);
	// Carry over the mutable configuration the constructor does not accept.
	result.removeTypeHeaders = this.removeTypeHeaders;
	result.typeMapper = this.typeMapper;
	result.typeMapperExplicitlySet = this.typeMapperExplicitlySet;
	return result;
}
// Fluent API

/**
 * Designate this deserializer for deserializing keys (default is values); only
 * applies if the default type mapper is used.
 * @return the deserializer.
 * @since 2.3
 */
public JsonDeserializer<T> forKeys() {
	setUseTypeMapperForKey(true);
	return this;
}

/**
 * Don't remove type information headers.
 * @return the deserializer.
 * @since 2.3
 * @see #setRemoveTypeHeaders(boolean)
 */
public JsonDeserializer<T> dontRemoveTypeHeaders() {
	setRemoveTypeHeaders(false);
	return this;
}

/**
 * Ignore type information headers and use the configured target class.
 * @return the deserializer.
 * @since 2.3
 * @see #setUseTypeHeaders(boolean)
 */
public JsonDeserializer<T> ignoreTypeHeaders() {
	setUseTypeHeaders(false);
	return this;
}

/**
 * Use the supplied {@link Jackson2JavaTypeMapper}.
 * @param mapper the mapper.
 * @return the deserializer.
 * @since 2.3
 * @see #setTypeMapper(Jackson2JavaTypeMapper)
 */
public JsonDeserializer<T> typeMapper(Jackson2JavaTypeMapper mapper) {
	setTypeMapper(mapper);
	return this;
}

/**
 * Add trusted packages to the default type mapper.
 * @param packages the packages.
 * @return the deserializer.
 * @since 2.5
 */
public synchronized JsonDeserializer<T> trustedPackages(String... packages) {
	Assert.isTrue(!this.typeMapperExplicitlySet, "When using a custom type mapper, set the trusted packages there");
	this.typeMapper.addTrustedPackages(packages);
	return this;
}

/**
 * Set a {@link BiFunction} that receives the data to be deserialized and the headers
 * and returns a JavaType.
 * @param typeFunction the function.
 * @return the deserializer.
 * @since 2.5
 */
public JsonDeserializer<T> typeFunction(BiFunction<byte[], Headers, JavaType> typeFunction) {
	setTypeFunction(typeFunction);
	return this;
}

/**
 * Set a {@link JsonTypeResolver} that receives the data to be deserialized and the headers
 * and returns a JavaType.
 * @param resolver the resolver.
 * @return the deserializer.
 * @since 2.5.3
 */
public JsonDeserializer<T> typeResolver(JsonTypeResolver resolver) {
	setTypeResolver(resolver);
	return this;
}
/**
 * Build a {@link JsonTypeResolver} that invokes a static method named by a
 * 'com.example.Foo.methodName' property. The method must be static, accept
 * (String, byte[], Headers), and return a {@link JavaType}.
 * @param methodProperty the fully-qualified class name plus method name.
 * @return the resolver.
 * @throws IllegalStateException if the class cannot be loaded or the method is
 * missing/invalid (a NoSuchMethodException cause triggers the legacy fallback in
 * the caller).
 */
private JsonTypeResolver buildTypeResolver(String methodProperty) {
	int lastDotPosn = methodProperty.lastIndexOf('.');
	Assert.state(lastDotPosn > 1,
			"the method property needs to be a class name followed by the method name, separated by '.'");
	Class<?> clazz;
	try {
		clazz = ClassUtils.forName(methodProperty.substring(0, lastDotPosn), getClass().getClassLoader());
	}
	catch (ClassNotFoundException | LinkageError e) {
		throw new IllegalStateException(e);
	}
	String methodName = methodProperty.substring(lastDotPosn + 1);
	Method method;
	try {
		method = clazz.getDeclaredMethod(methodName, String.class, byte[].class, Headers.class);
		Assert.state(JavaType.class.isAssignableFrom(method.getReturnType()),
				method + " return type must be JavaType");
		Assert.state(Modifier.isStatic(method.getModifiers()), method + " must be static");
	}
	catch (SecurityException | NoSuchMethodException e) {
		throw new IllegalStateException(e);
	}
	return (topic, data, headers) -> {
		try {
			return (JavaType) method.invoke(null, topic, data, headers);
		}
		catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
			throw new IllegalStateException(e);
		}
	};
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/JsonSerde.java | /*
* Copyright 2017-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Serde;
import org.springframework.core.ResolvableType;
import org.springframework.kafka.support.JacksonUtils;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* A {@link org.apache.kafka.common.serialization.Serde} that provides serialization and
* deserialization in JSON format.
* <p>
* The implementation delegates to underlying {@link JsonSerializer} and
* {@link JsonDeserializer} implementations.
*
* @param <T> target class for serialization/deserialization
*
* @author Marius Bogoevici
* @author Elliot Kennedy
* @author Gary Russell
* @author Ivan Ponomarev
*
* @since 1.1.5
*/
public class JsonSerde<T> implements Serde<T> {

	private final JsonSerializer<T> jsonSerializer;

	private final JsonDeserializer<T> jsonDeserializer;

	/**
	 * Construct an instance with no default target type and a default
	 * {@link ObjectMapper}.
	 */
	public JsonSerde() {
		this((JavaType) null, JacksonUtils.enhancedObjectMapper());
	}

	/**
	 * Construct an instance with the provided target type and a default
	 * {@link ObjectMapper}.
	 * @param targetType the default target type.
	 */
	public JsonSerde(@Nullable Class<? super T> targetType) {
		this(targetType, JacksonUtils.enhancedObjectMapper());
	}

	/**
	 * Construct an instance with the provided target type reference and a default
	 * {@link ObjectMapper}.
	 * @param targetType the default target type reference.
	 */
	public JsonSerde(@Nullable TypeReference<? super T> targetType) {
		this(targetType, JacksonUtils.enhancedObjectMapper());
	}

	/**
	 * Construct an instance with the provided target java type and a default
	 * {@link ObjectMapper}.
	 * @param targetType the default target java type.
	 */
	public JsonSerde(@Nullable JavaType targetType) {
		this(targetType, JacksonUtils.enhancedObjectMapper());
	}

	/**
	 * Construct an instance with the provided {@link ObjectMapper} and no default
	 * target type.
	 * @param objectMapper the mapper.
	 */
	public JsonSerde(ObjectMapper objectMapper) {
		this((JavaType) null, objectMapper);
	}

	/**
	 * Construct an instance with the provided target type reference and mapper.
	 * @param targetType the default target type reference.
	 * @param objectMapper the mapper.
	 */
	public JsonSerde(@Nullable TypeReference<? super T> targetType, ObjectMapper objectMapper) {
		this(targetType == null ? null : objectMapper.constructType(targetType.getType()), objectMapper);
	}

	/**
	 * Construct an instance with the provided target type and mapper.
	 * @param targetType the default target type.
	 * @param objectMapper the mapper.
	 */
	public JsonSerde(@Nullable Class<? super T> targetType, ObjectMapper objectMapper) {
		this(targetType == null ? null : objectMapper.constructType(targetType), objectMapper);
	}

	/**
	 * Construct an instance with the provided target java type and mapper; when the
	 * type is null it is resolved, if possible, from the generic type argument of a
	 * subclass.
	 * @param targetTypeArg the default target java type, or null.
	 * @param objectMapperArg the mapper, or null for a default one.
	 */
	public JsonSerde(@Nullable JavaType targetTypeArg, @Nullable ObjectMapper objectMapperArg) {
		ObjectMapper objectMapper = objectMapperArg == null ? JacksonUtils.enhancedObjectMapper() : objectMapperArg;
		JavaType actualJavaType;
		if (targetTypeArg != null) {
			actualJavaType = targetTypeArg;
		}
		else {
			// Fall back to the generic type argument declared by a concrete subclass.
			Class<?> resolvedGeneric = ResolvableType.forClass(getClass()).getSuperType().resolveGeneric(0);
			actualJavaType = resolvedGeneric != null ? objectMapper.constructType(resolvedGeneric) : null;
		}
		this.jsonSerializer = new JsonSerializer<>(actualJavaType, objectMapper);
		this.jsonDeserializer = new JsonDeserializer<>(actualJavaType, objectMapper);
	}

	/**
	 * Construct an instance from pre-built serializer and deserializer delegates.
	 * @param jsonSerializer the serializer; not null.
	 * @param jsonDeserializer the deserializer; not null.
	 */
	public JsonSerde(JsonSerializer<T> jsonSerializer, JsonDeserializer<T> jsonDeserializer) {
		Assert.notNull(jsonSerializer, "'jsonSerializer' must not be null.");
		Assert.notNull(jsonDeserializer, "'jsonDeserializer' must not be null.");
		this.jsonSerializer = jsonSerializer;
		this.jsonDeserializer = jsonDeserializer;
	}

	/**
	 * Configure both delegates from Kafka configuration properties.
	 */
	@Override
	public void configure(Map<String, ?> configs, boolean isKey) {
		this.jsonSerializer.configure(configs, isKey);
		this.jsonDeserializer.configure(configs, isKey);
	}

	@Override
	public void close() {
		this.jsonSerializer.close();
		this.jsonDeserializer.close();
	}

	@Override
	public JsonSerializer<T> serializer() {
		return this.jsonSerializer;
	}

	@Override
	public JsonDeserializer<T> deserializer() {
		return this.jsonDeserializer;
	}

	/**
	 * Copies this serde with same configuration, except new target type is used.
	 * @param newTargetType type reference forced for serialization, and used as default for deserialization, not null
	 * @param <X> new deserialization result type and serialization source type
	 * @return new instance of serde with type changes
	 * @since 2.6
	 */
	public <X> JsonSerde<X> copyWithType(Class<? super X> newTargetType) {
		return new JsonSerde<>(this.jsonSerializer.copyWithType(newTargetType),
				this.jsonDeserializer.copyWithType(newTargetType));
	}

	/**
	 * Copies this serde with same configuration, except new target type reference is used.
	 * @param newTargetType type reference forced for serialization, and used as default for deserialization, not null
	 * @param <X> new deserialization result type and serialization source type
	 * @return new instance of serde with type changes
	 * @since 2.6
	 */
	public <X> JsonSerde<X> copyWithType(TypeReference<? super X> newTargetType) {
		return new JsonSerde<>(this.jsonSerializer.copyWithType(newTargetType),
				this.jsonDeserializer.copyWithType(newTargetType));
	}

	/**
	 * Copies this serde with same configuration, except new target java type is used.
	 * @param newTargetType java type forced for serialization, and used as default for deserialization, not null
	 * @param <X> new deserialization result type and serialization source type
	 * @return new instance of serde with type changes
	 * @since 2.6
	 */
	public <X> JsonSerde<X> copyWithType(JavaType newTargetType) {
		return new JsonSerde<>(this.jsonSerializer.copyWithType(newTargetType),
				this.jsonDeserializer.copyWithType(newTargetType));
	}

	// Fluent API

	/**
	 * Designate this Serde for serializing/deserializing keys (default is values).
	 * @return the serde.
	 * @since 2.3
	 */
	public JsonSerde<T> forKeys() {
		this.jsonSerializer.forKeys();
		this.jsonDeserializer.forKeys();
		return this;
	}

	/**
	 * Configure the serializer to not add type information.
	 * @return the serde.
	 * @since 2.3
	 */
	public JsonSerde<T> noTypeInfo() {
		this.jsonSerializer.noTypeInfo();
		return this;
	}

	/**
	 * Don't remove type information headers after deserialization.
	 * @return the serde.
	 * @since 2.3
	 */
	public JsonSerde<T> dontRemoveTypeHeaders() {
		this.jsonDeserializer.dontRemoveTypeHeaders();
		return this;
	}

	/**
	 * Ignore type information headers and use the configured target class.
	 * @return the serde.
	 * @since 2.3
	 */
	public JsonSerde<T> ignoreTypeHeaders() {
		this.jsonDeserializer.ignoreTypeHeaders();
		return this;
	}

	/**
	 * Use the supplied {@link Jackson2JavaTypeMapper}.
	 * @param mapper the mapper.
	 * @return the serde.
	 * @since 2.3
	 */
	public JsonSerde<T> typeMapper(Jackson2JavaTypeMapper mapper) {
		this.jsonSerializer.setTypeMapper(mapper);
		this.jsonDeserializer.setTypeMapper(mapper);
		return this;
	}

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/JsonSerializer.java | /*
* Copyright 2016-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.kafka.support.JacksonUtils;
import org.springframework.kafka.support.mapping.AbstractJavaTypeMapper;
import org.springframework.kafka.support.mapping.DefaultJackson2JavaTypeMapper;
import org.springframework.kafka.support.mapping.Jackson2JavaTypeMapper;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
/**
* Generic {@link org.apache.kafka.common.serialization.Serializer Serializer} for sending
* Java objects to Kafka as JSON.
* <p>
* IMPORTANT: Configuration must be done completely with property setters or via
* {@link #configure(Map, boolean)}, not a mixture. If any setters have been called,
* {@link #configure(Map, boolean)} will be a no-op.
*
* @param <T> class of the entity, representing messages
*
* @author Igor Stepanov
* @author Artem Bilan
* @author Gary Russell
* @author Elliot Kennedy
*/
public class JsonSerializer<T> implements Serializer<T> {
/**
 * Kafka config property for disabling adding type headers.
 */
public static final String ADD_TYPE_INFO_HEADERS = "spring.json.add.type.headers";

/**
 * Kafka config property to add type mappings to the type mapper:
 * 'foo:com.Foo,bar:com.Bar'.
 */
public static final String TYPE_MAPPINGS = "spring.json.type.mapping";

protected final ObjectMapper objectMapper; // NOSONAR

// Whether to write type information headers during serialization (default true).
protected boolean addTypeInfo = true; // NOSONAR

// Writer bound to the (optional) target type; used for all serialization.
private ObjectWriter writer;

protected Jackson2JavaTypeMapper typeMapper = new DefaultJackson2JavaTypeMapper(); // NOSONAR

// True when setTypeMapper() was called; prevents configure() from altering it.
private boolean typeMapperExplicitlySet = false;

// True once any property setter has been called; see configure().
private boolean setterCalled;

// Guards against configure() being applied more than once.
private boolean configured;
/**
 * Construct an instance with a default {@link ObjectMapper} and no forced
 * target type.
 */
public JsonSerializer() {
	this((JavaType) null, JacksonUtils.enhancedObjectMapper());
}

/**
 * Construct an instance with the provided target type reference and a default
 * {@link ObjectMapper}.
 * @param targetType the type reference forced for serialization.
 */
public JsonSerializer(TypeReference<? super T> targetType) {
	this(targetType, JacksonUtils.enhancedObjectMapper());
}

/**
 * Construct an instance with the provided {@link ObjectMapper} and no forced
 * target type.
 * @param objectMapper the mapper.
 */
public JsonSerializer(ObjectMapper objectMapper) {
	this((JavaType) null, objectMapper);
}

/**
 * Construct an instance with the provided target type reference and mapper.
 * @param targetType the type reference forced for serialization; may be null.
 * @param objectMapper the mapper.
 */
public JsonSerializer(TypeReference<? super T> targetType, ObjectMapper objectMapper) {
	this(targetType == null ? null : objectMapper.constructType(targetType.getType()), objectMapper);
}

/**
 * Construct an instance with the provided target java type and mapper.
 * @param targetType the java type forced for serialization; null writes by
 * runtime class.
 * @param objectMapper the mapper; not null.
 */
public JsonSerializer(JavaType targetType, ObjectMapper objectMapper) {
	Assert.notNull(objectMapper, "'objectMapper' must not be null.");
	this.objectMapper = objectMapper;
	this.writer = objectMapper.writerFor(targetType);
}
/**
 * Return whether type information headers are added during serialization.
 * @return true to add type headers.
 */
public boolean isAddTypeInfo() {
	return this.addTypeInfo;
}

/**
 * Set to false to disable adding type info headers.
 * @param addTypeInfo true to add headers.
 * @since 2.1
 */
public void setAddTypeInfo(boolean addTypeInfo) {
	this.addTypeInfo = addTypeInfo;
	this.setterCalled = true;
}

/**
 * Return the configured type mapper.
 * @return the type mapper.
 */
public Jackson2JavaTypeMapper getTypeMapper() {
	return this.typeMapper;
}

/**
 * Set a customized type mapper.
 * @param typeMapper the type mapper.
 * @since 2.1
 */
public void setTypeMapper(Jackson2JavaTypeMapper typeMapper) {
	Assert.notNull(typeMapper, "'typeMapper' cannot be null");
	this.typeMapper = typeMapper;
	this.typeMapperExplicitlySet = true;
	this.setterCalled = true;
}
/**
 * Configure the default Jackson2JavaTypeMapper to use key type headers.
 * @param isKey Use key type headers if true
 * @since 2.1.3
 */
public void setUseTypeMapperForKey(boolean isKey) {
	Jackson2JavaTypeMapper mapper = getTypeMapper();
	if (!this.typeMapperExplicitlySet && mapper instanceof AbstractJavaTypeMapper) {
		((AbstractJavaTypeMapper) mapper).setUseForKey(isKey);
	}
	this.setterCalled = true;
}
/**
 * Configure this serializer from Kafka configuration properties. A no-op if
 * already configured; fails fast when both property setters and configuration
 * properties were used.
 * @param configs the configuration map.
 * @param isKey true when configured as a key serializer.
 */
@Override
public synchronized void configure(Map<String, ?> configs, boolean isKey) {
	if (this.configured) {
		return;
	}
	Assert.state(!this.setterCalled
					|| (!configs.containsKey(ADD_TYPE_INFO_HEADERS) && !configs.containsKey(TYPE_MAPPINGS)),
			"JsonSerializer must be configured with property setters, or via configuration properties; not both");
	setUseTypeMapperForKey(isKey);
	if (configs.containsKey(ADD_TYPE_INFO_HEADERS)) {
		Object config = configs.get(ADD_TYPE_INFO_HEADERS);
		if (config instanceof Boolean) {
			this.addTypeInfo = (Boolean) config;
		}
		else if (config instanceof String) {
			this.addTypeInfo = Boolean.valueOf((String) config);
		}
		else {
			throw new IllegalStateException(ADD_TYPE_INFO_HEADERS + " must be Boolean or String");
		}
	}
	if (configs.containsKey(TYPE_MAPPINGS) && !this.typeMapperExplicitlySet
			&& this.typeMapper instanceof AbstractJavaTypeMapper) {
		((AbstractJavaTypeMapper) this.typeMapper)
				.setIdClassMapping(createMappings((String) configs.get(TYPE_MAPPINGS)));
	}
	this.configured = true;
}
/**
 * Parse a comma-delimited list of {@code token:className} pairs into a map of
 * token to loaded {@link Class}.
 * @param mappings the comma-delimited mappings, each entry containing exactly one ':'.
 * @return the token-to-class map.
 * @throws IllegalArgumentException if a class cannot be loaded.
 */
protected static Map<String, Class<?>> createMappings(String mappings) {
    Map<String, Class<?>> result = new HashMap<>();
    for (String mapping : StringUtils.commaDelimitedListToStringArray(mappings)) {
        String[] parts = mapping.split(":");
        Assert.isTrue(parts.length == 2, "Each comma-delimited mapping entry must have exactly one ':'");
        String token = parts[0].trim();
        String className = parts[1].trim();
        try {
            result.put(token, ClassUtils.forName(className, ClassUtils.getDefaultClassLoader()));
        }
        catch (ClassNotFoundException | LinkageError e) {
            // error message intentionally uses the raw (untrimmed) entry values
            throw new IllegalArgumentException("Failed to load: " + parts[1] + " for " + parts[0], e);
        }
    }
    return result;
}
/**
 * Serialize the data, first writing type information into the record headers
 * when type info is enabled and headers are supplied.
 * @param topic the topic.
 * @param headers the record headers; type info is only added when non-null.
 * @param data the object to serialize; null yields a null payload.
 * @return the JSON bytes, or null when data is null.
 */
@Override
@Nullable
public byte[] serialize(String topic, Headers headers, @Nullable T data) {
if (data == null) {
return null;
}
if (this.addTypeInfo && headers != null) {
this.typeMapper.fromJavaType(this.objectMapper.constructType(data.getClass()), headers);
}
return serialize(topic, data);
}
/**
 * Serialize the data to JSON bytes without adding any headers.
 * @param topic the topic.
 * @param data the object to serialize; null yields a null payload.
 * @return the JSON bytes, or null when data is null.
 * @throws SerializationException wrapping any Jackson write failure.
 */
@Override
@Nullable
public byte[] serialize(String topic, @Nullable T data) {
if (data == null) {
return null;
}
try {
return this.writer.writeValueAsBytes(data);
}
catch (IOException ex) {
throw new SerializationException("Can't serialize data [" + data + "] for topic [" + topic + "]", ex);
}
}
@Override
public void close() {
// No-op - this serializer holds no closeable resources
}
/**
 * Copies this serializer with the same configuration, except the supplied target
 * class is used for serialization.
 * @param newTargetType target class forced for serialization, not null
 * @param <X> new serialization source type
 * @return new instance of serializer with type changes
 * @since 2.6
 */
public <X> JsonSerializer<X> copyWithType(Class<? super X> newTargetType) {
return copyWithType(this.objectMapper.constructType(newTargetType));
}
/**
 * Copies this serializer with the same configuration, except the supplied type
 * reference is used for serialization.
 * @param newTargetType type reference forced for serialization, not null
 * @param <X> new serialization source type
 * @return new instance of serializer with type changes
 * @since 2.6
 */
public <X> JsonSerializer<X> copyWithType(TypeReference<? super X> newTargetType) {
return copyWithType(this.objectMapper.constructType(newTargetType.getType()));
}
/**
 * Copies this serializer with same configuration, except new target java type is used.
 * <p>The add-type-info flag, the type mapper, and its explicit/default status are
 * carried over to the copy.
 * @param newTargetType java type forced for serialization, not null
 * @param <X> new serialization source type
 * @return new instance of serializer with type changes
 * @since 2.6
 */
public <X> JsonSerializer<X> copyWithType(JavaType newTargetType) {
    JsonSerializer<X> copy = new JsonSerializer<>(newTargetType, this.objectMapper);
    copy.typeMapper = this.typeMapper;
    copy.typeMapperExplicitlySet = this.typeMapperExplicitlySet;
    copy.addTypeInfo = this.addTypeInfo;
    return copy;
}
// Fluent API
/**
 * Designate this serializer for serializing keys (default is values); only applies if
 * the default type mapper is used.
 * @return this serializer (for chaining).
 * @since 2.3
 * @see #setUseTypeMapperForKey(boolean)
 */
public JsonSerializer<T> forKeys() {
setUseTypeMapperForKey(true);
return this;
}
/**
 * Do not include type info headers.
 * @return this serializer (for chaining).
 * @since 2.3
 * @see #setAddTypeInfo(boolean)
 */
public JsonSerializer<T> noTypeInfo() {
setAddTypeInfo(false);
return this;
}
/**
 * Use the supplied {@link Jackson2JavaTypeMapper}.
 * @param mapper the mapper; must not be null.
 * @return this serializer (for chaining).
 * @since 2.3
 * @see #setTypeMapper(Jackson2JavaTypeMapper)
 */
public JsonSerializer<T> typeMapper(Jackson2JavaTypeMapper mapper) {
setTypeMapper(mapper);
return this;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/JsonTypeResolver.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import org.apache.kafka.common.header.Headers;
import com.fasterxml.jackson.databind.JavaType;
/**
 * Strategy to determine the {@link JavaType} to deserialize into, given the
 * topic, the raw payload and/or the record headers.
 *
 * @author Gary Russell
 * @since 2.5.3
 *
 */
@FunctionalInterface
public interface JsonTypeResolver {
/**
 * Determine the target type for deserialization.
 * @param topic the topic.
 * @param data the serialized data.
 * @param headers the headers.
 * @return the type.
 */
JavaType resolveType(String topic, byte[] data, Headers headers);
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/ParseStringDeserializer.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.function.Function;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;
import org.springframework.util.Assert;
/**
 * Generic {@link org.apache.kafka.common.serialization.Deserializer Deserializer} for deserialization of entity from
 * its {@link String} representation received from Kafka (a.k.a parsing).
 *
 * @param <T> class of the entity, representing messages
 *
 * @author Alexei Klenin
 * @author Gary Russell
 * @since 2.5
 */
public class ParseStringDeserializer<T> implements Deserializer<T> {

    /**
     * Property for the key parser method.
     */
    public static final String KEY_PARSER = "spring.message.key.parser";

    /**
     * Property for the value parser method.
     */
    public static final String VALUE_PARSER = "spring.message.value.parser";

    // Sentinel parser that fails fast when no parser was supplied via constructor or config.
    private static final BiFunction<String, Headers, ?> NO_PARSER = (str, headers) -> {
        throw new IllegalStateException("A parser must be provided either via a constructor or consumer properties");
    };

    @SuppressWarnings("unchecked")
    private BiFunction<String, Headers, T> parser = (BiFunction<String, Headers, T>) NO_PARSER;

    // Charset used to decode the raw bytes before parsing; default UTF-8.
    private Charset charset = StandardCharsets.UTF_8;

    /**
     * Construct an instance with no parser function; a static method name must be
     * provided in the consumer config {@link #KEY_PARSER} or {@link #VALUE_PARSER}
     * properties.
     */
    public ParseStringDeserializer() {
    }

    /**
     * Construct an instance with the supplied parser function.
     * @param parser the function.
     */
    public ParseStringDeserializer(Function<String, T> parser) {
        this.parser = (message, ignoredHeaders) -> parser.apply(message);
    }

    /**
     * Construct an instance with the supplied parser function.
     * @param parser the function.
     */
    public ParseStringDeserializer(BiFunction<String, Headers, T> parser) {
        this.parser = parser;
    }

    /**
     * Resolve the parser from the {@link #KEY_PARSER}/{@link #VALUE_PARSER} property
     * when none was supplied via a constructor.
     * @param configs the consumer configuration.
     * @param isKey true when configured as a key deserializer.
     */
    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        if (NO_PARSER.equals(this.parser)) {
            String parserMethod = (String) configs.get(isKey ? KEY_PARSER : VALUE_PARSER);
            Assert.state(parserMethod != null,
                    "A parser must be provided either via a constructor or consumer properties");
            this.parser = SerializationUtils.propertyToMethodInvokingFunction(parserMethod, String.class,
                    getClass().getClassLoader());
        }
    }

    @Override
    public T deserialize(String topic, byte[] data) {
        return deserialize(topic, null, data);
    }

    /**
     * Decode the data with the configured charset and apply the parser.
     * @param topic the topic.
     * @param headers the headers (may be null when called from the two-arg variant).
     * @param data the raw data; null yields null.
     * @return the parsed entity, or null for a null (tombstone) payload.
     */
    @Override
    public T deserialize(String topic, Headers headers, byte[] data) {
        if (data == null) {
            // null payloads (tombstones) deserialize to null, matching standard Kafka
            // deserializers such as StringDeserializer, instead of throwing an NPE
            return null;
        }
        return this.parser.apply(new String(data, this.charset), headers);
    }

    /**
     * Set a charset to use when converting byte[] to {@link String}. Default UTF-8.
     * @param charset the charset.
     */
    public void setCharset(Charset charset) {
        Assert.notNull(charset, "'charset' cannot be null");
        this.charset = charset;
    }

    /**
     * Get the configured charset.
     * @return the charset.
     */
    public Charset getCharset() {
        return this.charset;
    }

    /**
     * Get the configured parser function.
     * @return the function.
     */
    public BiFunction<String, Headers, T> getParser() {
        return this.parser;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/RetryingDeserializer.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;
import org.springframework.retry.RetryOperations;
import org.springframework.util.Assert;
/**
 * A deserializer configured with a delegate and a {@link RetryOperations} to retry
 * deserialization in case of transient errors.
 *
 * @param <T> Type to be deserialized into.
 *
 * @author Gary Russell
 * @since 2.3
 *
 */
public class RetryingDeserializer<T> implements Deserializer<T> {

    // The actual deserializer invoked within each retry attempt.
    private final Deserializer<T> delegate;

    // Retry template applied around each deserialize call.
    private final RetryOperations retryOperations;

    /**
     * Construct an instance with the supplied delegate and retry operations.
     * @param delegate the deserializer to delegate to; must not be null.
     * @param retryOperations the retry operations; must not be null.
     */
    public RetryingDeserializer(Deserializer<T> delegate, RetryOperations retryOperations) {
        Assert.notNull(delegate, "the 'delegate' deserializer cannot be null");
        // fixed copy-paste: the message previously called retryOperations a "deserializer"
        Assert.notNull(retryOperations, "the 'retryOperations' cannot be null");
        this.delegate = delegate;
        this.retryOperations = retryOperations;
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        this.delegate.configure(configs, isKey);
    }

    @Override
    public T deserialize(String topic, byte[] data) {
        return this.retryOperations.execute(context -> {
            return this.delegate.deserialize(topic, data);
        });
    }

    @Override
    public T deserialize(String topic, Headers headers, byte[] data) {
        return this.retryOperations.execute(context -> {
            return this.delegate.deserialize(topic, headers, data);
        });
    }

    @Override
    public void close() {
        this.delegate.close();
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/SerializationUtils.java | /*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.function.BiFunction;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
 * Utilities for serialization.
 *
 * @author Gary Russell
 * @since 2.5
 *
 */
public final class SerializationUtils {

    /**
     * Prefix for headers carrying serialized {@link DeserializationException}s.
     * @since 2.8
     */
    public static final String DESERIALIZER_EXCEPTION_HEADER_PREFIX = "springDeserializerException";

    /**
     * Header name for key deserialization exceptions.
     * @since 2.8
     */
    public static final String KEY_DESERIALIZER_EXCEPTION_HEADER = DESERIALIZER_EXCEPTION_HEADER_PREFIX + "Key";

    /**
     * Header name for value deserialization exceptions.
     * @since 2.8
     */
    public static final String VALUE_DESERIALIZER_EXCEPTION_HEADER = DESERIALIZER_EXCEPTION_HEADER_PREFIX + "Value";

    private SerializationUtils() {
        // utility class; no instances
    }

    /**
     * Convert a property value (FQCN.methodName) to a {@link BiFunction} that takes a
     * payload and headers and returns some value. The method must have parameters
     * {@code (P, Headers)} or {@code (P)} and be declared as static.
     * @param <P> The {@link BiFunction} first parameter type.
     * @param <T> The {@link BiFunction} return type.
     * @param methodProperty the method name property.
     * @param payloadType the {@link BiFunction} first parameter type.
     * @param classLoader the class loader.
     * @return the function.
     */
    @SuppressWarnings("unchecked")
    public static <P, T> BiFunction<P, Headers, T> propertyToMethodInvokingFunction(String methodProperty,
            Class<P> payloadType, ClassLoader classLoader) {

        int lastDotPosn = methodProperty.lastIndexOf('.');
        Assert.state(lastDotPosn > 1,
                "the method property needs to be a class name followed by the method name, separated by '.'");
        BiFunction<P, Headers, T> function;
        Class<?> clazz;
        try {
            clazz = ClassUtils.forName(methodProperty.substring(0, lastDotPosn), classLoader);
        }
        catch (ClassNotFoundException | LinkageError e) {
            throw new IllegalStateException(e);
        }
        String methodName = methodProperty.substring(lastDotPosn + 1);
        Method method;
        try {
            // prefer the (P, Headers) signature; fall back to (P)
            method = clazz.getDeclaredMethod(methodName, payloadType, Headers.class);
        }
        catch (@SuppressWarnings("unused") NoSuchMethodException e) {
            try {
                method = clazz.getDeclaredMethod(methodName, payloadType);
            }
            catch (@SuppressWarnings("unused") NoSuchMethodException e1) {
                IllegalStateException ise =
                        new IllegalStateException("the parser method must take '("
                                + payloadType.getSimpleName()
                                + ", Headers)' or '("
                                + payloadType.getSimpleName()
                                + ")'", e1);
                ise.addSuppressed(e);
                throw ise; // NOSONAR, lost stack trace
            }
            catch (SecurityException e1) {
                IllegalStateException ise = new IllegalStateException(e1);
                ise.addSuppressed(e);
                throw ise; // NOSONAR, lost stack trace
            }
        }
        catch (SecurityException e) {
            throw new IllegalStateException(e);
        }
        Method parseMethod = method;
        if (method.getParameters().length > 1) {
            function = (str, headers) -> {
                try {
                    return (T) parseMethod.invoke(null, str, headers);
                }
                catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
                    throw new IllegalStateException(e);
                }
            };
        }
        else {
            function = (str, headers) -> {
                try {
                    return (T) parseMethod.invoke(null, str);
                }
                catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
                    throw new IllegalStateException(e);
                }
            };
        }
        return function;
    }

    /**
     * Populate the record headers with a serialized {@link DeserializationException}.
     * @param headers the headers.
     * @param data the data.
     * @param ex the exception.
     * @param isForKeyArg true if this is a key deserialization problem, otherwise value.
     * @since 2.8
     */
    public static void deserializationException(Headers headers, byte[] data, Exception ex, boolean isForKeyArg) {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        DeserializationException exception =
                new DeserializationException("failed to deserialize", data, isForKeyArg, ex);
        try (ObjectOutputStream oos = new ObjectOutputStream(stream)) {
            oos.writeObject(exception);
        }
        catch (IOException ioex) {
            // The original cause is not Java-serializable; replace it with a plain
            // RuntimeException that preserves the original exception's type and message.
            // (Previously this message referenced ioex for all three parts, losing the
            // original exception's information and duplicating ioex's message.)
            stream = new ByteArrayOutputStream();
            try (ObjectOutputStream oos = new ObjectOutputStream(stream)) {
                exception = new DeserializationException("failed to deserialize",
                        data, isForKeyArg, new RuntimeException("Could not deserialize type "
                                + ex.getClass().getName() + " with message " + ex.getMessage()
                                + " failure: " + ioex.getMessage()));
                oos.writeObject(exception);
            }
            catch (IOException ex2) {
                throw new IllegalStateException("Could not serialize a DeserializationException", ex2); // NOSONAR
            }
        }
        headers.add(
                new RecordHeader(isForKeyArg
                        ? KEY_DESERIALIZER_EXCEPTION_HEADER
                        : VALUE_DESERIALIZER_EXCEPTION_HEADER,
                        stream.toByteArray()));
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/StringOrBytesSerializer.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;
/**
 * A serializer that can handle {@code byte[]}, {@link Bytes} and {@link String}.
 * Convenient when used with one of the Json message converters.
 *
 * @author Gary Russell
 * @since 2.3
 *
 */
public class StringOrBytesSerializer implements Serializer<Object> {

    // Delegate used only for String payloads.
    private final StringSerializer stringSerializer = new StringSerializer();

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        this.stringSerializer.configure(configs, isKey);
    }

    /**
     * Serialize the supported types; {@code byte[]} and {@link Bytes} pass through,
     * {@link String} goes through the delegate, null yields null.
     * @param topic the topic.
     * @param data the data.
     * @return the serialized bytes, or null.
     * @throws IllegalStateException for any other type.
     */
    @Override
    public byte[] serialize(String topic, Object data) {
        if (data == null) {
            return null;
        }
        if (data instanceof byte[]) {
            return (byte[]) data;
        }
        if (data instanceof Bytes) {
            return ((Bytes) data).get();
        }
        if (data instanceof String) {
            return this.stringSerializer.serialize(topic, (String) data);
        }
        throw new IllegalStateException("This serializer can only handle byte[], Bytes or String values");
    }

    @Override
    public void close() {
        this.stringSerializer.close();
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/ToFromStringSerde.java | /*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.util.Assert;
/**
 * A Serde that delegates to a {@link ToStringSerializer} and
 * {@link ParseStringDeserializer}.
 *
 * @param <T> the type.
 *
 * @author Gary Russell
 * @since 2.5
 *
 */
public class ToFromStringSerde<T> implements Serde<T> {
// serializer half of the Serde
private final ToStringSerializer<T> toStringSerializer;
// deserializer half of the Serde
private final ParseStringDeserializer<T> fromStringDeserializer;
/**
 * Construct an instance with the provided delegates.
 * @param toStringSerializer the {@link ToStringSerializer}; must not be null.
 * @param fromStringDeserializer the {@link ParseStringDeserializer}; must not be null.
 */
public ToFromStringSerde(ToStringSerializer<T> toStringSerializer,
ParseStringDeserializer<T> fromStringDeserializer) {
Assert.notNull(toStringSerializer, "'toStringSerializer' must not be null.");
Assert.notNull(fromStringDeserializer, "'fromStringDeserializer' must not be null.");
this.toStringSerializer = toStringSerializer;
this.fromStringDeserializer = fromStringDeserializer;
}
// propagate configuration to both delegates
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
this.toStringSerializer.configure(configs, isKey);
this.fromStringDeserializer.configure(configs, isKey);
}
@Override
public Serializer<T> serializer() {
return this.toStringSerializer;
}
@Override
public Deserializer<T> deserializer() {
return this.fromStringDeserializer;
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/support/serializer/ToStringSerializer.java | /*
* Copyright 2016-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.support.serializer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
/**
 * Generic {@link org.apache.kafka.common.serialization.Serializer Serializer} that relies on
 * {@link Object#toString()} to get serialized representation of the entity.
 *
 * @param <T> class of the entity, representing messages
 *
 * @author Alexei Klenin
 * @author Gary Russell
 * @since 2.5
 */
public class ToStringSerializer<T> implements Serializer<T> {

    /**
     * Kafka config property for enabling/disabling adding type headers.
     */
    public static final String ADD_TYPE_INFO_HEADERS = "spring.message.add.type.headers";

    /**
     * Header for the type of key.
     */
    public static final String KEY_TYPE = "spring.message.key.type";

    /**
     * Header for the type of value.
     */
    public static final String VALUE_TYPE = "spring.message.value.type";

    // Whether the concrete class name is recorded in a header on each record.
    private boolean addTypeInfo = true;

    // Charset used for the toString() payload bytes; default UTF-8.
    private Charset charset = StandardCharsets.UTF_8;

    // Which type header to write; switched to KEY_TYPE when configured as a key serializer.
    private String typeInfoHeader = VALUE_TYPE;

    /**
     * Configure from Kafka properties; honors {@link #ADD_TYPE_INFO_HEADERS}.
     * @param configs the configuration properties.
     * @param isKey true when used as a key serializer.
     */
    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        if (isKey) {
            this.typeInfoHeader = KEY_TYPE;
        }
        if (configs.containsKey(ADD_TYPE_INFO_HEADERS)) {
            Object config = configs.get(ADD_TYPE_INFO_HEADERS);
            if (config instanceof Boolean) {
                this.addTypeInfo = (Boolean) config;
            }
            else if (config instanceof String) {
                this.addTypeInfo = Boolean.parseBoolean((String) config);
            }
            else {
                throw new IllegalStateException(
                        ADD_TYPE_INFO_HEADERS + " must be Boolean or String");
            }
        }
    }

    @Override
    public byte[] serialize(String topic, @Nullable T data) {
        return serialize(topic, null, data);
    }

    /**
     * Serialize the entity via {@link Object#toString()}, optionally writing a
     * header containing the concrete class name.
     * @param topic the topic.
     * @param headers the headers; the type header is only added when non-null.
     * @param data the entity; null yields a null payload.
     * @return the bytes of {@code data.toString()}, or null.
     */
    @Override
    @Nullable
    public byte[] serialize(String topic, @Nullable Headers headers, @Nullable T data) {
        if (data == null) {
            return null;
        }
        if (this.addTypeInfo && headers != null) {
            // Use an explicit charset: the no-arg getBytes() depends on the platform
            // default charset, making the header bytes environment-dependent.
            headers.add(this.typeInfoHeader, data.getClass().getName().getBytes(StandardCharsets.UTF_8));
        }
        return data.toString().getBytes(this.charset);
    }

    @Override
    public void close() {
        // No-op - no resources to release
    }

    /**
     * Get the addTypeInfo property.
     * @return the addTypeInfo
     */
    public boolean isAddTypeInfo() {
        return this.addTypeInfo;
    }

    /**
     * Set to false to disable adding type info headers.
     * @param addTypeInfo true to add headers
     */
    public void setAddTypeInfo(boolean addTypeInfo) {
        this.addTypeInfo = addTypeInfo;
    }

    /**
     * Set a charset to use when converting {@link String} to byte[]. Default UTF-8.
     * @param charset the charset.
     */
    public void setCharset(Charset charset) {
        Assert.notNull(charset, "'charset' cannot be null");
        this.charset = charset;
    }

    /**
     * Get the configured charset.
     * @return the charset.
     */
    public Charset getCharset() {
        return this.charset;
    }

}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/transaction/ChainedKafkaTransactionManager.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.transaction;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.util.Assert;
/**
 * A {@link org.springframework.data.transaction.ChainedTransactionManager} that has
 * exactly one {@link KafkaAwareTransactionManager} in the chain.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 * @since 2.1.3
 * @deprecated Refer to the
 * {@link org.springframework.data.transaction.ChainedTransactionManager} javadocs.
 *
 */
@Deprecated
public class ChainedKafkaTransactionManager<K, V> extends org.springframework.data.transaction.ChainedTransactionManager
implements KafkaAwareTransactionManager<K, V> {
// the single Kafka-aware manager located in the chain; used to expose its ProducerFactory
private final KafkaAwareTransactionManager<K, V> kafkaTransactionManager;
/**
 * Construct an instance with the provided {@link PlatformTransactionManager}s.
 * Exactly one of them must be a {@link KafkaAwareTransactionManager}.
 * @param transactionManagers the transaction managers.
 */
@SuppressWarnings("unchecked")
public ChainedKafkaTransactionManager(PlatformTransactionManager... transactionManagers) {
super(transactionManagers);
// scan the chain for the single KafkaAwareTransactionManager; reject duplicates
KafkaAwareTransactionManager<K, V> uniqueKafkaTransactionManager = null;
for (PlatformTransactionManager tm : transactionManagers) {
if (tm instanceof KafkaAwareTransactionManager) {
Assert.isNull(uniqueKafkaTransactionManager, "Only one KafkaAwareTransactionManager is allowed");
uniqueKafkaTransactionManager = (KafkaAwareTransactionManager<K, V>) tm;
}
}
Assert.notNull(uniqueKafkaTransactionManager, "Exactly one KafkaAwareTransactionManager is required");
this.kafkaTransactionManager = uniqueKafkaTransactionManager;
}
@Override
public ProducerFactory<K, V> getProducerFactory() {
return this.kafkaTransactionManager.getProducerFactory();
}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/transaction/KafkaAwareTransactionManager.java | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.transaction;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.transaction.PlatformTransactionManager;
/**
 * A transaction manager that can provide a {@link ProducerFactory}.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Gary Russell
 * @since 2.1.3
 *
 */
public interface KafkaAwareTransactionManager<K, V> extends PlatformTransactionManager {
/**
 * Get the producer factory backing this transaction manager.
 * @return the producerFactory
 */
ProducerFactory<K, V> getProducerFactory();
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/transaction/KafkaTransactionManager.java | /*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.transaction;
import java.time.Duration;
import org.springframework.kafka.core.KafkaResourceHolder;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.core.ProducerFactoryUtils;
import org.springframework.transaction.CannotCreateTransactionException;
import org.springframework.transaction.InvalidIsolationLevelException;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.support.AbstractPlatformTransactionManager;
import org.springframework.transaction.support.DefaultTransactionStatus;
import org.springframework.transaction.support.SmartTransactionObject;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import org.springframework.util.Assert;
/**
* {@link org.springframework.transaction.PlatformTransactionManager} implementation for a
* single Kafka {@link ProducerFactory}. Binds a Kafka producer from the specified
* ProducerFactory to the thread, potentially allowing for one thread-bound producer per
* ProducerFactory.
*
* <p>
* This local strategy is an alternative to executing Kafka operations within, and
* synchronized with, external transactions. This strategy is <i>not</i> able to provide
* XA transactions, for example in order to share transactions between messaging and
* database access.
*
* <p>
* Application code is required to retrieve the transactional Kafka resources via
* {@link ProducerFactoryUtils#getTransactionalResourceHolder(ProducerFactory, String, java.time.Duration)}.
* Spring's {@link org.springframework.kafka.core.KafkaTemplate KafkaTemplate} will auto
* detect a thread-bound Producer and automatically participate in it.
*
* <p>
* <b>The use of {@link org.springframework.kafka.core.DefaultKafkaProducerFactory
* DefaultKafkaProducerFactory} as a target for this transaction manager is strongly
* recommended.</b> Because it caches producers for reuse.
*
* <p>
* Transaction synchronization is turned off by default, as this manager might be used
* alongside a datastore-based Spring transaction manager such as the JDBC
* org.springframework.jdbc.datasource.DataSourceTransactionManager, which has stronger
* needs for synchronization.
*
* @param <K> the key type.
* @param <V> the value type.
*
* @author Gary Russell
*/
@SuppressWarnings("serial")
public class KafkaTransactionManager<K, V> extends AbstractPlatformTransactionManager
		implements KafkaAwareTransactionManager<K, V> {
	private static final String UNCHECKED = "unchecked";
	// Factory from which transactional producers are obtained; validated as transaction-capable in the constructor.
	private final ProducerFactory<K, V> producerFactory;
	// Optional prefix overriding the one configured on the producer factory (see setTransactionIdPrefix).
	private String transactionIdPrefix;
	// Maximum time to wait when closing the producer during cleanup; defaults to ProducerFactoryUtils' value.
	private Duration closeTimeout = ProducerFactoryUtils.DEFAULT_CLOSE_TIMEOUT;
	/**
	 * Create a new KafkaTransactionManager, given a ProducerFactory.
	 * Transaction synchronization is turned off by default, as this manager might be used alongside a datastore-based
	 * Spring transaction manager like DataSourceTransactionManager, which has stronger needs for synchronization. Only
	 * one manager is allowed to drive synchronization at any point of time.
	 * @param producerFactory the ProducerFactory to use
	 */
	public KafkaTransactionManager(ProducerFactory<K, V> producerFactory) {
		Assert.notNull(producerFactory, "The 'ProducerFactory' cannot be null");
		Assert.isTrue(producerFactory.transactionCapable(), "The 'ProducerFactory' must support transactions");
		setTransactionSynchronization(SYNCHRONIZATION_NEVER);
		this.producerFactory = producerFactory;
	}
	/**
	 * Set a transaction id prefix to override the prefix in the producer factory.
	 * @param transactionIdPrefix the prefix.
	 * @since 2.3
	 */
	public void setTransactionIdPrefix(String transactionIdPrefix) {
		this.transactionIdPrefix = transactionIdPrefix;
	}
	/**
	 * Get the producer factory.
	 * @return the producerFactory
	 */
	@Override
	public ProducerFactory<K, V> getProducerFactory() {
		return this.producerFactory;
	}
	/**
	 * Set the maximum time to wait when closing a producer; default 5 seconds.
	 * @param closeTimeout the close timeout.
	 * @since 2.1.14
	 */
	public void setCloseTimeout(Duration closeTimeout) {
		Assert.notNull(closeTimeout, "'closeTimeout' cannot be null");
		this.closeTimeout = closeTimeout;
	}
	/**
	 * Return a transaction object carrying whatever resource holder is already bound to
	 * the current thread for this producer factory; a null holder means no active transaction.
	 */
	@SuppressWarnings(UNCHECKED)
	@Override
	protected Object doGetTransaction() {
		KafkaTransactionObject<K, V> txObject = new KafkaTransactionObject<K, V>();
		txObject.setResourceHolder((KafkaResourceHolder<K, V>) TransactionSynchronizationManager
				.getResource(getProducerFactory()));
		return txObject;
	}
	/**
	 * A transaction exists when the transaction object already carries a bound resource holder.
	 */
	@Override
	protected boolean isExistingTransaction(Object transaction) {
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) transaction;
		return (txObject.getResourceHolder() != null);
	}
	/**
	 * Begin a Kafka transaction: obtain a transactional producer/holder via
	 * {@link ProducerFactoryUtils#getTransactionalResourceHolder(ProducerFactory, String, Duration)}
	 * (which also begins the producer transaction and binds the holder to the thread),
	 * then apply any transaction timeout. Kafka has no isolation-level concept, so a
	 * non-default isolation setting is rejected. On any failure the holder's resources
	 * are released before rethrowing as CannotCreateTransactionException.
	 */
	@Override
	protected void doBegin(Object transaction, TransactionDefinition definition) {
		if (definition.getIsolationLevel() != TransactionDefinition.ISOLATION_DEFAULT) {
			throw new InvalidIsolationLevelException("Apache Kafka does not support an isolation level concept");
		}
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) transaction;
		KafkaResourceHolder<K, V> resourceHolder = null;
		try {
			resourceHolder = ProducerFactoryUtils.getTransactionalResourceHolder(getProducerFactory(),
					this.transactionIdPrefix, this.closeTimeout);
			if (logger.isDebugEnabled()) {
				logger.debug("Created Kafka transaction on producer [" + resourceHolder.getProducer() + "]");
			}
			txObject.setResourceHolder(resourceHolder);
			txObject.getResourceHolder().setSynchronizedWithTransaction(true);
			int timeout = determineTimeout(definition);
			if (timeout != TransactionDefinition.TIMEOUT_DEFAULT) {
				txObject.getResourceHolder().setTimeoutInSeconds(timeout);
			}
		}
		catch (Exception ex) {
			if (resourceHolder != null) {
				ProducerFactoryUtils.releaseResources(resourceHolder);
			}
			throw new CannotCreateTransactionException("Could not create Kafka transaction", ex);
		}
	}
	/**
	 * Suspend the current transaction: detach the holder from the transaction object and
	 * unbind it from the thread, returning it so it can be resumed later.
	 */
	@Override
	protected Object doSuspend(Object transaction) {
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) transaction;
		txObject.setResourceHolder(null);
		return TransactionSynchronizationManager.unbindResource(getProducerFactory());
	}
	/**
	 * Resume a previously suspended transaction by re-binding its holder to the thread.
	 */
	@Override
	protected void doResume(Object transaction, Object suspendedResources) {
		@SuppressWarnings(UNCHECKED)
		KafkaResourceHolder<K, V> producerHolder = (KafkaResourceHolder<K, V>) suspendedResources;
		TransactionSynchronizationManager.bindResource(getProducerFactory(), producerHolder);
	}
	/**
	 * Commit the producer transaction carried by the status' transaction object.
	 */
	@Override
	protected void doCommit(DefaultTransactionStatus status) {
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) status.getTransaction();
		KafkaResourceHolder<K, V> resourceHolder = txObject.getResourceHolder();
		resourceHolder.commit();
	}
	/**
	 * Abort the producer transaction carried by the status' transaction object.
	 */
	@Override
	protected void doRollback(DefaultTransactionStatus status) {
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) status.getTransaction();
		KafkaResourceHolder<K, V> resourceHolder = txObject.getResourceHolder();
		resourceHolder.rollback();
	}
	/**
	 * Mark the current transaction rollback-only on its resource holder.
	 */
	@Override
	protected void doSetRollbackOnly(DefaultTransactionStatus status) {
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) status.getTransaction();
		txObject.getResourceHolder().setRollbackOnly();
	}
	/**
	 * After completion: unbind the holder from the thread, close the producer (honoring
	 * the configured close timeout) and reset the holder's state. Order matters here:
	 * the resource is unbound before the producer is closed.
	 */
	@Override
	protected void doCleanupAfterCompletion(Object transaction) {
		@SuppressWarnings(UNCHECKED)
		KafkaTransactionObject<K, V> txObject = (KafkaTransactionObject<K, V>) transaction;
		TransactionSynchronizationManager.unbindResource(getProducerFactory());
		txObject.getResourceHolder().close();
		txObject.getResourceHolder().clear();
	}
	/**
	 * Kafka transaction object, representing a KafkaResourceHolder. Used as transaction object by
	 * KafkaTransactionManager.
	 * @see KafkaResourceHolder
	 */
	private static class KafkaTransactionObject<K, V> implements SmartTransactionObject {
		// Holder for the transactional producer; null when no transaction is active.
		private KafkaResourceHolder<K, V> resourceHolder;
		KafkaTransactionObject() {
		}
		public void setResourceHolder(KafkaResourceHolder<K, V> resourceHolder) {
			this.resourceHolder = resourceHolder;
		}
		public KafkaResourceHolder<K, V> getResourceHolder() {
			return this.resourceHolder;
		}
		@Override
		public boolean isRollbackOnly() {
			return this.resourceHolder.isRollbackOnly();
		}
		@Override
		public void flush() {
			// no-op
		}
	}
}
|
0 | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka | java-sources/ai/superstream/spring-kafka/2.8.4-alpha6/org/springframework/kafka/transaction/package-info.java | /**
* Provides classes related to transactions.
*/
package org.springframework.kafka.transaction;
|
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/agent/KafkaProducerCloseInterceptor.java | package ai.superstream.agent;
import ai.superstream.util.SuperstreamLogger;
import net.bytebuddy.asm.Advice;
/**
* Intercepts {@code KafkaProducer.close(..)} so that Superstream stops collecting
* metrics and sending statistics for that producer instance after the
* application has closed it.
*/
public class KafkaProducerCloseInterceptor {
// Must be non-private because instrumented KafkaProducer.close() accesses this field via generated bytecode.
public static final SuperstreamLogger logger = SuperstreamLogger.getLogger(KafkaProducerCloseInterceptor.class);
@Advice.OnMethodEnter
public static void onEnter(@Advice.This Object producer) {
if (KafkaProducerInterceptor.isDisabled()) {
return;
}
// Delegate to the main interceptor helper which handles thread-safety and
// registry updates. We log only the first successful deactivation to
// avoid duplicate messages when close() delegates internally.
try {
// KafkaProducer has three close(...) overloads that delegate to each other
// (close(), close(Duration), close(long, TimeUnit)). Our agent advice is
// woven into *all* of them, so this method is invoked once per layer of
// delegation. We therefore rely on markProducerClosed() to tell us
// whether this is the *first* invocation for the given producer object
// and suppress further logging when it returns false.
KafkaProducerInterceptor.markProducerClosed(producer);
} catch (Throwable ignored) {
// We swallow any error so that we never affect the application's own
// close() behaviour.
}
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/agent/KafkaProducerInterceptor.java | package ai.superstream.agent;
import ai.superstream.core.ClientStatsReporter;
import ai.superstream.core.SuperstreamManager;
import ai.superstream.model.MetadataMessage;
import ai.superstream.util.SuperstreamLogger;
import ai.superstream.util.ClientUtils;
import net.bytebuddy.asm.Advice;
import java.util.AbstractMap;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.Properties;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.lang.ThreadLocal;
import java.util.List;
import java.util.Arrays;
/**
* Intercepts KafkaProducer constructor calls to optimize configurations and
* collect metrics.
*/
public class KafkaProducerInterceptor {
public static final SuperstreamLogger logger = SuperstreamLogger.getLogger(KafkaProducerInterceptor.class);
// Constant for Superstream library prefix
public static final String SUPERSTREAM_LIBRARY_PREFIX = "superstreamlib-";
// Environment variable to check if Superstream is disabled
private static final String DISABLED_ENV_VAR = "SUPERSTREAM_DISABLED";
public static final boolean DISABLED = Boolean.parseBoolean(System.getenv(DISABLED_ENV_VAR));
// Map to store client stats reporters for each producer
public static final ConcurrentHashMap<String, ClientStatsReporter> clientStatsReporters = new ConcurrentHashMap<>();
// Map to store producer metrics info by producer ID
public static final ConcurrentHashMap<String, ProducerMetricsInfo> producerMetricsMap = new ConcurrentHashMap<>();
// Single shared metrics collector for all producers
public static final SharedMetricsCollector sharedCollector = new SharedMetricsCollector();
// ThreadLocal stack to track nested KafkaProducer constructor calls per thread
// This ensures that when the Superstream optimization logic creates its own producer
// inside the application's producer construction, we still match the correct
// properties object on exit (LIFO order).
public static final ThreadLocal<java.util.Deque<Properties>> TL_PROPS_STACK =
ThreadLocal.withInitial(java.util.ArrayDeque::new);
// ThreadLocal stack to hold the producer UUIDs generated in onEnter so that the same
// value can be reused later in onExit (for stats reporting) and by SuperstreamManager
// when it reports the client information. The stack is aligned with the TL_PROPS_STACK
// (push in onEnter, pop in onExit).
public static final ThreadLocal<java.util.Deque<String>> TL_UUID_STACK =
ThreadLocal.withInitial(java.util.ArrayDeque::new);
// ThreadLocal stack to pass original/optimized configuration maps from optimization phase to reporter creation.
public static final ThreadLocal<java.util.Deque<ConfigInfo>> TL_CFG_STACK =
ThreadLocal.withInitial(java.util.ArrayDeque::new);
// Static initializer to start the shared collector if enabled
static {
if (!DISABLED) {
try {
sharedCollector.start();
} catch (Exception e) {
logger.error("[ERR-001] Failed to start metrics collector: {}", e.getMessage(), e);
}
} else {
logger.warn("Superstream is disabled via SUPERSTREAM_DISABLED environment variable");
}
}
/**
* Check if Superstream is disabled via environment variable.
*
* @return true if Superstream is disabled, false otherwise
*/
public static boolean isDisabled() {
return DISABLED;
}
    /**
     * Called before the KafkaProducer constructor.
     * Used to optimize producer configurations.
     *
     * <p>Runs only for the outer-most constructor call of an application-created
     * producer. Pushes the extracted Properties and a freshly generated producer
     * UUID onto per-thread stacks; {@code onExit} pops them in LIFO order, so the
     * push/pop protocol here must stay exactly aligned with that method.
     *
     * @param args The producer properties
     */
    @Advice.OnMethodEnter
    public static void onEnter(@Advice.AllArguments Object[] args) {
        // Skip if Superstream is disabled via environment variable
        if (isDisabled()) {
            return;
        }
        // Check if this is a direct call from application code or an internal
        // delegation
        if (!isInitialProducerCreation()) {
            return;
        }
        // Extract Properties or Map from the arguments and push onto the stack
        Properties properties = extractProperties(args);
        if (properties != null) {
            // Replace the original Map argument with our Properties instance so that
            // the KafkaProducer constructor reads the optimised values.
            for (int i = 0; i < args.length; i++) {
                Object a = args[i];
                if (a instanceof java.util.Map && !(a instanceof java.util.Properties)) {
                    args[i] = properties;
                    break;
                }
            }
            TL_PROPS_STACK.get().push(properties);
            // Generate a UUID for this upcoming producer instance and push onto UUID stack
            String producerUuid = java.util.UUID.randomUUID().toString();
            TL_UUID_STACK.get().push(producerUuid);
        } else {
            logger.error("[ERR-002] Could not extract properties from producer arguments");
            // here we can not report the error to the clients topic as we were not able to extract the properties
            return;
        }
        // Detect immutable Map argument (e.g., Collections.unmodifiableMap) so we can skip optimisation early
        boolean immutableConfigDetected = false;
        java.util.Map<String,Object> immutableOriginalMap = null;
        for (Object arg : args) {
            if (arg instanceof java.util.Map) {
                @SuppressWarnings("unchecked")
                java.util.Map<String,Object> testMap = (java.util.Map<String,Object>) arg;
                // Skip empty maps as they might be immutable but don't matter for optimization
                if (!testMap.isEmpty()) {
                    try {
                        // Probe mutability by inserting and removing a unique sentinel key;
                        // NOTE(review): this briefly mutates the caller's map when it is mutable.
                        String testKey = "__superstream_immutable_test_" + System.nanoTime();
                        testMap.put(testKey, "test");
                        testMap.remove(testKey);
                    } catch (UnsupportedOperationException | IllegalStateException e) {
                        // Map is immutable
                        immutableConfigDetected = true;
                        immutableOriginalMap = testMap;
                        break;
                    }
                }
            }
        }
        try {
            // Skip if we're already in the process of optimizing
            if (SuperstreamManager.isOptimizationInProgress()) {
                return;
            }
            if (properties == null || properties.isEmpty()) {
                logger.error("[ERR-003] Could not extract properties from properties");
                return;
            }
            // Skip producers created by the Superstream library
            String clientId = properties.getProperty("client.id", "");
            if (clientId.startsWith(SUPERSTREAM_LIBRARY_PREFIX)) {
                logger.debug("Skipping optimization for Superstream internal producer: {}", clientId);
                return;
            }
            // Extract bootstrap servers
            String bootstrapServers = properties.getProperty("bootstrap.servers");
            if (bootstrapServers == null || bootstrapServers.trim().isEmpty()) {
                logger.error("[ERR-004] bootstrap.servers is not set, cannot optimize");
                return;
            }
            if (immutableConfigDetected && immutableOriginalMap != null) {
                String errMsg = String.format("[ERR-010] Cannot optimize KafkaProducer configuration: received an unmodifiable Map (%s). Please pass a mutable java.util.Properties or java.util.Map instead.",
                        immutableOriginalMap.getClass().getName());
                logger.error(errMsg);
                // Push ConfigInfo with error and original config for stats reporting
                java.util.Deque<ConfigInfo> cfgStack = TL_CFG_STACK.get();
                cfgStack.push(new ConfigInfo(propertiesToMap(properties), new java.util.HashMap<>(), errMsg));
                // Do NOT attempt optimisation
                return;
            }
            // Store original properties to restore in case of failure
            java.util.Map<String, Object> originalPropertiesMap = propertiesToMap(properties);
            Properties originalProperties = new Properties();
            originalProperties.putAll(properties);
            try {
                // Optimize the producer
                boolean optimized = SuperstreamManager.getInstance().optimizeProducer(bootstrapServers, clientId, properties);
                if (!optimized) {
                    // Restore original properties if optimization was not successful
                    properties.putAll(originalProperties);
                    // Push ConfigInfo with original config for stats reporting
                    java.util.Deque<ConfigInfo> cfgStack = TL_CFG_STACK.get();
                    // Check if there's an existing ConfigInfo with an error
                    ConfigInfo existingConfig = cfgStack.isEmpty() ? null : cfgStack.peek();
                    String error = existingConfig != null ? existingConfig.error : null;
                    cfgStack.push(new ConfigInfo(originalPropertiesMap, new java.util.HashMap<>(), error));
                }
            } catch (Exception e) {
                logger.error("[ERR-053] Error during producer optimization: {}", e.getMessage(), e);
                // Restore original properties in case of failure
                properties.putAll(originalProperties);
                // Push ConfigInfo with original config for stats reporting
                java.util.Deque<ConfigInfo> cfgStack = TL_CFG_STACK.get();
                cfgStack.push(new ConfigInfo(originalPropertiesMap, new java.util.HashMap<>()));
            }
        } catch (Exception e) {
            logger.error("[ERR-053] Error during producer optimization: {}", e.getMessage(), e);
        }
    }
    /**
     * Called after the KafkaProducer constructor.
     * Used to register the producer for metrics collection.
     *
     * <p>Pops the Properties and UUID that {@code onEnter} pushed for this
     * constructor invocation (LIFO order keeps nested internal producer creations
     * matched correctly), then wires up a {@link ClientStatsReporter} and registers
     * the producer with the shared metrics collector.
     *
     * @param producer The KafkaProducer instance that was just created
     */
    @Advice.OnMethodExit
    public static void onExit(@Advice.This Object producer) {
        // Skip if Superstream is disabled via environment variable
        if (isDisabled()) {
            return;
        }
        try {
            // Process only for the outer-most constructor call
            if (!isInitialProducerCreation()) {
                return;
            }
            java.util.Deque<Properties> stack = TL_PROPS_STACK.get();
            if (stack.isEmpty()) {
                logger.error("[ERR-006] No captured properties for this producer constructor; skipping stats reporter setup");
                return;
            }
            Properties producerProps = stack.pop();
            // Retrieve matching UUID for this constructor instance
            java.util.Deque<String> uuidStack = TL_UUID_STACK.get();
            String producerUuid = "";
            if (!uuidStack.isEmpty()) {
                producerUuid = uuidStack.pop();
            } else {
                logger.error("[ERR-127] No producer UUID found for this constructor instance");
            }
            // Clean up ThreadLocal when outer-most constructor finishes
            if (stack.isEmpty()) {
                TL_PROPS_STACK.remove();
                TL_UUID_STACK.remove();
            }
            String bootstrapServers = producerProps.getProperty("bootstrap.servers");
            if (bootstrapServers == null || bootstrapServers.isEmpty()) {
                logger.error("[ERR-007] bootstrap.servers missing in captured properties; skipping reporter setup");
                return;
            }
            String rawClientId = producerProps.getProperty("client.id"); // may be null or empty
            // Skip internal library producers (identified by client.id prefix)
            if (rawClientId != null && rawClientId.startsWith(SUPERSTREAM_LIBRARY_PREFIX)) {
                return;
            }
            // Use the JVM identity hash to create a unique key per producer instance
            String producerId = "producer-" + System.identityHashCode(producer);
            // The client ID to be reported is the raw value (may be null or empty, that's OK)
            String clientIdForStats = rawClientId != null ? rawClientId : "";
            // Only register if we don't already have this producer instance
            if (!producerMetricsMap.containsKey(producerId)) {
                logger.debug("Registering producer with metrics collector: {} (client.id='{}')", producerId, clientIdForStats);
                // Create a reporter for this producer instance – pass the original client.id
                ClientStatsReporter reporter = new ClientStatsReporter(bootstrapServers, producerProps, clientIdForStats, producerUuid);
                // Set the most impactful topic if possible
                try {
                    MetadataMessage metadataMessage = null;
                    List<String> topics = null;
                    // Try to get metadata and topics if available
                    if (producerProps != null) {
                        String bootstrapServersProp = producerProps.getProperty("bootstrap.servers");
                        if (bootstrapServersProp != null) {
                            AbstractMap.SimpleEntry<MetadataMessage, String> metadataResult = SuperstreamManager.getInstance().getOrFetchMetadataMessage(bootstrapServersProp, producerProps);
                            metadataMessage = metadataResult.getKey();
                        }
                        // Topics of interest come from an env var, comma separated.
                        String topicsEnv = System.getenv("SUPERSTREAM_TOPICS_LIST");
                        if (topicsEnv != null && !topicsEnv.trim().isEmpty()) {
                            topics = Arrays.asList(topicsEnv.split(","));
                        }
                    }
                    if (metadataMessage != null && topics != null) {
                        String mostImpactfulTopic = SuperstreamManager.getInstance().getConfigurationOptimizer().getMostImpactfulTopicName(metadataMessage, topics);
                        if (mostImpactfulTopic == null) {
                            mostImpactfulTopic = "";
                        }
                        reporter.updateMostImpactfulTopic(mostImpactfulTopic);
                    }
                } catch (Exception e) {
                    logger.debug("Failed to get most impactful topic: {}", e.getMessage());
                }
                // Create metrics info for this producer
                ProducerMetricsInfo metricsInfo = new ProducerMetricsInfo(producer, reporter);
                // Register with the shared collector
                producerMetricsMap.put(producerId, metricsInfo);
                clientStatsReporters.put(producerId, reporter);
                // Pop configuration info from ThreadLocal stack (if any) and attach to reporter
                java.util.Deque<ConfigInfo> cfgStack = TL_CFG_STACK.get();
                ConfigInfo cfgInfo = cfgStack.isEmpty()? null : cfgStack.pop();
                if (cfgStack.isEmpty()) {
                    TL_CFG_STACK.remove();
                }
                if (cfgInfo != null) {
                    // Use the original configuration from ConfigInfo and get complete config with defaults
                    java.util.Map<String, Object> completeConfig = ClientUtils.getCompleteProducerConfig(cfgInfo.originalConfig);
                    java.util.Map<String, Object> optimizedConfig = cfgInfo.optimizedConfig != null ? cfgInfo.optimizedConfig : new java.util.HashMap<>();
                    reporter.setConfigurations(completeConfig, optimizedConfig);
                    // If optimizedConfig is empty and there is an error, set the error on the reporter
                    if (optimizedConfig.isEmpty() && cfgInfo.error != null && !cfgInfo.error.isEmpty()) {
                        reporter.updateError(cfgInfo.error);
                    }
                } else {
                    // No ConfigInfo available, so no optimization was performed
                    // Use the producer properties as both original and optimized (since no changes were made)
                    java.util.Map<String, Object> originalPropsMap = propertiesToMap(producerProps);
                    java.util.Map<String, Object> completeConfig = ClientUtils.getCompleteProducerConfig(originalPropsMap);
                    reporter.setConfigurations(completeConfig, new java.util.HashMap<>());
                }
                // Trigger immediate metrics collection for this producer
                try {
                    sharedCollector.collectMetricsForProducer(producerId, metricsInfo);
                } catch (Exception e) {
                    logger.error("[ERR-047] Failed to collect immediate metrics for new producer {}: {}", producerId, e.getMessage(), e);
                }
                logger.debug("Producer {} registered with shared metrics collector", producerId);
            }
        } catch (Exception e) {
            logger.error("[ERR-008] Error registering producer with metrics collector: {}", e.getMessage(), e);
        }
    }
/**
* Extract Properties object from constructor arguments.
*/
public static Properties extractProperties(Object[] args) {
// Look for Properties or Map in the arguments
if (args == null) {
logger.error("[ERR-009] extractProperties: args array is null");
return null;
}
logger.debug("extractProperties: Processing {} arguments", args.length);
for (Object arg : args) {
if (arg == null) {
logger.debug("extractProperties: Found null argument");
continue;
}
String className = arg.getClass().getName();
logger.debug("extractProperties: Processing argument of type: {}", className);
if (arg instanceof Properties) {
logger.debug("extractProperties: Found Properties object");
Properties props = (Properties) arg;
normalizeBootstrapServers(props);
return props;
}
if (arg instanceof Map) {
logger.debug("extractProperties: Found Map object of type: {}", arg.getClass().getName());
// If the map is unmodifiable we cannot actually modify it later; we still let the caller decide
try {
@SuppressWarnings("unchecked")
Map<String, Object> map = (Map<String, Object>) arg;
Properties props = new MapBackedProperties(map);
logger.debug("extractProperties: Successfully converted Map to Properties");
return props;
} catch (ClassCastException e) {
// Not the map type we expected
logger.error("[ERR-011] extractProperties: Could not cast Map to Map<String, Object>: {}", e.getMessage(), e);
return null;
}
}
// Handle ProducerConfig object which contains properties
if (className.endsWith("ProducerConfig")) {
logger.debug("extractProperties: Found ProducerConfig object");
try {
// Try multiple possible field names
String[] fieldNames = { "originals", "values", "props", "properties", "configs" };
for (String fieldName : fieldNames) {
try {
Field field = arg.getClass().getDeclaredField(fieldName);
field.setAccessible(true);
Object fieldValue = field.get(arg);
logger.debug("extractProperties: Found field {} with value type: {}", fieldName,
fieldValue != null ? fieldValue.getClass().getName() : "null");
if (fieldValue instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> map = (Map<String, Object>) fieldValue;
Properties props = new MapBackedProperties(map);
logger.debug(
"extractProperties: Successfully converted ProducerConfig field {} to Properties",
fieldName);
return props;
} else if (fieldValue instanceof Properties) {
logger.debug("extractProperties: Found Properties in ProducerConfig field {}",
fieldName);
Properties props = (Properties) fieldValue;
normalizeBootstrapServers(props);
return props;
}
} catch (NoSuchFieldException e) {
// Field doesn't exist, try the next one
logger.error("[ERR-017] extractProperties: Field {} not found in ProducerConfig", fieldName);
continue;
}
}
// Try to call getters if field access failed
logger.debug("extractProperties: Trying getter methods for ProducerConfig");
for (Method method : arg.getClass().getMethods()) {
if ((method.getName().equals("originals") ||
method.getName().equals("values") ||
method.getName().equals("configs") ||
method.getName().equals("properties") ||
method.getName().equals("getOriginals") ||
method.getName().equals("getValues") ||
method.getName().equals("getConfigs") ||
method.getName().equals("getProperties")) &&
method.getParameterCount() == 0) {
Object result = method.invoke(arg);
logger.debug("extractProperties: Called method {} with result type: {}", method.getName(),
result != null ? result.getClass().getName() : "null");
if (result instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> map = (Map<String, Object>) result;
Properties props = new MapBackedProperties(map);
logger.debug(
"extractProperties: Successfully converted ProducerConfig method {} result to Properties",
method.getName());
return props;
} else if (result instanceof Properties) {
logger.debug("extractProperties: Found Properties in ProducerConfig method {} result",
method.getName());
Properties props = (Properties) result;
normalizeBootstrapServers(props);
return props;
}
}
}
// Last resort: Try to get the ProducerConfig's bootstrap.servers value
// and create a minimal Properties object
logger.debug("extractProperties: Trying last resort method to get bootstrap.servers");
for (Method method : arg.getClass().getMethods()) {
if (method.getName().equals("getString") && method.getParameterCount() == 1) {
try {
String bootstrapServers = (String) method.invoke(arg, "bootstrap.servers");
String clientId = (String) method.invoke(arg, "client.id");
if (bootstrapServers != null) {
Properties minProps = new Properties();
minProps.put("bootstrap.servers", bootstrapServers);
if (clientId != null) {
minProps.put("client.id", clientId);
}
logger.debug(
"extractProperties: Created minimal Properties with bootstrap.servers and client.id");
return minProps;
}
} catch (Exception e) {
logger.debug("extractProperties: Failed to get bootstrap.servers from ProducerConfig",
e);
}
}
}
} catch (Exception e) {
logger.error("[ERR-018] extractProperties: Failed to extract properties from ProducerConfig: {}",
e.getMessage(), e);
return null;
}
}
}
logger.error("[ERR-019] extractProperties: No valid configuration object found in arguments");
return null;
}
/**
* Ensure that the bootstrap.servers property is stored as a comma-separated String even when
* the user supplied it as a Collection (or array) inside a java.util.Properties instance.
* This keeps the rest of the optimisation pipeline – which relies on getProperty(String) – working.
*/
private static void normalizeBootstrapServers(Properties props) {
if (props == null) {
return;
}
Object bsObj = props.get("bootstrap.servers");
if (bsObj == null) {
return;
}
String joined = null;
if (bsObj instanceof java.util.Collection) {
java.util.Collection<?> col = (java.util.Collection<?>) bsObj;
StringBuilder sb = new StringBuilder();
for (Object o : col) {
if (o == null) continue;
if (sb.length() > 0) sb.append(',');
sb.append(o.toString());
}
joined = sb.toString();
} else if (bsObj.getClass().isArray()) {
int len = java.lang.reflect.Array.getLength(bsObj);
StringBuilder sb = new StringBuilder();
for (int i = 0; i < len; i++) {
Object o = java.lang.reflect.Array.get(bsObj, i);
if (o == null) continue;
if (sb.length() > 0) sb.append(',');
sb.append(o.toString());
}
joined = sb.toString();
}
if (joined != null && !joined.isEmpty()) {
props.put("bootstrap.servers", joined);
}
}
/**
* Determines if this constructor call is the initial creation from application
* code
* rather than an internal delegation between constructors.
*/
public static boolean isInitialProducerCreation() {
StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
// Start from index 1 to skip getStackTrace() itself
boolean foundKafkaProducer = false;
int kafkaProducerCount = 0;
for (int i = 1; i < stackTrace.length; i++) {
String className = stackTrace[i].getClassName();
// Only treat the *actual* KafkaProducer class (or its anonymous / inner classes) as a match.
// This avoids counting user subclasses such as TemplateKafkaProducer which also end with the
// same suffix and would otherwise be mistaken for an internal constructor delegation.
String simpleName;
int lastDotIdx = className.lastIndexOf('.');
simpleName = (lastDotIdx >= 0) ? className.substring(lastDotIdx + 1) : className;
boolean isKafkaProducerClass = simpleName.equals("KafkaProducer") || simpleName.startsWith("KafkaProducer$");
if (isKafkaProducerClass) {
foundKafkaProducer = true;
kafkaProducerCount++;
// If we find more than one KafkaProducer in the stack, it's a delegation
if (kafkaProducerCount > 1) {
return false;
}
}
// Once we've seen KafkaProducer and then see a different class,
// we've found the actual caller
else if (foundKafkaProducer) {
// Skip certain framework classes that might wrap the call
if (className.startsWith("java.") ||
className.startsWith("javax.") ||
className.startsWith("sun.") ||
className.startsWith("com.sun.")) {
continue;
}
// We've found the application class that called KafkaProducer
logger.debug("Detected initial producer creation from: " + className);
return true;
}
}
// If we make it here with exactly one KafkaProducer in the stack, it's likely
// the initial creation (first constructor being called)
return kafkaProducerCount == 1;
}
/**
* Helper utility to extract a field value using reflection.
*/
public static Object extractFieldValue(Object obj, String... fieldNames) {
if (obj == null)
return null;
for (String fieldName : fieldNames) {
try {
Field field = findField(obj.getClass(), fieldName);
if (field != null) {
field.setAccessible(true);
Object value = field.get(obj);
if (value != null) {
return value;
}
}
} catch (Exception e) {
// Ignore and try next field
}
}
return null;
}
/**
* Find a field in a class or its superclasses.
*/
public static Field findField(Class<?> clazz, String fieldName) {
if (clazz == null || clazz == Object.class)
return null;
try {
return clazz.getDeclaredField(fieldName);
} catch (NoSuchFieldException e) {
return findField(clazz.getSuperclass(), fieldName);
}
}
/**
* Find a method in a class or its superclasses.
*/
public static Method findMethod(Class<?> clazz, String methodName) {
if (clazz == null || clazz == Object.class)
return null;
try {
return clazz.getDeclaredMethod(methodName);
} catch (NoSuchMethodException e) {
return findMethod(clazz.getSuperclass(), methodName);
}
}
/**
* Find a method in a class or its superclasses, trying multiple method names.
*/
public static Method findMethod(Class<?> clazz, String... methodNames) {
for (String methodName : methodNames) {
Method method = findMethod(clazz, methodName);
if (method != null) {
return method;
}
}
return null;
}
    /**
     * Holds metrics information for a single producer: the producer instance, its
     * stats reporter, the last compression snapshot, and an active flag.
     */
    public static class ProducerMetricsInfo {
        // The raw KafkaProducer instance (typed as Object to avoid a hard client dependency).
        private final Object producer;
        // Reporter that ships this producer's stats to Superstream.
        private final ClientStatsReporter reporter;
        // Last observed compression stats; starts at (0, 0) so the first snapshot covers everything.
        private final AtomicReference<CompressionStats> lastStats = new AtomicReference<>(new CompressionStats(0, 0));
        // Starts true; presumably flipped to false when the producer closes — not visible in this chunk, TODO confirm.
        private final AtomicBoolean isActive = new AtomicBoolean(true);
        public ProducerMetricsInfo(Object producer, ClientStatsReporter reporter) {
            this.producer = producer;
            this.reporter = reporter;
        }
        // Accessor for the wrapped producer instance.
        public Object getProducer() {
            return producer;
        }
        // Accessor for this producer's stats reporter.
        public ClientStatsReporter getReporter() {
            return reporter;
        }
        // Returns the last recorded compression snapshot.
        public CompressionStats getLastStats() {
            return lastStats.get();
        }
        // Replaces the last recorded compression snapshot.
        public void updateLastStats(CompressionStats stats) {
            lastStats.set(stats);
        }
        // Whether this producer is still considered live for metrics collection.
        public boolean isActive() {
            return isActive.get();
        }
    }
/**
 * A singleton class that collects Kafka metrics periodically for all registered
 * producers using a single shared daemon thread. This is more efficient than
 * having one thread per producer. The producers to poll come from the
 * enclosing class's {@code producerMetricsMap}.
 */
public static class SharedMetricsCollector {
    // Single shared daemon thread for all producers.
    private final ScheduledExecutorService scheduler;
    // Guards against the fixed-rate task being scheduled twice.
    private final AtomicBoolean running = new AtomicBoolean(false);
    private static final long COLLECTION_INTERVAL_MS = 30000; // 30 seconds
    public SharedMetricsCollector() {
        this.scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "superstream-kafka-metrics-collector");
            t.setDaemon(true); // must never keep the host JVM alive
            return t;
        });
    }
    /**
     * Start collecting metrics periodically for all registered producers.
     * Idempotent: a second call while running only logs a debug message.
     */
    public void start() {
        if (running.compareAndSet(false, true)) {
            logger.debug("Starting shared Kafka metrics collector with interval {} ms", COLLECTION_INTERVAL_MS);
            try {
                scheduler.scheduleAtFixedRate(this::collectAllMetrics,
                        COLLECTION_INTERVAL_MS / 2, // Start sooner for first collection
                        COLLECTION_INTERVAL_MS,
                        TimeUnit.MILLISECONDS);
            } catch (Exception e) {
                logger.error("[ERR-012] Failed to schedule metrics collection: {}", e.getMessage(), e);
                // Roll back the flag so a later start() attempt can retry.
                running.set(false);
            }
        } else {
            logger.debug("Metrics collector already running");
        }
    }
    /**
     * Collect metrics from all registered producers. One producer's failure is
     * logged and does not abort the cycle for the others.
     */
    public void collectAllMetrics() {
        try {
            // Skip if disabled
            if (isDisabled()) {
                return;
            }
            int totalProducers = producerMetricsMap.size();
            if (totalProducers == 0) {
                logger.debug("No producers registered for metrics collection");
                return;
            }
            logger.debug("Starting metrics collection cycle for {} producers", totalProducers);
            int successCount = 0;
            int skippedCount = 0;
            // Iterate through all registered producers
            for (Map.Entry<String, ProducerMetricsInfo> entry : producerMetricsMap.entrySet()) {
                String producerId = entry.getKey();
                ProducerMetricsInfo info = entry.getValue();
                // Skip inactive (closed) producers
                if (!info.isActive()) {
                    skippedCount++;
                    continue;
                }
                try {
                    boolean success = collectMetricsForProducer(producerId, info);
                    if (success) {
                        successCount++;
                    } else {
                        skippedCount++;
                    }
                } catch (Exception e) {
                    logger.error("[ERR-013] Error collecting metrics for producer {}: {}", producerId, e.getMessage(), e);
                }
            }
            logger.debug(
                    "Completed metrics collection cycle: {} producers processed, {} reported stats, {} skipped",
                    totalProducers, successCount, skippedCount);
        } catch (Exception e) {
            logger.error("[ERR-014] Error in metrics collection cycle: {}", e.getMessage(), e);
        }
    }
    /**
     * Collect metrics for a single producer: snapshot producer/topic/node/app-info
     * metrics, derive the compressed/uncompressed byte delta for the interval,
     * and push everything into the producer's reporter.
     *
     * @return true if metrics were successfully collected and reported, false if
     *         skipped
     */
    public boolean collectMetricsForProducer(String producerId, ProducerMetricsInfo info) {
        try {
            Object producer = info.getProducer();
            ClientStatsReporter reporter = info.getReporter();
            // Get the metrics object from the producer (private field, read reflectively)
            Object metrics = extractFieldValue(producer, "metrics");
            if (metrics == null) {
                logger.debug("No metrics object found in producer {}", producerId);
                return false;
            }
            // Extract the metrics map once per invocation and reuse in subsequent calculations
            java.util.Map<?,?> metricsMap = extractMetricsMap(metrics);
            // Try to get the compression ratio metric; fall back to 1.0 (no compression)
            double compressionRatio = getCompressionRatio(metricsMap);
            if (compressionRatio <= 0) {
                logger.debug("No compression ratio metric found; assuming ratio 1.0 for producer {}", producerId);
                compressionRatio = 1.0;
            }
            // Get the outgoing-byte-total metrics - this is a per-node metric that
            // represents compressed bytes. We need to sum it across all nodes and it is
            // cumulative over time.
            long totalOutgoingBytes = getOutgoingBytesTotal(metricsMap);
            // Calculate the delta since the last collection cycle (can be zero when idle)
            CompressionStats prevStats = info.getLastStats();
            long compressedBytes = Math.max(0, totalOutgoingBytes - prevStats.compressedBytes);
            // Use the compression ratio to calculate the uncompressed size
            // compression_ratio = compressed_size / uncompressed_size
            // Therefore: uncompressed_size = compressed_size / compression_ratio
            long uncompressedBytes = (compressionRatio > 0 && compressedBytes > 0)
                    ? (long) (compressedBytes / compressionRatio)
                    : 0;
            // Update the last stats with the new total (cumulative) bytes
            // For uncompressed, add the new delta to the previous total
            info.updateLastStats(
                    new CompressionStats(totalOutgoingBytes, prevStats.uncompressedBytes + uncompressedBytes));
            // Create snapshots for different metric types
            java.util.Map<String, Double> allMetricsSnapshot = new java.util.HashMap<>();
            java.util.Map<String, java.util.Map<String, Double>> topicMetricsSnapshot = new java.util.HashMap<>();
            java.util.Map<String, java.util.Map<String, Double>> nodeMetricsSnapshot = new java.util.HashMap<>();
            java.util.Map<String, String> appInfoMetricsSnapshot = new java.util.HashMap<>();
            try {
                java.util.Map<?, ?> rawMetricsMap = metricsMap;
                if (rawMetricsMap != null) {
                    for (java.util.Map.Entry<?, ?> mEntry : rawMetricsMap.entrySet()) {
                        Object mKey = mEntry.getKey();
                        String group = null;
                        String namePart;
                        String keyString = null;
                        String topicName = null;
                        String nodeId = null;
                        if (mKey == null) continue;
                        // Keys are either Kafka MetricName objects (inspected reflectively)
                        // or plain dotted strings; both shapes are handled below.
                        if (mKey.getClass().getName().endsWith("MetricName")) {
                            try {
                                java.lang.reflect.Method nameMethod = findMethod(mKey.getClass(), "name");
                                java.lang.reflect.Method groupMethod = findMethod(mKey.getClass(), "group");
                                java.lang.reflect.Method tagsMethod = findMethod(mKey.getClass(), "tags");
                                namePart = (nameMethod != null) ? nameMethod.invoke(mKey).toString() : mKey.toString();
                                group = (groupMethod != null) ? groupMethod.invoke(mKey).toString() : "";
                                if ("producer-metrics".equals(group)) {
                                    keyString = namePart;
                                } else if ("producer-topic-metrics".equals(group)) {
                                    // Per-topic metrics carry the topic in the "topic" tag.
                                    if (tagsMethod != null) {
                                        tagsMethod.setAccessible(true);
                                        Object tagObj = tagsMethod.invoke(mKey);
                                        if (tagObj instanceof java.util.Map) {
                                            Object topicObj = ((java.util.Map<?,?>)tagObj).get("topic");
                                            if (topicObj != null) {
                                                topicName = topicObj.toString();
                                                keyString = namePart;
                                            }
                                        }
                                    }
                                } else if ("producer-node-metrics".equals(group)) {
                                    // Per-node metrics carry e.g. "node-1" in the "node-id" tag;
                                    // only non-negative numeric ids are kept (see catch below).
                                    if (tagsMethod != null) {
                                        tagsMethod.setAccessible(true);
                                        Object tagObj = tagsMethod.invoke(mKey);
                                        if (tagObj instanceof java.util.Map) {
                                            Object nodeIdObj = ((java.util.Map<?,?>)tagObj).get("node-id");
                                            if (nodeIdObj != null) {
                                                String rawNodeId = nodeIdObj.toString();
                                                String[] parts = rawNodeId.split("-");
                                                if (parts.length > 1) {
                                                    try {
                                                        int id = Integer.parseInt(parts[1]);
                                                        if (id >= 0) {
                                                            nodeId = String.valueOf(id);
                                                            keyString = namePart;
                                                        }
                                                    } catch (NumberFormatException e) {
                                                        // ignore making us ignore the negative node IDs (represents metrics from the controller that we don't care about)
                                                    }
                                                }
                                            }
                                        }
                                    }
                                } else if ("app-info".equals(group)) {
                                    keyString = namePart;
                                } else {
                                    continue; // skip non-producer groups
                                }
                            } catch (Exception e) {
                                logger.debug("Failed to process metric name: {}", e.getMessage());
                            }
                        } else if (mKey instanceof String) {
                            keyString = mKey.toString();
                            // producer-metrics group (per producer)
                            if (keyString.startsWith("producer-metrics.")) {
                                keyString = keyString.substring("producer-metrics.".length());
                                // producer-topic-metrics group (per topic)
                            } else if (keyString.startsWith("producer-topic-metrics.")) {
                                String[] parts = keyString.split("\\.", 3);
                                if (parts.length == 3) {
                                    topicName = parts[1];
                                    keyString = parts[2];
                                }
                                // producer-node-metrics group (per broker node)
                            } else if (keyString.startsWith("producer-node-metrics.")) {
                                String[] parts = keyString.split("\\.", 3);
                                if (parts.length == 3) {
                                    String rawNodeId = parts[1];
                                    String[] idParts = rawNodeId.split("-");
                                    if (idParts.length > 1) {
                                        try {
                                            int id = Integer.parseInt(idParts[1]);
                                            if (id >= 0) {
                                                nodeId = String.valueOf(id);
                                                keyString = parts[2];
                                            }
                                        } catch (NumberFormatException e) {
                                            // ignore making us ignore the negative node IDs (represents metrics from the controller that we don't care about)
                                        }
                                    }
                                }
                                // app-info group (string values)
                            } else if (keyString.startsWith("app-info.")) {
                                keyString = keyString.substring("app-info.".length());
                                // skip metrics groups that are not producer-metrics, producer-topic-metrics, producer-node-metrics, or app-info
                            } else if (!keyString.startsWith("producer-metrics") &&
                                    !keyString.startsWith("producer-topic-metrics") &&
                                    !keyString.startsWith("producer-node-metrics") &&
                                    !keyString.startsWith("app-info")) {
                                continue;
                            }
                        }
                        if (keyString == null) continue;
                        // Handle app-info metrics differently - store as strings
                        if ("app-info".equals(group) ||
                                (mKey instanceof String && ((String)mKey).startsWith("app-info"))) {
                            Object value = mEntry.getValue();
                            String stringValue = null;
                            if (value != null) {
                                // Try to extract the value from KafkaMetric if possible
                                try {
                                    java.lang.reflect.Method metricValueMethod = value.getClass().getMethod("metricValue");
                                    Object actualValue = metricValueMethod.invoke(value);
                                    stringValue = (actualValue != null) ? actualValue.toString() : null;
                                } catch (Exception e) {
                                    stringValue = value.toString();
                                }
                            }
                            if (stringValue != null) {
                                appInfoMetricsSnapshot.put(keyString, stringValue);
                            }
                            continue;
                        }
                        // Handle numeric metrics; route into the per-topic, per-node or
                        // producer-wide snapshot depending on which tag was found above.
                        double mVal = extractMetricValue(mEntry.getValue());
                        if (!Double.isNaN(mVal)) {
                            if (topicName != null) {
                                topicMetricsSnapshot.computeIfAbsent(topicName, k -> new java.util.HashMap<>())
                                        .put(keyString, mVal);
                            } else if (nodeId != null) {
                                nodeMetricsSnapshot.computeIfAbsent(nodeId, k -> new java.util.HashMap<>())
                                        .put(keyString, mVal);
                            } else {
                                allMetricsSnapshot.put(keyString, mVal);
                            }
                        }
                    }
                }
            } catch (Exception snapshotEx) {
                logger.error("[ERR-015] Error extracting metrics snapshot for producer {}: {}", producerId, snapshotEx.getMessage(), snapshotEx);
            }
            // Update reporter with latest metrics snapshots
            reporter.updateProducerMetrics(allMetricsSnapshot);
            reporter.updateTopicMetrics(topicMetricsSnapshot);
            reporter.updateNodeMetrics(nodeMetricsSnapshot);
            reporter.updateAppInfoMetrics(appInfoMetricsSnapshot);
            // Aggregate topics written by this producer from producer-topic-metrics
            java.util.Set<String> newTopics = new java.util.HashSet<>();
            try {
                java.util.Map<?,?> rawMapForTopics = metricsMap;
                if (rawMapForTopics != null) {
                    for (java.util.Map.Entry<?,?> me : rawMapForTopics.entrySet()) {
                        Object k = me.getKey();
                        if (k == null) continue;
                        if (k.getClass().getName().endsWith("MetricName")) {
                            try {
                                java.lang.reflect.Method groupMethod = findMethod(k.getClass(), "group");
                                java.lang.reflect.Method tagsMethod = findMethod(k.getClass(), "tags");
                                if (groupMethod != null && tagsMethod != null) {
                                    groupMethod.setAccessible(true);
                                    String g = groupMethod.invoke(k).toString();
                                    if ("producer-topic-metrics".equals(g)) {
                                        tagsMethod.setAccessible(true);
                                        Object tagObj = tagsMethod.invoke(k);
                                        if (tagObj instanceof java.util.Map) {
                                            Object topicObj = ((java.util.Map<?,?>)tagObj).get("topic");
                                            if (topicObj != null) newTopics.add(topicObj.toString());
                                        }
                                    }
                                }
                            } catch (Exception e) {
                                logger.debug("Failed to extract topic from metric tags: {}", e.getMessage());
                            }
                        }
                    }
                }
            } catch (Exception e) {
                logger.debug("Failed to aggregate topics from metrics: {}", e.getMessage());
            }
            if (!newTopics.isEmpty()) {
                reporter.addTopics(newTopics);
            }
            // Report the compression statistics for this interval (delta)
            reporter.recordBatch(uncompressedBytes, compressedBytes);
            logger.debug("Producer {} compression collected: before={} bytes, after={} bytes, ratio={}",
                    producerId, uncompressedBytes, compressedBytes, String.format("%.4f", compressionRatio));
            return true;
        } catch (Exception e) {
            logger.error("[ERR-016] Error collecting Kafka metrics for producer {}: {}", producerId, e.getMessage(), e);
            return false;
        }
    }
    /**
     * Get the compression ratio from the metrics map, or 0 when the metric is
     * absent (the caller treats 0 as "assume no compression").
     */
    public double getCompressionRatio(java.util.Map<?,?> metricsMap) {
        try {
            if (metricsMap != null) {
                logger.debug("Metrics map size: {}", metricsMap.size());
                double compressionRatio = findDirectCompressionMetric(metricsMap);
                if (compressionRatio > 0) {
                    return compressionRatio;
                }
            }
        } catch (Exception e) {
            logger.debug("Error getting compression ratio: " + e.getMessage(), e);
        }
        return 0;
    }
    /**
     * Find direct compression metrics (compression-rate-avg / compression-ratio)
     * in the metrics map, restricted to the producer-metrics group.
     */
    private double findDirectCompressionMetric(java.util.Map<?, ?> metricsMap) {
        // Look for compression metrics in the *producer-metrics* group only
        for (java.util.Map.Entry<?, ?> entry : metricsMap.entrySet()) {
            Object key = entry.getKey();
            // Handle MetricName keys
            if (key.getClass().getName().endsWith("MetricName")) {
                try {
                    Method nameMethod = findMethod(key.getClass(), "name");
                    Method groupMethod = findMethod(key.getClass(), "group");
                    if (nameMethod != null && groupMethod != null) {
                        nameMethod.setAccessible(true);
                        groupMethod.setAccessible(true);
                        Object nameObj = nameMethod.invoke(key);
                        Object groupObj = groupMethod.invoke(key);
                        if (nameObj == null || groupObj == null) {
                            continue;
                        }
                        String name = nameObj.toString();
                        String group = groupObj.toString();
                        // Only accept metrics from producer-metrics group
                        if ("producer-metrics".equals(group) &&
                                ("compression-rate-avg".equals(name) || "compression-ratio".equals(name))) {
                            double value = extractMetricValue(entry.getValue());
                            if (value >= 0) {
                                logger.debug("Found producer-metrics compression metric: {} -> {}", name, value);
                                return value;
                            }
                        }
                    }
                } catch (Exception ignored) {
                }
            }
            // Handle String keys
            else if (key instanceof String) {
                String keyStr = (String) key;
                if (keyStr.startsWith("producer-metrics") &&
                        (keyStr.contains("compression-rate-avg") || keyStr.contains("compression-ratio"))) {
                    double value = extractMetricValue(entry.getValue());
                    if (value >= 0) {
                        logger.debug("Found producer-metrics compression metric (string key): {} -> {}", keyStr, value);
                        return value;
                    }
                }
            }
        }
        return 0;
    }
    /**
     * Get the total outgoing bytes for the *producer* (after compression).
     * Uses producer-metrics group only to keep numbers per-producer rather than per-node.
     * Returns 0 when no matching metric is found.
     */
    public long getOutgoingBytesTotal(java.util.Map<?,?> metricsMap) {
        try {
            if (metricsMap != null) {
                String targetGroup = "producer-metrics";
                String[] candidateNames = {"outgoing-byte-total", "byte-total"};
                for (java.util.Map.Entry<?, ?> entry : metricsMap.entrySet()) {
                    Object key = entry.getKey();
                    // MetricName keys
                    if (key.getClass().getName().endsWith("MetricName")) {
                        try {
                            Method nameMethod = findMethod(key.getClass(), "name");
                            Method groupMethod = findMethod(key.getClass(), "group");
                            if (nameMethod != null && groupMethod != null) {
                                nameMethod.setAccessible(true);
                                groupMethod.setAccessible(true);
                                Object nameObj = nameMethod.invoke(key);
                                Object groupObj = groupMethod.invoke(key);
                                if (nameObj == null || groupObj == null) {
                                    continue;
                                }
                                String name = nameObj.toString();
                                String group = groupObj.toString();
                                if (targetGroup.equals(group)) {
                                    for (String n : candidateNames) {
                                        if (n.equals(name)) {
                                            double val = extractMetricValue(entry.getValue());
                                            if (val >= 0) {
                                                logger.debug("Found producer-metrics {} = {}", name, val);
                                                return (long) val;
                                            }
                                        }
                                    }
                                }
                            }
                        } catch (Exception ignored) {}
                    } else if (key instanceof String) {
                        String keyStr = (String) key;
                        if (keyStr.startsWith(targetGroup) && (keyStr.contains("outgoing-byte-total") || keyStr.contains("byte-total"))) {
                            double val = extractMetricValue(entry.getValue());
                            if (val >= 0) {
                                logger.debug("Found producer-metrics byte counter (string key) {} = {}", keyStr, val);
                                return (long) val;
                            }
                        }
                    }
                }
            }
        } catch (Exception e) {
            logger.debug("Error getting outgoing bytes total from producer-metrics: {}", e.getMessage());
        }
        return 0;
    }
    /**
     * Extract the metrics map from a Metrics object.
     * Handles both cases where metrics is a Map directly or a Metrics object
     * with an internal 'metrics' field (or a metrics()/getMetrics() accessor).
     * Returns null when no map can be extracted.
     */
    private java.util.Map<?, ?> extractMetricsMap(Object metrics) {
        if (metrics == null) {
            return null;
        }
        try {
            // If it's already a Map, just cast it
            if (metrics instanceof java.util.Map) {
                return (java.util.Map<?, ?>) metrics;
            }
            // Try to extract the internal metrics map field
            Field metricsField = findField(metrics.getClass(), "metrics");
            if (metricsField != null) {
                metricsField.setAccessible(true);
                Object metricsValue = metricsField.get(metrics);
                if (metricsValue instanceof java.util.Map) {
                    logger.debug("Successfully extracted metrics map from Metrics object");
                    return (java.util.Map<?, ?>) metricsValue;
                }
            }
            // Try to get metrics through a method
            Method getMetricsMethod = findMethod(metrics.getClass(), "metrics", "getMetrics");
            if (getMetricsMethod != null) {
                getMetricsMethod.setAccessible(true);
                Object metricsValue = getMetricsMethod.invoke(metrics);
                if (metricsValue instanceof java.util.Map) {
                    logger.debug("Successfully extracted metrics map via method");
                    return (java.util.Map<?, ?>) metricsValue;
                }
            }
            logger.debug("Object is neither a Map nor has a metrics field/method: {}",
                    metrics.getClass().getName());
        } catch (Exception e) {
            logger.debug("Error extracting metrics map: {}", e.getMessage());
        }
        return null;
    }
    /**
     * Extract a numeric value from a metric object via its metricValue() method.
     * Returns 0 when the metric is null, has no such method, or the value is
     * not a Number. NOTE(review): callers that check Double.isNaN() will never
     * see NaN from the null/failure path here — confirm this is intended.
     */
    public double extractMetricValue(Object metric) {
        if (metric == null) {
            return 0;
        }
        try {
            // Look up the zero-arg metricValue() accessor (KafkaMetric-style objects)
            Method valueMethod = findMethod(metric.getClass(), "metricValue");
            if (valueMethod != null) {
                valueMethod.setAccessible(true);
                Object value = valueMethod.invoke(metric);
                if (value instanceof Number) {
                    return ((Number) value).doubleValue();
                }
            }
        } catch (Exception e) {
            logger.debug("Error extracting metric value: " + e.getMessage());
        }
        return 0;
    }
}
/**
 * Simple immutable value class tracking cumulative compression statistics
 * over time: total bytes after compression and total bytes before compression.
 */
public static class CompressionStats {
    // Cumulative bytes sent on the wire (post-compression).
    public final long compressedBytes;
    // Cumulative bytes before compression (derived via the compression ratio).
    public final long uncompressedBytes;
    public CompressionStats(long compressedBytes, long uncompressedBytes) {
        this.compressedBytes = compressedBytes;
        this.uncompressedBytes = uncompressedBytes;
    }
}
/**
 * Holder for original and optimized configuration maps passed between the
 * optimization phase and stats reporter creation using ThreadLocal.
 * {@code error} is non-null only when optimization failed.
 */
public static class ConfigInfo {
    public final java.util.Map<String, Object> originalConfig;
    public final java.util.Map<String, Object> optimizedConfig;
    public final String error;
    public ConfigInfo(java.util.Map<String, Object> orig, java.util.Map<String, Object> opt) {
        // Delegate to the full constructor instead of duplicating the assignments.
        this(orig, opt, null);
    }
    public ConfigInfo(java.util.Map<String, Object> orig, java.util.Map<String, Object> opt, String error) {
        this.originalConfig = orig;
        this.optimizedConfig = opt;
        this.error = error;
    }
}
/**
 * A Properties view that writes through to a backing Map, ensuring updates are
 * visible to code that continues to use the original Map instance.
 * Write-through is implemented for put/putAll/remove/clear; reads go through
 * the backing map first. NOTE(review): iteration views (keySet/entrySet) still
 * reflect only the Properties copy — confirm no caller relies on them.
 */
public static class MapBackedProperties extends java.util.Properties {
    private static final long serialVersionUID = 1L;
    // The caller's original configuration map; may be unmodifiable, hence the
    // UnsupportedOperationException guards on every mutator below.
    private final java.util.Map<String,Object> backing;
    public MapBackedProperties(java.util.Map<String,Object> backing) {
        this.backing = backing;
        super.putAll(backing); // seed the Properties copy with the current contents
    }
    @Override
    public synchronized Object put(Object key, Object value) {
        try { backing.put(String.valueOf(key), value); } catch (UnsupportedOperationException ignored) {}
        return super.put(key, value);
    }
    @Override
    public synchronized Object remove(Object key) {
        try { backing.remove(String.valueOf(key)); } catch (UnsupportedOperationException ignored) {}
        return super.remove(key);
    }
    @Override
    public synchronized void putAll(java.util.Map<?,?> m) {
        // Route through put() so each entry is written through to the backing map.
        for (java.util.Map.Entry<?,?> e : m.entrySet()) {
            put(e.getKey(), e.getValue());
        }
    }
    @Override
    public synchronized void clear() {
        // Fix: previously clear() was not overridden, so it emptied only the
        // Properties copy and silently left the backing map populated —
        // inconsistent with every other mutator in this class.
        try { backing.clear(); } catch (UnsupportedOperationException ignored) {}
        super.clear();
    }
    @Override
    public String getProperty(String key) {
        Object value = backing.get(key);
        if (value == null) {
            // Fall back to the Properties copy (and its defaults chain).
            return super.getProperty(key);
        }
        // Handle special case for bootstrap.servers which can be any Collection<String>
        if ("bootstrap.servers".equals(key) && value instanceof java.util.Collection) {
            try {
                @SuppressWarnings("unchecked")
                java.util.Collection<String> serverCollection = (java.util.Collection<String>) value;
                return String.join(",", serverCollection);
            } catch (ClassCastException e) {
                // If the collection doesn't contain strings, fall back to toString()
                logger.debug("bootstrap.servers collection contains non-String elements, falling back to toString()");
            }
        }
        // For all other cases, return the original value
        return value.toString();
    }
    @Override
    public String getProperty(String key, String defaultValue) {
        String result = getProperty(key);
        return result != null ? result : defaultValue;
    }
    @Override
    public Object get(Object key) {
        return backing.get(key);
    }
}
// Utility method to convert Properties to Map<String, Object>.
// Only the Properties' own entries are copied (defaults are not flattened in);
// a null input yields an empty map.
public static java.util.Map<String, Object> propertiesToMap(Properties props) {
    java.util.Map<String, Object> result = new java.util.HashMap<>();
    if (props == null) {
        return result;
    }
    for (java.util.Map.Entry<Object, Object> entry : props.entrySet()) {
        Object key = entry.getKey();
        if (key != null) {
            result.put(String.valueOf(key), entry.getValue());
        }
    }
    return result;
}
/**
 * Mark a producer as closed.
 *
 * @param producer the producer instance
 * @return {@code true} if this is the first time we saw close() for this instance
 */
public static boolean markProducerClosed(Object producer) {
    if (producer == null || isDisabled()) {
        return false;
    }
    try {
        final String producerId = "producer-" + System.identityHashCode(producer);
        ProducerMetricsInfo metricsInfo = producerMetricsMap.get(producerId);
        if (metricsInfo == null) {
            // no info found — this producer was never registered (or already removed)
            return false;
        }
        if (!metricsInfo.isActive.getAndSet(false)) {
            return false; // already closed previously
        }
        logger.debug("Producer {} marked as closed; metrics collection will stop", producerId);
        try {
            ClientStatsReporter statsReporter = metricsInfo.getReporter();
            if (statsReporter != null) {
                // Stop the reporter and deregister it from coordinators
                statsReporter.deactivate();
                ai.superstream.core.ClientStatsReporter.deregisterReporter(statsReporter);
            }
        } catch (Exception e) {
            logger.debug("Error deactivating reporter for {}: {}", producerId, e.getMessage());
        }
        // Remove from lookup maps to free memory
        clientStatsReporters.remove(producerId);
        producerMetricsMap.remove(producerId);
        return true;
    } catch (Exception e) {
        logger.error("[ERR-200] Failed to mark producer as closed: {}", e.getMessage(), e);
        return false;
    }
}
}
|
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/agent/SuperstreamAgent.java | package ai.superstream.agent;
import ai.superstream.util.SuperstreamLogger;
import net.bytebuddy.agent.builder.AgentBuilder;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.matcher.ElementMatchers;
import java.lang.instrument.Instrumentation;
import java.util.HashMap;
import java.util.Map;
/**
* Java agent entry point for the Superstream library.
*/
public class SuperstreamAgent {
public static final SuperstreamLogger logger = SuperstreamLogger.getLogger(SuperstreamAgent.class);
/**
* Premain method, called when the agent is loaded during JVM startup.
*
* @param arguments Agent arguments
* @param instrumentation Instrumentation instance
*/
public static void premain(String arguments, Instrumentation instrumentation) {
// Check environment variable
String debugEnv = System.getenv("SUPERSTREAM_DEBUG");
if ("true".equalsIgnoreCase(debugEnv)) {
SuperstreamLogger.setDebugEnabled(true);
}
install(instrumentation);
// Log all SUPERSTREAM_ environment variables
Map<String, String> superstreamEnvVars = new HashMap<>();
System.getenv().forEach((key, value) -> {
if (key.startsWith("SUPERSTREAM_")) {
superstreamEnvVars.put(key, value);
}
});
logger.info("Superstream Agent initialized with environment variables: {}", superstreamEnvVars);
}
/**
* AgentMain method, called when the agent is loaded after JVM startup.
*
* @param arguments Agent arguments
* @param instrumentation Instrumentation instance
*/
public static void agentmain(String arguments, Instrumentation instrumentation) {
install(instrumentation);
// Log all SUPERSTREAM_ environment variables
Map<String, String> superstreamEnvVars = new HashMap<>();
System.getenv().forEach((key, value) -> {
if (key.startsWith("SUPERSTREAM_")) {
superstreamEnvVars.put(key, value);
}
});
logger.info("Superstream Agent initialized (dynamic attach) with environment variables: {}", superstreamEnvVars);
}
/**
* Install the agent instrumentation.
*
* @param instrumentation Instrumentation instance
*/
private static void install(Instrumentation instrumentation) {
// Intercept KafkaProducer constructor for both configuration optimization and
// metrics collection
new AgentBuilder.Default()
.disableClassFormatChanges()
.type(ElementMatchers.nameEndsWith(".KafkaProducer")
.and(ElementMatchers.not(ElementMatchers.nameContains("ai.superstream")))) // prevent instrumenting superstream's own KafkaProducer
.transform((builder, td, cl, module, pd) -> builder
.visit(Advice.to(KafkaProducerInterceptor.class)
.on(ElementMatchers.isConstructor()))
.visit(Advice.to(KafkaProducerCloseInterceptor.class)
.on(ElementMatchers.named("close"))))
.installOn(instrumentation);
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/core/ClientStatsCollector.java | package ai.superstream.core;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Collects and tracks client statistics for Kafka producers.
 * This class provides methods to record data about producer operations
 * and calculates compression ratio metrics. Safe for concurrent use:
 * recordBatch may be called from producer threads while a reporter thread
 * calls captureAndReset.
 */
public class ClientStatsCollector {
    // Tracks the total bytes written before compression since last report
    private final AtomicLong totalBytesBeforeCompression = new AtomicLong(0);
    // Tracks the total bytes written after compression since last report
    private final AtomicLong totalBytesAfterCompression = new AtomicLong(0);
    /**
     * Records data about a batch of messages sent by a producer.
     *
     * @param uncompressedSize Size of the batch before compression (in bytes)
     * @param compressedSize   Size of the batch after compression (in bytes)
     */
    public void recordBatch(long uncompressedSize, long compressedSize) {
        totalBytesBeforeCompression.addAndGet(uncompressedSize);
        totalBytesAfterCompression.addAndGet(compressedSize);
    }
    /**
     * Captures current statistics and resets counters atomically.
     *
     * Fix: the previous implementation did get() followed by set(0) on each
     * counter; recordBatch is not synchronized, so any increment landing
     * between those two calls was silently lost. getAndSet(0) snapshots and
     * clears each counter in one atomic step. The method stays synchronized so
     * concurrent capturers cannot interleave between the two counters.
     *
     * @return Object containing both before and after compression sizes
     */
    public synchronized Stats captureAndReset() {
        long beforeCompression = totalBytesBeforeCompression.getAndSet(0);
        long afterCompression = totalBytesAfterCompression.getAndSet(0);
        return new Stats(beforeCompression, afterCompression);
    }
    /**
     * Container for statistics captured at a point in time. Immutable.
     */
    public static class Stats {
        private final long bytesBeforeCompression;
        private final long bytesAfterCompression;
        public Stats(long bytesBeforeCompression, long bytesAfterCompression) {
            this.bytesBeforeCompression = bytesBeforeCompression;
            this.bytesAfterCompression = bytesAfterCompression;
        }
        public long getBytesBeforeCompression() {
            return bytesBeforeCompression;
        }
        public long getBytesAfterCompression() {
            return bytesAfterCompression;
        }
        /** Compressed/uncompressed ratio; 1.0 when nothing was recorded (avoids divide-by-zero). */
        public double getCompressionRatio() {
            if (bytesBeforeCompression == 0) {
                return 1.0;
            }
            return (double) bytesAfterCompression / bytesBeforeCompression;
        }
    }
}
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/core/ClientStatsReporter.java | package ai.superstream.core;
import ai.superstream.agent.KafkaProducerInterceptor;
import ai.superstream.model.ClientStatsMessage;
import ai.superstream.util.NetworkUtils;
import ai.superstream.util.SuperstreamLogger;
import ai.superstream.util.KafkaPropertiesUtils;
import ai.superstream.util.ClientUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.ConcurrentSkipListSet;
/**
* Reports client statistics to the superstream.clients topic periodically.
*/
public class ClientStatsReporter {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(ClientStatsReporter.class);
// Topic that stats messages are published to.
private static final String CLIENTS_TOPIC = "superstream.clients";
// Shared, thread-safe Jackson mapper; lenient on empty beans and unknown fields.
private static final ObjectMapper objectMapper = new ObjectMapper()
        .configure(com.fasterxml.jackson.databind.SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
        .configure(com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
// Default reporting interval (5 minutes) – overridden when metadata provides a different value
private static final long DEFAULT_REPORT_INTERVAL_MS = 300000; // 5 minutes
// When this env var is "true", reporting is a no-op (see constructor).
private static final String DISABLED_ENV_VAR = "SUPERSTREAM_DISABLED";
// Shared scheduler for all reporters to minimize thread usage
private static final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
    Thread t = new Thread(r, "superstream-client-stats-reporter");
    t.setDaemon(true);
    return t;
});
// Coordinator per cluster to minimise producer usage
private static final ConcurrentHashMap<String, ClusterStatsCoordinator> coordinators = new ConcurrentHashMap<>();
// Accumulates compression byte counts between reporting cycles.
private final ClientStatsCollector statsCollector;
// Properties used to build the internal reporter producer (auth copied from the client).
private final Properties producerProperties;
private final String clientId;
// True while this reporter is live; cleared on deactivate() to stop recording/sending.
private final AtomicBoolean registered = new AtomicBoolean(false);
// Snapshot of SUPERSTREAM_DISABLED taken at construction time.
private final boolean disabled;
private final String producerUuid;
// Latest metric snapshots pushed in by the SharedMetricsCollector; read when a report is sent.
private final AtomicReference<java.util.Map<String, Double>> latestMetrics = new AtomicReference<>(
        new java.util.HashMap<>());
// NOTE(review): these three are plain (non-volatile) fields replaced wholesale by
// updateXxxMetrics callers — confirm cross-thread visibility is acceptable here.
private java.util.Map<String, java.util.Map<String, Double>> latestTopicMetrics = new java.util.HashMap<>();
private java.util.Map<String, java.util.Map<String, Double>> latestNodeMetrics = new java.util.HashMap<>();
private java.util.Map<String, String> latestAppInfoMetrics = new java.util.HashMap<>();
// Sorted, concurrent set of topic names this producer has written to.
private final ConcurrentSkipListSet<String> topicsWritten = new ConcurrentSkipListSet<>();
private volatile java.util.Map<String, Object> originalConfig = null;
private volatile java.util.Map<String, Object> optimizedConfig = null;
private String mostImpactfulTopic;
private String error;
/**
 * Creates a new client stats reporter. Copies authentication-relevant settings
 * from the client's own configuration, builds a dedicated lightweight producer
 * configuration for the stats topic, and registers itself with the per-cluster
 * coordinator (one coordinator, and therefore one stats producer, per cluster).
 *
 * @param bootstrapServers Kafka bootstrap servers
 * @param clientProperties Producer properties to use for authentication
 * @param clientId         The client ID to include in reports
 * @param producerUuid     The producer UUID
 */
public ClientStatsReporter(String bootstrapServers, Properties clientProperties, String clientId, String producerUuid) {
    this.clientId = clientId;
    this.disabled = Boolean.parseBoolean(System.getenv(DISABLED_ENV_VAR));
    this.producerUuid = producerUuid;
    if (this.disabled) {
        logger.debug("Superstream stats reporting is disabled via environment variable");
    }
    this.statsCollector = new ClientStatsCollector();
    // Copy essential client configuration properties (security/auth) from the original client
    this.producerProperties = new Properties();
    KafkaPropertiesUtils.copyClientConfigurationProperties(clientProperties, this.producerProperties);
    // Set up basic producer properties
    this.producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    this.producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    this.producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // Prefixed client-id so the interceptor skips superstream's own producer
    this.producerProperties.put(ProducerConfig.CLIENT_ID_CONFIG,
            KafkaProducerInterceptor.SUPERSTREAM_LIBRARY_PREFIX + "client-stats-reporter");
    // Use efficient compression settings for the reporter itself
    this.producerProperties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "zstd");
    this.producerProperties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
    this.producerProperties.put(ProducerConfig.LINGER_MS_CONFIG, 1000);
    // Mark as registered for recordBatch logic
    this.registered.set(true);
    // Register with per-cluster coordinator (created lazily, shared across reporters)
    String clusterKey = normalizeBootstrapServers(bootstrapServers);
    ClusterStatsCoordinator coord = coordinators.computeIfAbsent(clusterKey,
            k -> new ClusterStatsCoordinator(bootstrapServers, producerProperties));
    coord.addReporter(this);
}
/**
* Records compression statistics for a batch of messages.
* This method should be called by the producer each time it sends a batch.
*
* @param uncompressedSize Size of batch before compression (in bytes)
* @param compressedSize Size of batch after compression (in bytes)
*/
public void recordBatch(long uncompressedSize, long compressedSize) {
// Only record if we're actually running and not disabled
if (registered.get() && !disabled) {
statsCollector.recordBatch(uncompressedSize, compressedSize);
}
}
    // Serializes the current stats snapshot into a ClientStatsMessage and sends it on the
    // supplied producer. Called only by ClusterStatsCoordinator, which owns the producer's
    // lifecycle; this method never creates, flushes, or closes producers itself.
    void drainInto(Producer<String, String> producer) {
        if (disabled || !registered.get()) {
            return; // Do not send stats when disabled or deactivated
        }
        try {
            // captureAndReset() snapshots and zeroes the counters, so each report
            // covers exactly one reporting interval.
            ClientStatsCollector.Stats stats = statsCollector.captureAndReset();
            long totalBytesBefore = stats.getBytesBeforeCompression();
            long totalBytesAfter = stats.getBytesAfterCompression();
            ClientStatsMessage message = new ClientStatsMessage(
                    clientId,
                    NetworkUtils.getLocalIpAddress(),
                    totalBytesBefore,
                    totalBytesAfter,
                    ClientUtils.getClientVersion(),
                    NetworkUtils.getHostname(),
                    producerUuid);
            // Always attach metric snapshots, defaulting to empty maps if no data available
            java.util.Map<String, Double> metricsSnapshot = latestMetrics.get();
            message.setProducerMetrics(metricsSnapshot != null ? metricsSnapshot : new java.util.HashMap<>());
            java.util.Map<String, java.util.Map<String, Double>> topicMetricsSnapshot = latestTopicMetrics;
            message.setTopicMetrics(topicMetricsSnapshot != null ? topicMetricsSnapshot : new java.util.HashMap<>());
            java.util.Map<String, java.util.Map<String, Double>> nodeMetricsSnapshot = latestNodeMetrics;
            message.setNodeMetrics(nodeMetricsSnapshot != null ? nodeMetricsSnapshot : new java.util.HashMap<>());
            java.util.Map<String, String> appInfoMetricsSnapshot = latestAppInfoMetrics;
            message.setAppInfoMetrics(appInfoMetricsSnapshot != null ? appInfoMetricsSnapshot : new java.util.HashMap<>());
            // Always set originalConfig and optimizedConfig, defaulting to empty map if null
            message.setOriginalConfiguration(originalConfig != null ? originalConfig : new java.util.HashMap<>());
            message.setOptimizedConfiguration(optimizedConfig != null ? optimizedConfig : new java.util.HashMap<>());
            // Attach topics list - always set it, empty if no topics
            message.setTopics(new java.util.ArrayList<>(topicsWritten));
            // When building the ClientStatsMessage, set the most impactful topic if available
            if (mostImpactfulTopic != null) {
                message.setMostImpactfulTopic(mostImpactfulTopic);
            }
            // Set language and error fields
            message.setLanguage("Java");
            message.setError(error != null ? error : "");
            String json = objectMapper.writeValueAsString(message);
            ProducerRecord<String, String> record = new ProducerRecord<>(CLIENTS_TOPIC, json);
            producer.send(record);
            logger.debug("Producer {} stats sent: before={} bytes, after={} bytes",
                    clientId, totalBytesBefore, totalBytesAfter);
        } catch (Exception e) {
            // Reporting must never break the host application: log and swallow everything.
            logger.error("[ERR-021] Failed to drain stats for client {}: {}", clientId, e.getMessage(), e);
        }
    }
private static String normalizeBootstrapServers(String servers) {
if (servers == null)
return "";
String[] parts = servers.split(",");
java.util.Arrays.sort(parts);
return String.join(",", parts).trim();
}
/**
* Merge the latest producer-level metrics into the cached snapshot.
* <p>
* We <strong>merge</strong> instead of replacing the whole map so that metrics which are
* temporarily absent (e.g. become NaN / not reported while the producer is idle)
* still appear in the next heartbeat with their <em>last known</em> value. Any key
* present in {@code metrics} overwrites the previous value – even when the new
* value is {@code 0.0}, negative or otherwise – but keys that are <em>missing</em>
* are left untouched.
* <p>
* Special handling for compression-rate-avg and record-size-avg: if the new value is 0 and the previous
* value is greater than 0, preserve the previous value.
*/
public void updateProducerMetrics(java.util.Map<String, Double> metrics) {
if (!disabled && metrics != null) {
latestMetrics.updateAndGet(prev -> {
java.util.Map<String, Double> merged = new java.util.HashMap<>(prev);
// Special handling for compression-rate-avg and record-size-avg
for (java.util.Map.Entry<String, Double> entry : metrics.entrySet()) {
String key = entry.getKey();
Double newValue = entry.getValue();
if (("compression-rate-avg".equals(key) || "record-size-avg".equals(key)) && newValue != null && newValue == 0.0) {
Double prevValue = merged.get(key);
if (prevValue != null && prevValue > 0.0) {
// Keep the previous non-zero value instead of overwriting with 0
continue;
}
}
merged.put(key, newValue);
}
return merged;
});
}
}
/**
* Merge the latest per-topic metrics. Same rationale as above, but we first
* locate / create the nested map for each topic, then merge its individual
* metric values.
* <p>
* Special handling for compression-rate: if the new value is 0 and the previous
* value is greater than 0, preserve the previous value.
*/
public void updateTopicMetrics(java.util.Map<String, java.util.Map<String, Double>> topicMetrics) {
if (!disabled && topicMetrics != null) {
topicMetrics.forEach((topic, metricMap) -> {
java.util.Map<String, Double> existing = latestTopicMetrics.computeIfAbsent(topic, k -> new java.util.HashMap<>());
// Special handling for compression-rate
for (java.util.Map.Entry<String, Double> entry : metricMap.entrySet()) {
String key = entry.getKey();
Double newValue = entry.getValue();
if ("compression-rate".equals(key) && newValue != null && newValue == 0.0) {
Double prevValue = existing.get(key);
if (prevValue != null && prevValue > 0.0) {
// Keep the previous non-zero value instead of overwriting with 0
continue;
}
}
existing.put(key, newValue);
}
});
}
}
/**
* Merge the latest per-node metrics (broker-level statistics). Behaviour is
* analogous to {@link #updateTopicMetrics}.
*/
public void updateNodeMetrics(java.util.Map<String, java.util.Map<String, Double>> nodeMetrics) {
if (!disabled && nodeMetrics != null) {
nodeMetrics.forEach((node, metricMap) -> {
java.util.Map<String, Double> existing = latestNodeMetrics.computeIfAbsent(node, k -> new java.util.HashMap<>());
existing.putAll(metricMap);
});
}
}
/**
* Merge the latest <code>app-info</code> gauge values. These are string
* properties (Kafka version, client id, etc.), so we store them as
* {@code Map<String,String>}. As with the numeric maps we merge to retain
* previously-seen keys that might be absent in the current snapshot.
*/
public void updateAppInfoMetrics(java.util.Map<String, String> appInfoMetrics) {
if (appInfoMetrics != null && !disabled) {
if (this.latestAppInfoMetrics == null) {
this.latestAppInfoMetrics = new java.util.HashMap<>();
}
this.latestAppInfoMetrics.putAll(appInfoMetrics);
}
}
public void addTopics(java.util.Collection<String> topics) {
if (!disabled && topics != null) {
topicsWritten.addAll(topics);
}
}
public void setConfigurations(java.util.Map<String, Object> originalCfg,
java.util.Map<String, Object> optimizedCfg) {
if (!disabled) {
this.originalConfig = (originalCfg != null) ? originalCfg : new java.util.HashMap<>();
this.optimizedConfig = (optimizedCfg != null) ? optimizedCfg : new java.util.HashMap<>();
}
}
    /** Records the most impactful topic name to include in the next stats report. */
    public void updateMostImpactfulTopic(String topic) {
        this.mostImpactfulTopic = topic;
    }
    /**
     * Records the last error message to include in stats reports. Note that, unlike
     * the metric setters, this is not guarded by the disabled flag — drainInto()
     * already no-ops when reporting is disabled.
     */
    public void updateError(String error) {
        this.error = error;
    }
    // Coordinator class per cluster: owns the reporting schedule for all reporters that
    // target the same Kafka cluster, so a single short-lived producer per run serves
    // every reporter instead of one producer per reporter.
    private static class ClusterStatsCoordinator {
        private final String bootstrapServers;
        // Producer properties used for every internal stats producer this coordinator creates.
        private final Properties baseProps;
        // Thread-safe reporter list; iterated by the scheduled run() while reporters come and go.
        private final CopyOnWriteArrayList<ClientStatsReporter> reporters = new CopyOnWriteArrayList<>();
        // Ensures the periodic task is scheduled exactly once per coordinator.
        private final AtomicBoolean scheduled = new AtomicBoolean(false);
        // Report interval for this cluster (milliseconds)
        private final long reportIntervalMs;
        // Resolves the report interval from the cluster's Superstream metadata, falling
        // back to DEFAULT_REPORT_INTERVAL_MS when metadata is unavailable or invalid.
        ClusterStatsCoordinator(String bootstrapServers, Properties baseProps) {
            this.bootstrapServers = bootstrapServers;
            this.baseProps = baseProps;
            long interval = DEFAULT_REPORT_INTERVAL_MS;
            try {
                ai.superstream.model.MetadataMessage meta = ai.superstream.core.SuperstreamManager.getInstance()
                        .getOrFetchMetadataMessage(bootstrapServers, baseProps).getKey();
                if (meta != null && meta.getReportIntervalMs() != null && meta.getReportIntervalMs() > 0) {
                    interval = meta.getReportIntervalMs();
                }
            } catch (Exception e) {
                logger.warn("Could not obtain report interval from metadata: {}. Using default {} ms", e.getMessage(), DEFAULT_REPORT_INTERVAL_MS);
            }
            this.reportIntervalMs = interval;
        }
        // Adds a reporter, sends its stats once immediately, and lazily starts the
        // periodic reporting task on the first registration.
        void addReporter(ClientStatsReporter r) {
            reporters.add(r);
            // Schedule immediate run for this specific reporter; a dedicated producer is
            // created and closed (try-with-resources) just for this one-off send.
            scheduler.schedule(() -> {
                try (Producer<String, String> producer = new KafkaProducer<>(baseProps)) {
                    r.drainInto(producer);
                    producer.flush();
                } catch (Exception e) {
                    logger.error("[ERR-046] Failed to send immediate stats for new reporter: {}", e.getMessage(), e);
                }
            }, 0, TimeUnit.MILLISECONDS);
            // Only schedule the periodic task if not already scheduled
            if (scheduled.compareAndSet(false, true)) {
                scheduler.scheduleAtFixedRate(this::run, reportIntervalMs, reportIntervalMs, TimeUnit.MILLISECONDS);
            }
        }
        // Allows outer class to remove a reporter when the underlying KafkaProducer is closed.
        // NOTE(review): the periodic task keeps running even when the list becomes empty;
        // run() just returns early in that case.
        void removeReporter(ClientStatsReporter r) {
            reporters.remove(r);
        }
        // Periodic task: drains every registered reporter through one shared producer.
        private void run() {
            if (reporters.isEmpty())
                return;
            // Log the configuration before creating producer
            if (SuperstreamLogger.isDebugEnabled()) {
                StringBuilder configLog = new StringBuilder("Creating internal ClientStatsReporter producer with configuration: ");
                baseProps.forEach((key, value) -> {
                    // Mask sensitive values
                    if (key.toString().toLowerCase().contains("password") ||
                            key.toString().toLowerCase().contains("sasl.jaas.config") ||
                            key.toString().equals("basic.auth.user.info")) {
                        configLog.append(key).append("=[MASKED], ");
                    } else {
                        configLog.append(key).append("=").append(value).append(", ");
                    }
                });
                // Remove trailing comma and space
                if (configLog.length() > 2) {
                    configLog.setLength(configLog.length() - 2);
                }
                logger.debug(configLog.toString());
            }
            try (Producer<String, String> producer = new KafkaProducer<>(baseProps)) {
                for (ClientStatsReporter r : reporters) {
                    r.drainInto(producer);
                }
                producer.flush();
                logger.debug("Successfully reported cluster stats to {}", CLIENTS_TOPIC);
            } catch (Exception e) {
                logger.error("[ERR-022] Cluster stats coordinator failed for {}, please make sure the Kafka user has read/write/describe permissions on superstream.* topics: {}", bootstrapServers, e.getMessage(), e);
            }
        }
    }
    /**
     * Deactivate this reporter so that it no longer emits statistics. The reporter
     * remains registered in the coordinator list but {@link #drainInto} becomes a
     * no-op which is inexpensive and avoids extra allocations.
     * <p>
     * NOTE(review): nothing in this class re-sets the flag after construction, so
     * deactivation appears permanent for the instance — confirm before reusing it.
     */
    public void deactivate() {
        registered.set(false);
    }
/**
* Remove the given reporter instance from all cluster coordinators. Called by the
* agent when the application closes its <code>KafkaProducer</code> so that we
* do not retain references to obsolete reporter objects.
*/
public static void deregisterReporter(ClientStatsReporter reporter) {
if (reporter == null) {
return;
}
coordinators.values().forEach(coord -> coord.removeReporter(reporter));
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/core/ConfigurationOptimizer.java | package ai.superstream.core;
import ai.superstream.model.MetadataMessage;
import ai.superstream.model.TopicConfiguration;
import ai.superstream.util.SuperstreamLogger;
import java.util.*;
import java.util.stream.Collectors;
/**
* Optimizes Kafka producer configurations based on metadata.
*/
public class ConfigurationOptimizer {
    private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(ConfigurationOptimizer.class);
    // Env var read by isLatencySensitive(); when "true", linger.ms is never modified.
    private static final String LATENCY_SENSITIVE_ENV_VAR = "SUPERSTREAM_LATENCY_SENSITIVE";
    // Configuration parameters whose existing value is kept when it is already larger
    // than the recommended one (see applyOptimalConfiguration()).
    private static final Set<String> PRESERVE_IF_LARGER = new HashSet<>(Arrays.asList(
            "batch.size",
            "linger.ms"));
    /**
     * Get the optimal configuration for a set of topics.
     * <p>
     * When none of the application's topics has an analyzed configuration, a default
     * optimization set is returned (zstd compression, 32KB batches, and — unless the
     * application is latency-sensitive — a 5s linger). Otherwise the optimized
     * configuration of the most impactful matching topic is used.
     *
     * @param metadataMessage The metadata message
     * @param applicationTopics The list of topics that the application might
     *                          produce to
     * @return The optimal configuration, or an empty map if no optimization is
     *         possible
     */
    public Map<String, Object> getOptimalConfiguration(MetadataMessage metadataMessage,
            List<String> applicationTopics) {
        boolean isLatencySensitive = isLatencySensitive();
        if (isLatencySensitive) {
            logger.debug("Application is marked as latency-sensitive, linger.ms will not be modified");
        }
        // Get all matching topic configurations
        List<TopicConfiguration> topics = Optional.ofNullable(metadataMessage.getTopicsConfiguration())
                .orElse(Collections.emptyList());
        List<TopicConfiguration> matchingConfigurations = topics.stream()
                .filter(config -> applicationTopics.contains(config.getTopicName()))
                .collect(Collectors.toList());
        Map<String, Object> optimalConfiguration;
        if (matchingConfigurations.isEmpty()) {
            if (applicationTopics.isEmpty()) {
                logger.debug(
                        "SUPERSTREAM_TOPICS_LIST environment variable contains no topics. Applying default optimizations.");
            } else {
                logger.debug(
                        "No matching topic configurations found for the application topics. Applying default optimizations.");
                logger.warn(
                        "The topics you're publishing to haven't been analyzed yet. For optimal results, either wait for the next analysis cycle or trigger one manually via the SuperClient Console");
            }
            // Apply default optimizations when no matching topics found
            optimalConfiguration = new HashMap<>();
            optimalConfiguration.put("compression.type", "zstd");
            optimalConfiguration.put("batch.size", 32768); // 32KB
            // Only add linger if not latency-sensitive
            if (!isLatencySensitive) {
                optimalConfiguration.put("linger.ms", 5000); // 5 seconds default
                logger.debug(
                        "Default optimizations will be applied: compression.type=zstd, batch.size=32768, linger.ms=5000");
            } else {
                logger.debug(
                        "Default optimizations will be applied: compression.type=zstd, batch.size=32768 (linger.ms unchanged)");
            }
            return optimalConfiguration;
        }
        // Find the most impactful topic. matchingConfigurations is non-empty here, so
        // findMostImpactfulTopic returns a non-null result.
        TopicConfiguration mostImpactfulTopic = findMostImpactfulTopic(matchingConfigurations);
        optimalConfiguration = new HashMap<>(mostImpactfulTopic.getOptimizedConfiguration());
        // If latency sensitive, remove linger.ms setting
        if (isLatencySensitive && optimalConfiguration.containsKey("linger.ms")) {
            optimalConfiguration.remove("linger.ms");
            logger.info("Ignore linger.ms from optimizations due to latency-sensitive configuration");
        }
        return optimalConfiguration;
    }
    /**
     * Apply the optimal configuration to the producer properties.
     * <p>
     * Skips null values, skips {@code linger.ms} when latency-sensitive, and for the
     * keys in {@link #PRESERVE_IF_LARGER} keeps an existing value that is already
     * larger than the recommendation. Each applied change is logged with its previous
     * value.
     *
     * @param properties The producer properties to modify (mutated in place)
     * @param optimalConfiguration The optimal configuration to apply
     * @return The list of configuration keys that were modified
     */
    public List<String> applyOptimalConfiguration(Properties properties, Map<String, Object> optimalConfiguration) {
        if (optimalConfiguration == null || optimalConfiguration.isEmpty()) {
            return Collections.emptyList();
        }
        List<String> modifiedKeys = new ArrayList<>();
        boolean isLatencySensitive = isLatencySensitive();
        for (Map.Entry<String, Object> entry : optimalConfiguration.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            if (value == null) {
                logger.warn("Skipping null value for configuration key: {}", key);
                continue;
            }
            // Skip linger.ms optimization if the application is latency-sensitive
            // (double guard: getOptimalConfiguration usually strips it already).
            if ("linger.ms".equals(key) && isLatencySensitive) {
                logger.info("Skipping linger.ms optimization due to latency-sensitive configuration");
                continue;
            }
            // Special handling for configurations that should be preserved if larger
            if (PRESERVE_IF_LARGER.contains(key)) {
                // Get the recommended value as a number
                int recommendedValue;
                try {
                    recommendedValue = value instanceof Number ? ((Number) value).intValue()
                            : Integer.parseInt(value.toString());
                } catch (NumberFormatException e) {
                    logger.warn("Invalid recommended value for {}: {}. Skipping this parameter.", key, value);
                    continue;
                }
                // Check if there's an existing setting
                Object existingValue = properties.get(key);
                if (existingValue != null) {
                    int existingNumericValue;
                    try {
                        existingNumericValue = existingValue instanceof Number ? ((Number) existingValue).intValue()
                                : Integer.parseInt(existingValue.toString());
                    } catch (NumberFormatException e) {
                        // Unparseable existing value: treat as 0 so the recommendation wins below.
                        logger.warn("Invalid existing {} value: {}. Will use recommended value.", key, existingValue);
                        existingNumericValue = 0;
                    }
                    // Keep the existing value if it's larger than the recommended value
                    if (existingNumericValue > recommendedValue) {
                        logger.debug("Keeping existing {} value {} as it's greater than recommended value {}",
                                key, existingNumericValue, recommendedValue);
                        continue; // Skip this key, keeping the existing value
                    }
                }
            }
            // Validate the configuration before applying
            if (!isValidConfiguration(key, value)) {
                logger.warn("Invalid configuration value for {}: {}. Skipping this parameter.", key, value);
                continue;
            }
            // Store the original value for logging
            Object originalValue = properties.get(key);
            // Apply the optimization
            properties.put(key, value);
            modifiedKeys.add(key);
            if (originalValue == null) {
                logger.info("Setting configuration: {}={} (was not previously set)", key, value);
            } else {
                logger.info("Overriding configuration: {}={} (was: {})", key, value, originalValue);
            }
        }
        return modifiedKeys;
    }
private boolean isValidConfiguration(String key, Object value) {
if (value == null) {
logger.warn("Invalid null value for configuration key: {}", key);
return false;
}
try {
if ("compression.type".equals(key)) {
String compressionType = value.toString();
// Valid compression types in Kafka
return Arrays.asList("none", "gzip", "snappy", "lz4", "zstd").contains(compressionType);
}
// Add validation for other key types as needed
return true;
} catch (Exception e) {
logger.warn("Error validating configuration {}: {}", key, value, e);
return false;
}
}
/**
* Determine if the application is latency-sensitive based on environment
* variable.
*
* @return true if the application is latency-sensitive, false otherwise
*/
public boolean isLatencySensitive() {
String latencySensitiveStr = System.getenv(LATENCY_SENSITIVE_ENV_VAR);
if (latencySensitiveStr != null && !latencySensitiveStr.trim().isEmpty()) {
return Boolean.parseBoolean(latencySensitiveStr.trim());
}
return false; // Default to not latency-sensitive
}
    /**
     * Helper to find the most impactful topic from a list of matching
     * configurations, i.e. the one with the highest impact score.
     * Returns null only when the list is empty — callers that pass a non-empty
     * list always get a non-null result.
     */
    private TopicConfiguration findMostImpactfulTopic(List<TopicConfiguration> matchingConfigurations) {
        return matchingConfigurations.stream()
                .max(Comparator.comparing(TopicConfiguration::calculateImpactScore))
                .orElse(null);
    }
/**
* Get the most impactful topic name for a set of topics.
*
* @param metadataMessage The metadata message
* @param applicationTopics The list of topics that the application might
* produce to
* @return The name of the most impactful topic, or null if none found
*/
public String getMostImpactfulTopicName(MetadataMessage metadataMessage, List<String> applicationTopics) {
List<TopicConfiguration> topics = Optional.ofNullable(metadataMessage.getTopicsConfiguration())
.orElse(Collections.emptyList());
List<TopicConfiguration> matchingConfigurations = topics.stream()
.filter(config -> applicationTopics.contains(config.getTopicName()))
.collect(Collectors.toList());
if (matchingConfigurations.isEmpty()) {
return null;
}
TopicConfiguration mostImpactfulTopic = findMostImpactfulTopic(matchingConfigurations);
return mostImpactfulTopic != null ? mostImpactfulTopic.getTopicName() : null;
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/core/MetadataConsumer.java | package ai.superstream.core;
import ai.superstream.model.MetadataMessage;
import ai.superstream.util.SuperstreamLogger;
import ai.superstream.util.KafkaPropertiesUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import ai.superstream.agent.KafkaProducerInterceptor;
import java.io.IOException;
import java.time.Duration;
import java.util.*;
/**
* Consumes messages from the superstream.metadata_v1 topic.
*/
public class MetadataConsumer {
    private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(MetadataConsumer.class);
    // Internal topic holding the cluster's Superstream optimization metadata.
    private static final String METADATA_TOPIC = "superstream.metadata_v1";
    // Shared Jackson mapper, configured leniently so empty beans and unknown fields
    // in the metadata payload do not break deserialization.
    private static final ObjectMapper objectMapper = new ObjectMapper()
            .configure(com.fasterxml.jackson.databind.SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
            .configure(com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    /**
     * Get the metadata message from the Kafka cluster by reading the single latest
     * record of the superstream.metadata_v1 topic (partition 0).
     *
     * @param bootstrapServers The Kafka bootstrap servers
     * @param originalClientProperties the application client's properties; only the
     *                                 client/auth configuration is copied from them
     * @return A pair containing the metadata message (or null if error) and the error message (or null if no error)
     */
    public java.util.AbstractMap.SimpleEntry<MetadataMessage, String> getMetadataMessage(String bootstrapServers, Properties originalClientProperties) {
        Properties properties = new Properties();
        // Copy essential client configuration properties from the original client
        KafkaPropertiesUtils.copyClientConfigurationProperties(originalClientProperties, properties);
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, KafkaProducerInterceptor.SUPERSTREAM_LIBRARY_PREFIX + "metadata-consumer");
        // Log the configuration before creating consumer
        if (SuperstreamLogger.isDebugEnabled()) {
            StringBuilder configLog = new StringBuilder("Creating internal MetadataConsumer with configuration: ");
            properties.forEach((key, value) -> {
                // Mask sensitive values
                if (key.toString().toLowerCase().contains("password") ||
                        key.toString().toLowerCase().contains("sasl.jaas.config") ||
                        key.toString().equals("basic.auth.user.info")) {
                    configLog.append(key).append("=[MASKED], ");
                } else {
                    configLog.append(key).append("=").append(value).append(", ");
                }
            });
            // Remove trailing comma and space
            if (configLog.length() > 2) {
                configLog.setLength(configLog.length() - 2);
            }
            logger.debug(configLog.toString());
        }
        try (Consumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // Check if the metadata topic exists
            Set<String> topics = consumer.listTopics().keySet();
            if (!topics.contains(METADATA_TOPIC)) {
                String errMsg = "[ERR-034] Superstream internal topic is missing. This topic is required for Superstream to function properly. Please make sure the Kafka user has read/write/describe permissions on superstream.* topics.";
                logger.error(errMsg);
                return new java.util.AbstractMap.SimpleEntry<>(null, errMsg);
            }
            // Assign the metadata topic (manual assignment: no consumer group is used)
            TopicPartition partition = new TopicPartition(METADATA_TOPIC, 0);
            consumer.assign(Collections.singletonList(partition));
            // Seek to the end and get the current offset
            consumer.seekToEnd(Collections.singletonList(partition));
            long endOffset = consumer.position(partition);
            if (endOffset == 0) {
                // Topic exists but is empty — no metadata has ever been published.
                String errMsg = "[ERR-035] Unable to retrieve optimizations data from Superstream. This is required for optimization. Please contact the Superstream team if the issue persists.";
                logger.error(errMsg);
                return new java.util.AbstractMap.SimpleEntry<>(null, errMsg);
            }
            // Seek to the last message
            consumer.seek(partition, endOffset - 1);
            // Poll for the message
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            if (records.isEmpty()) {
                String errMsg = "[ERR-036] Unable to retrieve optimizations data from Superstream. This is required for optimization. Please contact the Superstream team if the issue persists.";
                logger.error(errMsg);
                return new java.util.AbstractMap.SimpleEntry<>(null, errMsg);
            }
            logger.debug("Successfully retrieved a message from the {} topic", METADATA_TOPIC);
            // Parse the message
            String json = records.iterator().next().value();
            return new java.util.AbstractMap.SimpleEntry<>(objectMapper.readValue(json, MetadataMessage.class), null);
        } catch (IOException e) {
            // JSON parsing failure
            String errMsg = "[ERR-027] Unable to retrieve optimizations data from Superstream. This is required for optimization. Please contact the Superstream team if the issue persists: " + e.getMessage();
            logger.error(errMsg, e);
            return new java.util.AbstractMap.SimpleEntry<>(null, errMsg);
        } catch (Exception e) {
            // Kafka-level failure (connectivity, authorization, timeouts, ...)
            String errMsg = "[ERR-028] Unable to retrieve optimizations data from Superstream. This is required for optimization. Please make sure the Kafka user has read/write/describe permissions on superstream.* topics: " + e.getMessage();
            logger.error(errMsg, e);
            return new java.util.AbstractMap.SimpleEntry<>(null, errMsg);
        }
    }
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/core/SuperstreamManager.java | package ai.superstream.core;
import ai.superstream.model.MetadataMessage;
import ai.superstream.util.SuperstreamLogger;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* Main manager class for the Superstream library.
*/
public class SuperstreamManager {
    private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(SuperstreamManager.class);
    // Env var naming the topics the application produces to.
    private static final String TOPICS_ENV_VAR = "SUPERSTREAM_TOPICS_LIST";
    // Env var that disables all Superstream optimization when set to "true".
    private static final String DISABLED_ENV_VAR = "SUPERSTREAM_DISABLED";
    // Per-thread re-entrancy guard preventing recursive optimization of internal producers.
    private static final ThreadLocal<Boolean> OPTIMIZATION_IN_PROGRESS = new ThreadLocal<>();
    // Singleton; volatile so the double-checked locking in getInstance() is safe.
    private static volatile SuperstreamManager instance;
    private final MetadataConsumer metadataConsumer;
    private final ConfigurationOptimizer configurationOptimizer;
    // Caches fetched metadata per cluster to avoid repeated Kafka round-trips.
    private final Map<String, MetadataMessage> metadataCache;
    private final boolean disabled;
    // Private constructor: use getInstance(). Reads the disabled flag from the
    // environment once and wires up the consumer, optimizer, and metadata cache.
    private SuperstreamManager() {
        this.metadataConsumer = new MetadataConsumer();
        this.configurationOptimizer = new ConfigurationOptimizer();
        this.metadataCache = new ConcurrentHashMap<>();
        this.disabled = Boolean.parseBoolean(System.getenv(DISABLED_ENV_VAR));
        if (disabled) {
            logger.debug("Superstream optimization is disabled via environment variable");
        }
    }
    /**
     * Get the configuration optimizer instance.
     *
     * @return the shared {@link ConfigurationOptimizer} owned by this manager
     */
    public ConfigurationOptimizer getConfigurationOptimizer() {
        return configurationOptimizer;
    }
/**
* Check if optimization is already in progress for the current thread.
*
* @return true if optimization is in progress, false otherwise
*/
public static boolean isOptimizationInProgress() {
return Boolean.TRUE.equals(OPTIMIZATION_IN_PROGRESS.get());
}
/**
* Set the optimization in progress flag for the current thread.
*
* @param inProgress true if optimization is in progress, false otherwise
*/
public static void setOptimizationInProgress(boolean inProgress) {
if (inProgress) {
OPTIMIZATION_IN_PROGRESS.set(Boolean.TRUE);
} else {
OPTIMIZATION_IN_PROGRESS.remove();
}
}
    /**
     * Get the singleton instance of the SuperstreamManager.
     * <p>
     * Uses double-checked locking; this is safe because the {@code instance} field
     * is declared {@code volatile}.
     *
     * @return The SuperstreamManager instance
     */
    public static SuperstreamManager getInstance() {
        if (instance == null) {
            synchronized (SuperstreamManager.class) {
                if (instance == null) {
                    instance = new SuperstreamManager();
                }
            }
        }
        return instance;
    }
public static Map<String, Object> convertPropertiesToMap(Properties properties) {
Map<String, Object> map = new HashMap<>();
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
map.put(String.valueOf(entry.getKey()), entry.getValue());
}
return map;
}
/**
* Optimize the producer properties for a given Kafka cluster.
*
* @param bootstrapServers The Kafka bootstrap servers
* @param clientId The client ID
* @param properties The producer properties to optimize
* @return True if the optimization was successful, false otherwise
*/
public boolean optimizeProducer(String bootstrapServers, String clientId, Properties properties) {
if (disabled) {
return false;
}
// Skip if already optimizing (prevents infinite recursion)
if (isOptimizationInProgress()) {
logger.debug("Skipping optimization for producer {} as optimization is already in progress", clientId);
return false;
}
try {
// Mark optimization as in progress for this thread
setOptimizationInProgress(true);
// Get or fetch the metadata message
java.util.AbstractMap.SimpleEntry<MetadataMessage, String> result = getOrFetchMetadataMessage(bootstrapServers, properties);
MetadataMessage metadataMessage = result.getKey();
String error = result.getValue();
if (metadataMessage == null) {
// Error is already logged in getOrFetchMetadataMessage
// Push ConfigInfo with error and original config for stats reporting
java.util.Deque<ai.superstream.agent.KafkaProducerInterceptor.ConfigInfo> cfgStack = ai.superstream.agent.KafkaProducerInterceptor.TL_CFG_STACK.get();
cfgStack.push(new ai.superstream.agent.KafkaProducerInterceptor.ConfigInfo(convertPropertiesToMap(properties), new java.util.HashMap<>(), error));
return false;
}
// Create a copy of the original configuration for reporting
Properties originalProperties = new Properties();
originalProperties.putAll(properties);
// Check if optimization is active
if (!metadataMessage.isActive()) {
String errMsg = "[ERR-054] Superstream optimization is not active for this kafka cluster, please head to the Superstream console and activate it.";
logger.error(errMsg);
// Push ConfigInfo with error and original config for stats reporting
java.util.Deque<ai.superstream.agent.KafkaProducerInterceptor.ConfigInfo> cfgStack = ai.superstream.agent.KafkaProducerInterceptor.TL_CFG_STACK.get();
cfgStack.push(new ai.superstream.agent.KafkaProducerInterceptor.ConfigInfo(convertPropertiesToMap(properties), new java.util.HashMap<>(), errMsg));
return false;
}
// Get the application topics
List<String> applicationTopics = getApplicationTopics();
// Get the optimal configuration
Map<String, Object> optimalConfiguration = configurationOptimizer.getOptimalConfiguration(
metadataMessage, applicationTopics);
// Capture the full original configuration map BEFORE applying optimizations
Map<String,Object> originalFullMap = convertPropertiesToMap(properties);
// Apply the optimal configuration
List<String> modifiedKeys = configurationOptimizer.applyOptimalConfiguration(properties, optimalConfiguration);
if (modifiedKeys.isEmpty()) {
logger.debug("No configuration parameters were modified");
return false;
}
// Build optimized configuration map to report: include every key that was considered for optimisation.
Map<String, Object> optimizedProperties = new HashMap<>();
for (String key : optimalConfiguration.keySet()) {
// After applyOptimalConfiguration, 'properties' holds the final value (either overridden or original).
Object finalVal = properties.get(key);
if (finalVal == null) {
// If not present in current props, fall back to original value (may be null as well)
finalVal = originalProperties.get(key);
}
if (finalVal != null) {
// Convert numeric strings to actual numbers for reporting
if (finalVal instanceof String) {
String strVal = ((String) finalVal).trim();
try {
if (!strVal.isEmpty()) {
// Prefer Integer when within range, otherwise Long
long longVal = Long.parseLong(strVal);
if (longVal >= Integer.MIN_VALUE && longVal <= Integer.MAX_VALUE) {
finalVal = (int) longVal;
} else {
finalVal = longVal;
}
}
} catch (NumberFormatException ignored) {
// leave as String if not purely numeric
}
}
optimizedProperties.put(key, finalVal);
}
}
// If the application is latency-sensitive we leave linger.ms untouched. Ensure we still report its value
// so that the clients topic contains the complete set actually in effect.
final String LINGER_MS_KEY = "linger.ms";
if (!optimizedProperties.containsKey(LINGER_MS_KEY)) {
Object lingerVal = properties.get(LINGER_MS_KEY);
if (lingerVal == null) {
lingerVal = originalProperties.get(LINGER_MS_KEY);
}
if (lingerVal != null) {
optimizedProperties.put(LINGER_MS_KEY, lingerVal);
}
}
// Pass configuration info via ThreadLocal to interceptor's onExit (full map for original config)
ai.superstream.agent.KafkaProducerInterceptor.TL_CFG_STACK.get()
.push(new ai.superstream.agent.KafkaProducerInterceptor.ConfigInfo(originalFullMap, optimizedProperties));
// Log optimization success with appropriate message based on configuration and client ID
boolean isLatencySensitive = configurationOptimizer.isLatencySensitive();
boolean isUsingDefaults = applicationTopics != null && !applicationTopics.isEmpty() &&
(metadataMessage.getTopicsConfiguration() == null ||
metadataMessage.getTopicsConfiguration().stream().noneMatch(tc -> applicationTopics.contains(tc.getTopicName())));
String baseMessage = isUsingDefaults ?
"Successfully optimized producer with default optimization parameters" :
"Successfully optimized producer configuration";
if (clientId != null && !clientId.trim().isEmpty()) {
baseMessage += " for " + clientId;
}
if (isLatencySensitive) {
baseMessage += " (linger.ms left unchanged due to latency sensitivity)";
}
logger.info(baseMessage);
return true;
} catch (Exception e) {
logger.error("[ERR-030] Failed to optimize producer configuration: {}", e.getMessage(), e);
return false;
} finally {
// Always clear the flag when done
setOptimizationInProgress(false);
}
}
/**
 * Get the metadata message for a given Kafka cluster, using a per-cluster cache.
 *
 * @param bootstrapServers   the Kafka bootstrap servers list (comma separated)
 * @param originalProperties the producer's original properties, forwarded to the
 *                           metadata consumer (so it can reuse the caller's
 *                           security/network settings)
 * @return A pair containing the metadata message (or null if error) and the error message (or null if no error)
 */
public java.util.AbstractMap.SimpleEntry<MetadataMessage, String> getOrFetchMetadataMessage(String bootstrapServers, Properties originalProperties) {
    // Normalise the bootstrap servers so that different orderings of the same
    // broker list map to a single cache key. This avoids duplicate metadata
    // Kafka consumers and wasted network calls when the application creates
    // multiple producers with logically-identical bootstrap lists such as
    // "b1:9092,b2:9092" and "b2:9092,b1:9092".
    String cacheKey = normalizeBootstrapServers(bootstrapServers);
    // Check the cache first
    if (metadataCache.containsKey(cacheKey)) {
        return new java.util.AbstractMap.SimpleEntry<>(metadataCache.get(cacheKey), null);
    }
    // Fetch the metadata using the *original* string (ordering is irrelevant
    // for the Kafka client itself)
    java.util.AbstractMap.SimpleEntry<MetadataMessage, String> result = metadataConsumer.getMetadataMessage(bootstrapServers, originalProperties);
    MetadataMessage metadataMessage = result.getKey();
    if (metadataMessage != null) {
        // Only cache successful fetches so that transient errors are retried
        // on the next call instead of being pinned in the cache.
        metadataCache.put(cacheKey, metadataMessage);
    }
    return result;
}
/**
 * Produce a canonical representation of the bootstrap servers list.
 * <p>
 * The input may contain duplicates, whitespace or different ordering - we
 * split on commas, trim each entry, drop empties and duplicates, sort the
 * remaining entries lexicographically and join them back with commas. The
 * resulting string can safely be used as a map key that uniquely identifies
 * a Kafka cluster.
 *
 * @param servers raw bootstrap servers string; may be null
 * @return the canonical comma-joined server list, or "" for null/blank input
 */
// Package-private (was private) so the canonicalisation can be unit-tested;
// still internal to this package.
static String normalizeBootstrapServers(String servers) {
    if (servers == null) {
        return "";
    }
    // TreeSet gives de-duplication and lexicographic ordering in one pass, so
    // "b1:9092,b1:9092" and "b1:9092" now map to the same cache key (the old
    // List-based implementation kept duplicates and produced distinct keys).
    java.util.Set<String> cleaned = new java.util.TreeSet<>();
    for (String part : servers.split(",")) {
        String trimmed = part.trim();
        if (!trimmed.isEmpty()) {
            cleaned.add(trimmed);
        }
    }
    return String.join(",", cleaned);
}
/**
 * Get the list of application topics from the environment variable.
 *
 * @return The list of application topics; empty when the variable is unset or blank
 */
private List<String> getApplicationTopics() {
    String topicsString = System.getenv(TOPICS_ENV_VAR);
    if (topicsString == null || topicsString.trim().isEmpty()) {
        return Collections.emptyList();
    }
    // Split on commas, trimming each entry and discarding blanks.
    List<String> topics = new java.util.ArrayList<>();
    for (String raw : topicsString.split(",")) {
        String topic = raw.trim();
        if (!topic.isEmpty()) {
            topics.add(topic);
        }
    }
    return topics;
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/model/ClientMessage.java | package ai.superstream.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import java.util.Objects;
/**
 * Represents a message to be sent to the superstream.clients topic.
 * <p>
 * Plain Jackson bean: the {@code @JsonProperty} annotations define the
 * snake_case wire names used on the topic.
 */
public class ClientMessage {
    private int superstreamClusterId;            // serialized as "superstream_cluster_id"
    private boolean active;                      // whether optimization is active for the cluster
    private String clientId;
    private String ipAddress;
    private String clientVersion;                // serialized as "version"
    private String language;
    private String clientType;
    private Map<String, Object> originalConfiguration;   // producer config before optimization
    private Map<String, Object> optimizedConfiguration;  // producer config after optimization
    private String mostImpactfulTopic;
    private Map<String, String> environmentVariables;    // SUPERSTREAM_* env vars (auto-collected)
    private String hostname;
    private String producerUuid;                 // serialized as "superstream_client_uid"
    private String error;                        // error description, if optimization failed

    public ClientMessage() {
        // Default constructor for Jackson
    }

    /**
     * Full constructor. Note that {@code environmentVariables} is not a
     * parameter: it is populated automatically from the SUPERSTREAM_* process
     * environment via {@link ai.superstream.util.EnvironmentVariables}.
     */
    public ClientMessage(int superstreamClusterId, boolean active, String clientId, String ipAddress, String clientVersion, String language, String clientType,
                         Map<String, Object> originalConfiguration, Map<String, Object> optimizedConfiguration,
                         String mostImpactfulTopic, String hostname, String producerUuid, String error) {
        this.superstreamClusterId = superstreamClusterId;
        this.active = active;
        this.clientId = clientId;
        this.ipAddress = ipAddress;
        this.clientVersion = clientVersion;
        this.language = language;
        this.clientType = clientType;
        this.originalConfiguration = originalConfiguration;
        this.optimizedConfiguration = optimizedConfiguration;
        this.mostImpactfulTopic = mostImpactfulTopic;
        this.environmentVariables = ai.superstream.util.EnvironmentVariables.getSuperstreamEnvironmentVariables();
        this.hostname = hostname;
        this.producerUuid = producerUuid;
        this.error = error;
    }

    // --- Jackson-annotated accessors; wire names are snake_case ---

    @JsonProperty("superstream_cluster_id")
    public int getSuperstreamClusterId() {
        return superstreamClusterId;
    }

    @JsonProperty("superstream_cluster_id")
    public void setSuperstreamClusterId(int superstreamClusterId) {
        this.superstreamClusterId = superstreamClusterId;
    }

    @JsonProperty("active")
    public boolean isActive() {
        return active;
    }

    @JsonProperty("active")
    public void setActive(boolean active) {
        this.active = active;
    }

    @JsonProperty("client_id")
    public String getClientId() {
        return clientId;
    }

    @JsonProperty("client_id")
    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    @JsonProperty("ip_address")
    public String getIpAddress() {
        return ipAddress;
    }

    @JsonProperty("ip_address")
    public void setIpAddress(String ipAddress) {
        this.ipAddress = ipAddress;
    }

    @JsonProperty("version")
    public String getClientVersion() {
        return clientVersion;
    }

    @JsonProperty("version")
    public void setClientVersion(String clientVersion) {
        this.clientVersion = clientVersion;
    }

    @JsonProperty("language")
    public String getLanguage() {
        return language;
    }

    @JsonProperty("language")
    public void setLanguage(String language) {
        this.language = language;
    }

    @JsonProperty("client_type")
    public String getClientType() {
        return clientType;
    }

    @JsonProperty("client_type")
    public void setClientType(String clientType) {
        this.clientType = clientType;
    }

    @JsonProperty("original_configuration")
    public Map<String, Object> getOriginalConfiguration() {
        return originalConfiguration;
    }

    @JsonProperty("original_configuration")
    public void setOriginalConfiguration(Map<String, Object> originalConfiguration) {
        this.originalConfiguration = originalConfiguration;
    }

    @JsonProperty("optimized_configuration")
    public Map<String, Object> getOptimizedConfiguration() {
        return optimizedConfiguration;
    }

    @JsonProperty("optimized_configuration")
    public void setOptimizedConfiguration(Map<String, Object> optimizedConfiguration) {
        this.optimizedConfiguration = optimizedConfiguration;
    }

    @JsonProperty("most_impactful_topic")
    public String getMostImpactfulTopic() {
        return mostImpactfulTopic;
    }

    @JsonProperty("most_impactful_topic")
    public void setMostImpactfulTopic(String mostImpactfulTopic) {
        this.mostImpactfulTopic = mostImpactfulTopic;
    }

    @JsonProperty("environment_variables")
    public Map<String, String> getEnvironmentVariables() {
        return environmentVariables;
    }

    @JsonProperty("environment_variables")
    public void setEnvironmentVariables(Map<String, String> environmentVariables) {
        this.environmentVariables = environmentVariables;
    }

    @JsonProperty("hostname")
    public String getHostname() {
        return hostname;
    }

    @JsonProperty("hostname")
    public void setHostname(String hostname) {
        this.hostname = hostname;
    }

    // Java field "producerUuid" maps to wire name "superstream_client_uid".
    @JsonProperty("superstream_client_uid")
    public String getProducerUuid() {
        return producerUuid;
    }

    @JsonProperty("superstream_client_uid")
    public void setProducerUuid(String producerUuid) {
        this.producerUuid = producerUuid;
    }

    @JsonProperty("error")
    public String getError() {
        return error;
    }

    @JsonProperty("error")
    public void setError(String error) {
        this.error = error;
    }

    // Value semantics over all fields.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ClientMessage that = (ClientMessage) o;
        return superstreamClusterId == that.superstreamClusterId &&
                active == that.active &&
                Objects.equals(clientId, that.clientId) &&
                Objects.equals(ipAddress, that.ipAddress) &&
                Objects.equals(clientVersion, that.clientVersion) &&
                Objects.equals(language, that.language) &&
                Objects.equals(clientType, that.clientType) &&
                Objects.equals(originalConfiguration, that.originalConfiguration) &&
                Objects.equals(optimizedConfiguration, that.optimizedConfiguration) &&
                Objects.equals(mostImpactfulTopic, that.mostImpactfulTopic) &&
                Objects.equals(environmentVariables, that.environmentVariables) &&
                Objects.equals(hostname, that.hostname) &&
                Objects.equals(producerUuid, that.producerUuid) &&
                Objects.equals(error, that.error);
    }

    @Override
    public int hashCode() {
        return Objects.hash(superstreamClusterId, active, clientId, ipAddress, clientVersion, language, clientType,
                originalConfiguration, optimizedConfiguration, mostImpactfulTopic,
                environmentVariables, hostname, producerUuid, error);
    }

    @Override
    public String toString() {
        return "ClientMessage{" +
                "superstream_cluster_id=" + superstreamClusterId +
                ", active=" + active +
                ", client_id='" + clientId + '\'' +
                ", ip_address='" + ipAddress + '\'' +
                ", version='" + clientVersion + '\'' +
                ", language='" + language + '\'' +
                ", client_type='" + clientType + '\'' +
                ", original_configuration=" + originalConfiguration +
                ", optimized_configuration=" + optimizedConfiguration +
                ", most_impactful_topic='" + mostImpactfulTopic + '\'' +
                ", environment_variables=" + environmentVariables +
                ", hostname='" + hostname + '\'' +
                ", superstream_client_uid='" + producerUuid + '\'' +
                ", error='" + error + '\'' +
                '}';
    }
}
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/model/ClientStatsMessage.java | package ai.superstream.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
/**
* Represents a message containing client statistics information
* to be sent to the superstream.clients topic.
*/
public class ClientStatsMessage {
private String clientId;
private String ipAddress;
private String type;
private String messageType;
private long totalWriteBefore;
private long totalWriteAfter;
private String clientVersion;
private java.util.Map<String, Double> producerMetrics; // Producer metrics map
private java.util.List<String> topics; // Topics written by producer
private java.util.Map<String, java.util.Map<String, Double>> topicMetrics; // Topic-level metrics map
private java.util.Map<String, java.util.Map<String, Double>> nodeMetrics; // Node-level metrics map
private java.util.Map<String, String> appInfoMetrics; // App-info metrics map with string values
private java.util.Map<String,Object> originalConfiguration;
private java.util.Map<String,Object> optimizedConfiguration;
private java.util.Map<String, String> environmentVariables;
private String hostname;
private String producerUuid;
private String mostImpactfulTopic;
private String language = "Java";
private String error;
public ClientStatsMessage() {
// Default constructor for Jackson
}
public ClientStatsMessage(String clientId, String ipAddress,
long totalWriteBefore, long totalWriteAfter,
String clientVersion, String hostname, String producerUuid) {
this.clientId = clientId;
this.ipAddress = ipAddress;
this.type = "producer";
this.messageType = "client_stats";
this.totalWriteBefore = totalWriteBefore;
this.totalWriteAfter = totalWriteAfter;
this.clientVersion = clientVersion;
this.environmentVariables = ai.superstream.util.EnvironmentVariables.getSuperstreamEnvironmentVariables();
this.hostname = hostname;
this.producerUuid = producerUuid;
}
@JsonProperty("client_id")
public String getClientId() {
return clientId;
}
@JsonProperty("client_id")
public void setClientId(String clientId) {
this.clientId = clientId;
}
@JsonProperty("ip_address")
public String getIpAddress() {
return ipAddress;
}
@JsonProperty("ip_address")
public void setIpAddress(String ipAddress) {
this.ipAddress = ipAddress;
}
@JsonProperty("type")
public String getType() {
return type;
}
@JsonProperty("type")
public void setType(String type) {
this.type = type;
}
@JsonProperty("message_type")
public String getMessageType() {
return messageType;
}
@JsonProperty("message_type")
public void setMessageType(String messageType) {
this.messageType = messageType;
}
@JsonProperty("write_before_compression_delta")
public long getTotalWriteBefore() {
return totalWriteBefore;
}
@JsonProperty("write_before_compression_delta")
public void setTotalWriteBefore(long totalWriteBefore) {
this.totalWriteBefore = totalWriteBefore;
}
@JsonProperty("write_after_compression_delta")
public long getTotalWriteAfter() {
return totalWriteAfter;
}
@JsonProperty("write_after_compression_delta")
public void setTotalWriteAfter(long totalWriteAfter) {
this.totalWriteAfter = totalWriteAfter;
}
@JsonProperty("version")
public String getClientVersion() {
return clientVersion;
}
@JsonProperty("version")
public void setClientVersion(String clientVersion) {
this.clientVersion = clientVersion;
}
@JsonProperty("producer_metrics")
public java.util.Map<String, Double> getProducerMetrics() {
return producerMetrics;
}
@JsonProperty("producer_metrics")
public void setProducerMetrics(java.util.Map<String, Double> producerMetrics) {
this.producerMetrics = producerMetrics;
}
@JsonProperty("topic_metrics")
public java.util.Map<String, java.util.Map<String, Double>> getTopicMetrics() {
return topicMetrics;
}
@JsonProperty("topic_metrics")
public void setTopicMetrics(java.util.Map<String, java.util.Map<String, Double>> topicMetrics) {
this.topicMetrics = topicMetrics;
}
@JsonProperty("topics")
public java.util.List<String> getTopics() {
return topics;
}
@JsonProperty("topics")
public void setTopics(java.util.List<String> topics) {
this.topics = topics;
}
@JsonProperty("node_metrics")
public java.util.Map<String, java.util.Map<String, Double>> getNodeMetrics() {
return nodeMetrics;
}
@JsonProperty("node_metrics")
public void setNodeMetrics(java.util.Map<String, java.util.Map<String, Double>> nodeMetrics) {
this.nodeMetrics = nodeMetrics;
}
@JsonProperty("app_info_metrics")
public java.util.Map<String, String> getAppInfoMetrics() {
return appInfoMetrics;
}
@JsonProperty("app_info_metrics")
public void setAppInfoMetrics(java.util.Map<String, String> appInfoMetrics) {
this.appInfoMetrics = appInfoMetrics;
}
@JsonProperty("original_configuration")
public java.util.Map<String,Object> getOriginalConfiguration() { return originalConfiguration; }
@JsonProperty("original_configuration")
public void setOriginalConfiguration(java.util.Map<String,Object> cfg) { this.originalConfiguration = cfg; }
@JsonProperty("optimized_configuration")
public java.util.Map<String,Object> getOptimizedConfiguration() { return optimizedConfiguration; }
@JsonProperty("optimized_configuration")
public void setOptimizedConfiguration(java.util.Map<String,Object> cfg) { this.optimizedConfiguration = cfg; }
@JsonProperty("environment_variables")
public java.util.Map<String, String> getEnvironmentVariables() {
return environmentVariables;
}
@JsonProperty("environment_variables")
public void setEnvironmentVariables(java.util.Map<String, String> environmentVariables) {
this.environmentVariables = environmentVariables;
}
@JsonProperty("hostname")
public String getHostname() {
return hostname;
}
@JsonProperty("hostname")
public void setHostname(String hostname) {
this.hostname = hostname;
}
@JsonProperty("superstream_client_uid")
public String getProducerUuid() {
return producerUuid;
}
@JsonProperty("superstream_client_uid")
public void setProducerUuid(String producerUuid) {
this.producerUuid = producerUuid;
}
@JsonProperty("most_impactful_topic")
public String getMostImpactfulTopic() {
return mostImpactfulTopic == null ? "" : mostImpactfulTopic;
}
@JsonProperty("most_impactful_topic")
public void setMostImpactfulTopic(String mostImpactfulTopic) {
this.mostImpactfulTopic = mostImpactfulTopic;
}
@JsonProperty("language")
public String getLanguage() { return language; }
@JsonProperty("language")
public void setLanguage(String language) { this.language = language; }
@JsonProperty("error")
public String getError() { return error; }
@JsonProperty("error")
public void setError(String error) { this.error = error; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClientStatsMessage that = (ClientStatsMessage) o;
return totalWriteBefore == that.totalWriteBefore &&
totalWriteAfter == that.totalWriteAfter &&
Objects.equals(clientId, that.clientId) &&
Objects.equals(ipAddress, that.ipAddress) &&
Objects.equals(type, that.type) &&
Objects.equals(messageType, that.messageType) &&
Objects.equals(clientVersion, that.clientVersion) &&
Objects.equals(producerMetrics, that.producerMetrics) &&
Objects.equals(topics, that.topics) &&
Objects.equals(originalConfiguration, that.originalConfiguration) &&
Objects.equals(optimizedConfiguration, that.optimizedConfiguration) &&
Objects.equals(environmentVariables, that.environmentVariables) &&
Objects.equals(hostname, that.hostname) &&
Objects.equals(producerUuid, that.producerUuid) &&
Objects.equals(mostImpactfulTopic, that.mostImpactfulTopic) &&
Objects.equals(language, that.language) &&
Objects.equals(error, that.error);
}
@Override
public int hashCode() {
return Objects.hash(clientId, ipAddress, type, messageType, totalWriteBefore,
totalWriteAfter, clientVersion, producerMetrics, topics,
originalConfiguration, optimizedConfiguration, environmentVariables,
hostname, producerUuid, mostImpactfulTopic, language, error);
}
@Override
public String toString() {
return "ClientStatsMessage{" +
"client_id='" + clientId + '\'' +
", ip_address='" + ipAddress + '\'' +
", type='" + type + '\'' +
", message_type='" + messageType + '\'' +
", write_before_compression_delta=" + totalWriteBefore +
", write_after_compression_delta=" + totalWriteAfter +
", version='" + clientVersion + '\'' +
", producer_metrics=" + producerMetrics +
", topics=" + topics +
", original_configuration=" + originalConfiguration +
", optimized_configuration=" + optimizedConfiguration +
", environment_variables=" + environmentVariables +
", hostname='" + hostname + '\'' +
", superstream_client_uid='" + producerUuid + '\'' +
", most_impactful_topic='" + mostImpactfulTopic + '\'' +
", language='" + language + '\'' +
", error='" + error + '\'' +
'}';
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/model/MetadataMessage.java | package ai.superstream.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Objects;
/**
* Represents a message from the superstream.metadata_v1 topic.
*/
public class MetadataMessage {
private int superstreamClusterId;
private boolean active;
private List<TopicConfiguration> topicsConfiguration;
// Optional: override for client stats reporting interval (milliseconds). Can be absent (null)
@JsonProperty("report_interval_ms")
private Long reportIntervalMs;
public MetadataMessage() {
// Default constructor for Jackson
}
public MetadataMessage(int superstreamClusterId, boolean active, List<TopicConfiguration> topicsConfiguration) {
this.superstreamClusterId = superstreamClusterId;
this.active = active;
this.topicsConfiguration = topicsConfiguration;
}
@JsonProperty("superstream_cluster_id")
public int getSuperstreamClusterId() {
return superstreamClusterId;
}
@JsonProperty("superstream_cluster_id")
public void setSuperstreamClusterId(int superstreamClusterId) {
this.superstreamClusterId = superstreamClusterId;
}
public boolean isActive() {
return active;
}
public void setActive(boolean active) {
this.active = active;
}
@JsonProperty("topics_configuration")
public List<TopicConfiguration> getTopicsConfiguration() {
return topicsConfiguration;
}
@JsonProperty("topics_configuration")
public void setTopicsConfiguration(List<TopicConfiguration> topicsConfiguration) {
this.topicsConfiguration = topicsConfiguration;
}
// Getter & setter for new report interval field
@JsonProperty("report_interval_ms")
public Long getReportIntervalMs() {
return reportIntervalMs;
}
@JsonProperty("report_interval_ms")
public void setReportIntervalMs(Long reportIntervalMs) {
this.reportIntervalMs = reportIntervalMs;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MetadataMessage that = (MetadataMessage) o;
return superstreamClusterId == that.superstreamClusterId &&
active == that.active &&
Objects.equals(topicsConfiguration, that.topicsConfiguration);
}
@Override
public int hashCode() {
return Objects.hash(superstreamClusterId, active, topicsConfiguration);
}
@Override
public String toString() {
return "MetadataMessage{" +
"superstream_cluster_id=" + superstreamClusterId +
", active=" + active +
", topics_configuration=" + topicsConfiguration +
'}';
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/model/TopicConfiguration.java | package ai.superstream.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import java.util.Objects;
/**
* Represents the configuration for a specific Kafka topic.
*/
public class TopicConfiguration {
private String topicName;
private int potentialReductionPercentage;
private long dailyWritesBytes;
private Map<String, Object> optimizedConfiguration;
public TopicConfiguration() {
// Default constructor for Jackson
}
public TopicConfiguration(String topicName, int potentialReductionPercentage, long dailyWritesBytes,
Map<String, Object> optimizedConfiguration) {
this.topicName = topicName;
this.potentialReductionPercentage = potentialReductionPercentage;
this.dailyWritesBytes = dailyWritesBytes;
this.optimizedConfiguration = optimizedConfiguration;
}
@JsonProperty("topic_name")
public String getTopicName() {
return topicName;
}
@JsonProperty("topic_name")
public void setTopicName(String topicName) {
this.topicName = topicName;
}
@JsonProperty("potential_reduction_percentage")
public int getPotentialReductionPercentage() {
return potentialReductionPercentage;
}
@JsonProperty("potential_reduction_percentage")
public void setPotentialReductionPercentage(int potentialReductionPercentage) {
this.potentialReductionPercentage = potentialReductionPercentage;
}
@JsonProperty("daily_writes_bytes")
public long getDailyWritesBytes() {
return dailyWritesBytes;
}
@JsonProperty("daily_writes_bytes")
public void setDailyWritesBytes(long dailyWritesBytes) {
this.dailyWritesBytes = dailyWritesBytes;
}
@JsonProperty("optimized_configuration")
public Map<String, Object> getOptimizedConfiguration() {
return optimizedConfiguration;
}
@JsonProperty("optimized_configuration")
public void setOptimizedConfiguration(Map<String, Object> optimizedConfiguration) {
this.optimizedConfiguration = optimizedConfiguration;
}
/**
* Calculate the potential impact of optimization for this topic.
* @return The impact score, calculated as potentialReductionPercentage * dailyWritesBytes
*/
public long calculateImpactScore() {
return (long) (potentialReductionPercentage/100) * dailyWritesBytes;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TopicConfiguration that = (TopicConfiguration) o;
return potentialReductionPercentage == that.potentialReductionPercentage &&
dailyWritesBytes == that.dailyWritesBytes &&
Objects.equals(topicName, that.topicName) &&
Objects.equals(optimizedConfiguration, that.optimizedConfiguration);
}
@Override
public int hashCode() {
return Objects.hash(topicName, potentialReductionPercentage, dailyWritesBytes, optimizedConfiguration);
}
@Override
public String toString() {
return "TopicConfiguration{" +
"topic_name='" + topicName + '\'' +
", potential_reduction_percentage=" + potentialReductionPercentage +
", daily_writes_bytes=" + dailyWritesBytes +
", optimized_configuration=" + optimizedConfiguration +
'}';
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/util/ClientUtils.java | package ai.superstream.util;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
* Utility class for client-related operations.
*/
public class ClientUtils {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(ClientUtils.class);
/**
* Get the complete producer configuration including default values.
* @param explicitConfig The explicitly set configuration
* @return A map containing all configurations including defaults
*/
public static Map<String, Object> getCompleteProducerConfig(Map<String, Object> explicitConfig) {
Map<String, Object> completeConfig = new HashMap<>();
try {
// Get the ProducerConfig class via reflection
Class<?> producerConfigClass = Class.forName("org.apache.kafka.clients.producer.ProducerConfig");
// Get access to the CONFIG static field which contains all default configurations
Field configField = producerConfigClass.getDeclaredField("CONFIG");
configField.setAccessible(true);
Object configDef = configField.get(null);
// Get the map of ConfigKey objects
Field configKeysField = configDef.getClass().getDeclaredField("configKeys");
configKeysField.setAccessible(true);
@SuppressWarnings("unchecked")
Map<String, Object> configKeys = (Map<String, Object>) configKeysField.get(configDef);
// For each config key, extract the default value
for (Map.Entry<String, Object> entry : configKeys.entrySet()) {
String configName = entry.getKey();
Object configKey = entry.getValue();
// Get the default value from the ConfigKey
Field defaultValueField = configKey.getClass().getDeclaredField("defaultValue");
defaultValueField.setAccessible(true);
Object defaultValue = defaultValueField.get(configKey);
if (defaultValue != null) {
completeConfig.put(configName, defaultValue);
}
}
// Override defaults with explicitly set configurations
completeConfig.putAll(explicitConfig);
// Remove sensitive authentication information
completeConfig.remove("ssl.keystore.password");
completeConfig.remove("ssl.key.password");
completeConfig.remove("ssl.truststore.password");
completeConfig.remove("basic.auth.user.info");
completeConfig.remove("sasl.jaas.config");
completeConfig.remove("sasl.client.callback.handler.class");
completeConfig.remove("sasl.login.callback.handler.class");
} catch (Exception e) {
logger.warn("Failed to extract default producer configs: " + e.getMessage(), e);
}
// If we couldn't get any defaults, just use the explicit config
if (completeConfig.isEmpty()) {
return new HashMap<>(explicitConfig);
}
return completeConfig;
}
/**
* Get the version of the Superstream Clients library.
* @return The version string
*/
public static String getClientVersion() {
// Option 1: Get version from package information (MANIFEST Implementation-Version)
Package pkg = ClientUtils.class.getPackage();
String version = (pkg != null) ? pkg.getImplementationVersion() : null;
// Option 2: If option 1 returns null (e.g., when running from IDE), try to read from a properties file
if (version == null) {
// First attempt: properties file located under META-INF (the path used during packaging)
version = readVersionFromProperties("/META-INF/superstream-version.properties");
// Second attempt: fallback to root path (older builds)
if (version == null) {
version = readVersionFromProperties("/superstream-version.properties");
}
}
// Option 3: If still null, use a hard-coded fallback
if (version == null) {
version = ""; // Default version if not found
}
return version;
}
/**
* Helper that tries to load the version property from the given resource path.
*
* @param resourcePath classpath resource path, e.g. "/META-INF/superstream-version.properties"
* @return the version value or null if not found / unreadable
*/
private static String readVersionFromProperties(String resourcePath) {
try (InputStream input = ClientUtils.class.getResourceAsStream(resourcePath)) {
if (input != null) {
Properties props = new Properties();
props.load(input);
String v = props.getProperty("version");
if (v != null && !v.trim().isEmpty()) {
return v.trim();
}
}
} catch (IOException ignored) {
// ignore and let caller handle fallback
}
return null;
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/util/EnvironmentVariables.java | package ai.superstream.util;
import java.util.HashMap;
import java.util.Map;
/**
 * Utility class for collecting environment variables.
 */
public class EnvironmentVariables {
    private static final String SUPERSTREAM_PREFIX = "SUPERSTREAM_";

    /** Utility class; not instantiable. */
    private EnvironmentVariables() {
    }

    /**
     * Collects all environment variables that start with SUPERSTREAM_.
     *
     * @return A map of environment variable names to their values (possibly empty, never null)
     */
    public static Map<String, String> getSuperstreamEnvironmentVariables() {
        Map<String, String> envVars = new HashMap<>();
        for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
            if (entry.getKey().startsWith(SUPERSTREAM_PREFIX)) {
                envVars.put(entry.getKey(), entry.getValue());
            }
        }
        return envVars;
    }
}
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/util/KafkaPropertiesUtils.java | package ai.superstream.util;
import java.util.Properties;
/**
* Utility class for Kafka properties management.
*/
public class KafkaPropertiesUtils {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(KafkaPropertiesUtils.class);
/**
* Copies essential client configuration properties from source to destination.
* This ensures internal Kafka clients have the same security, network, and connection
* configurations as the user's Kafka clients.
*
* @param source Source properties to copy from
* @param destination Destination properties to copy to
*/
public static void copyClientConfigurationProperties(Properties source, Properties destination) {
if (source == null || destination == null) {
logger.warn("Cannot copy client configuration properties: source or destination is null");
return;
}
// Client configuration properties to copy
String[] configProps = {
// Security protocol
"security.protocol",
// SSL properties
"ssl.truststore.location", "ssl.truststore.password",
"ssl.keystore.location", "ssl.keystore.password",
"ssl.key.password", "ssl.endpoint.identification.algorithm",
"ssl.truststore.type", "ssl.keystore.type", "ssl.secure.random.implementation",
"ssl.enabled.protocols", "ssl.cipher.suites", "ssl.protocol",
// SASL properties
"sasl.mechanism", "sasl.jaas.config",
"sasl.client.callback.handler.class", "sasl.login.callback.handler.class",
"sasl.login.class", "sasl.kerberos.service.name",
"sasl.kerberos.kinit.cmd", "sasl.kerberos.ticket.renew.window.factor",
"sasl.kerberos.ticket.renew.jitter", "sasl.kerberos.min.time.before.relogin",
"sasl.login.refresh.window.factor", "sasl.login.refresh.window.jitter",
"sasl.login.refresh.min.period.seconds", "sasl.login.refresh.buffer.seconds",
// Other important properties to preserve
"request.timeout.ms", "retry.backoff.ms", "connections.max.idle.ms",
"reconnect.backoff.ms", "reconnect.backoff.max.ms",
// DNS lookup configuration - critical for Consul/proxy scenarios
"client.dns.lookup",
// Socket timeout properties - important for proxy/load balancer scenarios
"socket.connection.setup.timeout.ms", "socket.connection.setup.timeout.max.ms",
// Metadata refresh properties - important for dynamic broker discovery
"metadata.max.age.ms", "metadata.max.idle.ms",
// Retry and delivery timeout - important for handling transient failures
"retries", "delivery.timeout.ms"
};
// Copy all properties if they exist in the source
for (String prop : configProps) {
if (source.containsKey(prop)) {
destination.put(prop, source.get(prop));
}
}
// Adjust JAAS config for shaded Kafka classes when used internally
Object jaasObj = destination.get("sasl.jaas.config");
if (jaasObj instanceof String) {
String jaas = (String) jaasObj;
// Build the prefix dynamically so the shade plugin does NOT relocate it
StringBuilder sb = new StringBuilder();
sb.append("org.");
sb.append("apache.kafka.common.security.");
String unshadedPrefix = sb.toString();
String shadedPrefix = "ai.superstream.shaded.org.apache.kafka.common.security.";
// Replace only if still un-shaded
if (jaas.contains(unshadedPrefix) && !jaas.contains(shadedPrefix)) {
String fixedJaas = jaas.replace(unshadedPrefix, shadedPrefix);
destination.put("sasl.jaas.config", fixedJaas);
}
}
}
} |
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/util/NetworkUtils.java | package ai.superstream.util;
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Enumeration;
/**
* Utility class for network-related operations.
*/
public class NetworkUtils {
    private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(NetworkUtils.class);

    // Resolved lazily on first use and cached for the lifetime of the JVM.
    private static String cachedIpAddress = null;
    private static String cachedHostname = null;

    /** Utility class: not meant to be instantiated. */
    private NetworkUtils() {
    }

    /**
     * Get the local IP address, preferring an IPv4 address bound to a non-loopback
     * interface that is up, falling back to {@link InetAddress#getLocalHost()}.
     *
     * @return the local IP address, or an empty string if it can't be determined
     */
    public static String getLocalIpAddress() {
        if (cachedIpAddress != null) {
            return cachedIpAddress;
        }
        try {
            // Try to get the primary network interface's IP address
            Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
            if (interfaces == null) {
                logger.warn("No network interfaces found");
                return "";
            }
            while (interfaces.hasMoreElements()) {
                NetworkInterface networkInterface = interfaces.nextElement();
                if (networkInterface.isLoopback() || !networkInterface.isUp()) {
                    continue;
                }
                Enumeration<InetAddress> addresses = networkInterface.getInetAddresses();
                while (addresses.hasMoreElements()) {
                    InetAddress address = addresses.nextElement();
                    // Prefer IPv4. An instanceof check is more reliable than looking for
                    // '.' in the textual form, since IPv4-mapped IPv6 addresses also
                    // contain dots.
                    if (address instanceof Inet4Address) {
                        cachedIpAddress = address.getHostAddress();
                        return cachedIpAddress;
                    }
                }
            }
            // Fall back to the local host address
            cachedIpAddress = InetAddress.getLocalHost().getHostAddress();
            return cachedIpAddress;
        } catch (SocketException | UnknownHostException e) {
            logger.error("[ERR-033] Failed to determine local IP address: {}", e.getMessage(), e);
            return "";
        }
    }

    /**
     * Get the local host name.
     *
     * @return the host name, or an empty string if it can't be determined
     */
    public static String getHostname() {
        if (cachedHostname != null) {
            return cachedHostname;
        }
        try {
            cachedHostname = InetAddress.getLocalHost().getHostName();
            return cachedHostname;
        } catch (UnknownHostException e) {
            logger.error("[ERR-091] Failed to determine local hostname: {}", e.getMessage(), e);
            return "";
        }
    }
}
|
0 | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream | java-sources/ai/superstream/superstream-clients/1.0.208/ai/superstream/util/SuperstreamLogger.java | package ai.superstream.util;
/**
* Custom logger for the Superstream library that falls back to System.out/System.err
*/
/**
 * Custom logger for the Superstream library that writes to System.out/System.err
 * instead of depending on an external logging framework.
 */
public class SuperstreamLogger {
    private static final String PREFIX = "superstream";
    private final String className;

    // Flag to control debug logging - default to false to hide debug logs.
    // volatile so programmatic toggles are visible across threads.
    private static volatile boolean debugEnabled = false;

    static {
        // Debug logging can be enabled via -Dsuperstream.debug=true or the
        // SUPERSTREAM_DEBUG environment variable (the system property wins).
        String debugFlag = System.getProperty("superstream.debug");
        if (debugFlag == null) {
            debugFlag = System.getenv("SUPERSTREAM_DEBUG");
        }
        debugEnabled = "true".equalsIgnoreCase(debugFlag);
    }

    /** Enable or disable debug logging programmatically. */
    public static void setDebugEnabled(boolean enabled) {
        debugEnabled = enabled;
    }

    private SuperstreamLogger(Class<?> clazz) {
        this.className = clazz.getSimpleName();
    }

    /**
     * Get a logger for the specified class.
     *
     * @param clazz The class to get the logger for
     * @return A new SuperstreamLogger instance
     */
    public static SuperstreamLogger getLogger(Class<?> clazz) {
        return new SuperstreamLogger(clazz);
    }

    /** Log an info message. */
    public void info(String message) {
        System.out.println(formatLogMessage("INFO", message));
    }

    /** Log an info message with {} placeholders. */
    public void info(String message, Object... args) {
        System.out.println(formatLogMessage("INFO", formatArgs(message, args)));
    }

    /** Log a warning message. */
    public void warn(String message) {
        System.out.println(formatLogMessage("WARN", message));
    }

    /** Log a warning message with {} placeholders. */
    public void warn(String message, Object... args) {
        System.out.println(formatLogMessage("WARN", formatArgs(message, args)));
    }

    /**
     * Log an error message with {} placeholders. If the last argument is a Throwable,
     * it is rendered with its class, message and flattened stack trace.
     */
    public void error(String message, Object... args) {
        if (args != null && args.length > 0 && args[args.length - 1] instanceof Throwable) {
            // If the last argument is a Throwable, format it properly
            Throwable throwable = (Throwable) args[args.length - 1];
            // Remove the Throwable from args array
            Object[] messageArgs = new Object[args.length - 1];
            System.arraycopy(args, 0, messageArgs, 0, args.length - 1);
            // Format the message with the remaining args
            String formattedMessage = formatArgs(message, messageArgs);
            // Format the exception message
            String formattedExceptionMessage = formatExceptionMessage(formattedMessage, throwable);
            System.err.println(formatLogMessage("ERROR", formattedExceptionMessage));
        } else {
            System.err.println(formatLogMessage("ERROR", formatArgs(message, args)));
        }
    }

    /** Log a debug message (no-op unless debug logging is enabled). */
    public void debug(String message) {
        if (debugEnabled) {
            System.out.println(formatLogMessage("DEBUG", message));
        }
    }

    /** Log a debug message with {} placeholders (no-op unless debug logging is enabled). */
    public void debug(String message, Object... args) {
        if (debugEnabled) {
            System.out.println(formatLogMessage("DEBUG", formatArgs(message, args)));
        }
    }

    public static boolean isDebugEnabled() {
        return debugEnabled;
    }

    /** Format a log message with the Superstream prefix, level and class name. */
    private String formatLogMessage(String level, String message) {
        return String.format("[%s] %s %s: %s", PREFIX, level, className, message);
    }

    /**
     * Replace each {} placeholder with the corresponding argument, left to right.
     * The scan resumes AFTER the inserted text: previously the search restarted at
     * index 0 on every iteration, so an argument whose string form contained "{}"
     * was itself consumed by the next placeholder lookup, shifting all later
     * arguments into the wrong positions.
     */
    private String formatArgs(String message, Object... args) {
        if (args == null || args.length == 0) {
            return message;
        }
        StringBuilder result = new StringBuilder(message);
        int searchFrom = 0;
        for (Object arg : args) {
            int idx = result.indexOf("{}", searchFrom);
            if (idx < 0) {
                break; // more args than placeholders: extra args are ignored
            }
            String replacement = (arg == null ? "null" : arg.toString());
            result.replace(idx, idx + 2, replacement);
            searchFrom = idx + replacement.length();
        }
        return result.toString();
    }

    /**
     * Format an exception message with class name, message and stack trace.
     * This is a standardized way to format exception messages across the codebase.
     * We are doing that because we saw that in some logging systems, the stack trace is not included in the error message unless it appears without a new line.
     */
    private String formatExceptionMessage(String message, Throwable throwable) {
        // Convert stack trace to string, flattened onto a single line
        java.io.StringWriter sw = new java.io.StringWriter();
        java.io.PrintWriter pw = new java.io.PrintWriter(sw);
        throwable.printStackTrace(pw);
        String stackTrace = sw.toString().replaceAll("\\r?\\n", " ");
        return String.format("%s. Error: %s - %s. Stack trace: %s",
                message,
                throwable.getClass().getName(),
                throwable.getMessage(),
                stackTrace);
    }
}
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/agent/KafkaProducerInterceptor.java | package ai.superstream.agent;
import ai.superstream.core.SuperstreamManager;
import ai.superstream.util.SuperstreamLogger;
import net.bytebuddy.asm.Advice;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.Properties;
import java.util.Map;
/**
* Intercepts KafkaProducer constructor calls to optimize configurations.
*/
/**
 * Intercepts KafkaProducer constructor calls (via ByteBuddy advice) to optimize
 * producer configurations before the producer is constructed.
 */
public class KafkaProducerInterceptor {
    public static final SuperstreamLogger logger = SuperstreamLogger.getLogger(KafkaProducerInterceptor.class);

    /**
     * Called before the KafkaProducer constructor. Extracts the producer
     * configuration, asks SuperstreamManager to optimize it in place, and restores
     * the original configuration if optimization fails or throws.
     *
     * NOTE(review): the advice body is inlined into KafkaProducer's constructor by
     * ByteBuddy, which is why all members here are public static.
     *
     * @param args The producer constructor arguments (Properties, Map or ProducerConfig)
     */
    @Advice.OnMethodEnter
    public static void onEnter(@Advice.AllArguments Object[] args) {
        // Check if this is a direct call from application code or an internal delegation.
        // KafkaProducer constructors chain to each other; only the outermost call
        // should trigger optimization.
        if (!isInitialProducerCreation()) {
            logger.debug("Skipping internal constructor delegation");
            return;
        }
        // Extract Properties or Map from the arguments.
        // NOTE(review): when the argument is a Map or ProducerConfig, extractProperties
        // returns a *copy*, so mutations below may not reach the actual producer
        // configuration — confirm intended behavior for non-Properties constructors.
        Properties properties = extractProperties(args);
        if (properties == null) {
            logger.debug("Could not extract properties from constructor arguments");
            return;
        }
        // Make a copy of the original properties in case we need to restore them
        Properties originalProperties = new Properties();
        originalProperties.putAll(properties);
        try {
            // Skip if we're already in the process of optimizing (re-entrancy guard:
            // the optimizer itself creates internal producers).
            if (SuperstreamManager.isOptimizationInProgress()) {
                logger.debug("Skipping interception as optimization is already in progress");
                return;
            }
            if (properties.isEmpty()) {
                logger.warn("Could not extract properties from properties");
                return;
            }
            // Skip producers created by the Superstream library itself, identified
            // by the "superstreamlib-" client.id prefix.
            String clientId = properties.getProperty("client.id", "");
            if (clientId.startsWith("superstreamlib-")) {
                logger.debug("Skipping optimization for Superstream internal producer: {}", clientId);
                return;
            }
            logger.info("Intercepted KafkaProducer constructor");
            // Extract bootstrap servers and client id
            String bootstrapServers = properties.getProperty("bootstrap.servers");
            if (bootstrapServers == null || bootstrapServers.trim().isEmpty()) {
                logger.warn("bootstrap.servers is not set, cannot optimize");
                return;
            }
            // Optimize the producer (mutates `properties` in place on success)
            boolean success = SuperstreamManager.getInstance().optimizeProducer(bootstrapServers, clientId, properties);
            if (!success) {
                // Restore original properties if optimization failed
                properties.clear();
                properties.putAll(originalProperties);
            }
        } catch (Exception e) {
            // Restore original properties on any exception so the producer is
            // constructed exactly as the application configured it.
            properties.clear();
            properties.putAll(originalProperties);
            logger.error("Error during producer optimization, restored original properties", e);
        }
    }

    /**
     * Extract a Properties object from the constructor arguments.
     *
     * Handles three argument shapes, in order: a Properties instance (returned
     * as-is), a Map (copied into a new Properties), and a ProducerConfig (read via
     * reflection over several likely field names, then getter methods, then the
     * getString(key) accessor as a last resort).
     *
     * @param args the intercepted constructor arguments
     * @return the extracted Properties, or null when none could be found
     */
    public static Properties extractProperties(Object[] args) {
        // Look for Properties or Map in the arguments
        for (Object arg : args) {
            if (arg == null) continue;
            if (arg instanceof Properties) {
                return (Properties) arg;
            }
            if (arg instanceof Map) {
                try {
                    @SuppressWarnings("unchecked")
                    Map<String, Object> map = (Map<String, Object>) arg;
                    Properties props = new Properties();
                    // Properties rejects null values, so skip null-valued entries.
                    for (Map.Entry<String, Object> entry : map.entrySet()) {
                        if (entry.getValue() != null) {
                            props.put(entry.getKey(), entry.getValue());
                        }
                    }
                    return props;
                } catch (ClassCastException e) {
                    // Not the map type we expected
                    logger.debug("Could not cast Map to Map<String, Object>");
                }
            }
            // Handle ProducerConfig object which contains properties
            String className = arg.getClass().getName();
            if (className.endsWith("ProducerConfig")) {
                try {
                    // Try multiple possible field names (internal field names vary
                    // across Kafka client versions)
                    String[] fieldNames = {"originals", "values", "props", "properties", "configs"};
                    for (String fieldName : fieldNames) {
                        try {
                            Field field = arg.getClass().getDeclaredField(fieldName);
                            field.setAccessible(true);
                            Object fieldValue = field.get(arg);
                            if (fieldValue instanceof Map) {
                                @SuppressWarnings("unchecked")
                                Map<String, Object> map = (Map<String, Object>) fieldValue;
                                Properties props = new Properties();
                                for (Map.Entry<String, Object> entry : map.entrySet()) {
                                    if (entry.getValue() != null) {
                                        props.put(entry.getKey(), entry.getValue());
                                    }
                                }
                                return props;
                            } else if (fieldValue instanceof Properties) {
                                return (Properties) fieldValue;
                            }
                        } catch (NoSuchFieldException e) {
                            // Field doesn't exist, try the next one
                            continue;
                        }
                    }
                    // Try to call getters if field access failed
                    for (Method method : arg.getClass().getMethods()) {
                        if ((method.getName().equals("originals") ||
                                method.getName().equals("values") ||
                                method.getName().equals("configs") ||
                                method.getName().equals("properties") ||
                                method.getName().equals("getOriginals") ||
                                method.getName().equals("getValues") ||
                                method.getName().equals("getConfigs") ||
                                method.getName().equals("getProperties")) &&
                                method.getParameterCount() == 0) {
                            Object result = method.invoke(arg);
                            if (result instanceof Map) {
                                @SuppressWarnings("unchecked")
                                Map<String, Object> map = (Map<String, Object>) result;
                                Properties props = new Properties();
                                for (Map.Entry<String, Object> entry : map.entrySet()) {
                                    if (entry.getValue() != null) {
                                        props.put(entry.getKey(), entry.getValue());
                                    }
                                }
                                return props;
                            } else if (result instanceof Properties) {
                                return (Properties) result;
                            }
                        }
                    }
                    // Last resort: Try to get the ProducerConfig's bootstrap.servers value
                    // and create a minimal Properties object
                    for (Method method : arg.getClass().getMethods()) {
                        if (method.getName().equals("getString") && method.getParameterCount() == 1) {
                            try {
                                String bootstrapServers = (String) method.invoke(arg, "bootstrap.servers");
                                String clientId = (String) method.invoke(arg, "client.id");
                                if (bootstrapServers != null) {
                                    Properties minProps = new Properties();
                                    minProps.put("bootstrap.servers", bootstrapServers);
                                    if (clientId != null) {
                                        minProps.put("client.id", clientId);
                                    }
                                    return minProps;
                                }
                            } catch (Exception e) {
                                logger.debug("Failed to get bootstrap.servers from ProducerConfig");
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.debug("Failed to extract properties from ProducerConfig: " + e.getMessage());
                }
            }
        }
        return null;
    }

    /**
     * Determines if this constructor call is the initial creation from application code
     * rather than an internal delegation between constructors.
     *
     * Walks the current thread's stack: more than one KafkaProducer frame means the
     * constructors are delegating to each other; exactly one, followed by a
     * non-JDK caller frame, means an application-level instantiation.
     *
     * @return true when this looks like the outermost KafkaProducer construction
     */
    public static boolean isInitialProducerCreation() {
        StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
        // Start from index 1 to skip getStackTrace() itself
        boolean foundKafkaProducer = false;
        int kafkaProducerCount = 0;
        for (int i = 1; i < stackTrace.length; i++) {
            String className = stackTrace[i].getClassName();
            // Look for KafkaProducer in the class name
            if (className.endsWith("KafkaProducer")) {
                foundKafkaProducer = true;
                kafkaProducerCount++;
                // If we find more than one KafkaProducer in the stack, it's a delegation
                if (kafkaProducerCount > 1) {
                    return false;
                }
            }
            // Once we've seen KafkaProducer and then see a different class,
            // we've found the actual caller
            else if (foundKafkaProducer) {
                // Skip certain framework classes that might wrap the call
                if (className.startsWith("java.") ||
                        className.startsWith("javax.") ||
                        className.startsWith("sun.") ||
                        className.startsWith("com.sun.")) {
                    continue;
                }
                // We've found the application class that called KafkaProducer
                logger.debug("Detected initial producer creation from: " + className);
                return true;
            }
        }
        // If we make it here with exactly one KafkaProducer in the stack, it's likely
        // the initial creation (first constructor being called)
        return kafkaProducerCount == 1;
    }
}
|
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/agent/SuperstreamAgent.java | package ai.superstream.agent;
import ai.superstream.util.SuperstreamLogger;
import net.bytebuddy.agent.builder.AgentBuilder;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.matcher.ElementMatchers;
import java.lang.instrument.Instrumentation;
/**
* Java agent entry point for the Superstream library.
*/
public class SuperstreamAgent {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(SuperstreamAgent.class);
/**
* Premain method, called when the agent is loaded during JVM startup.
*
* @param arguments Agent arguments
* @param instrumentation Instrumentation instance
*/
public static void premain(String arguments, Instrumentation instrumentation) {
logger.info("Superstream Agent initialized");
install(instrumentation);
}
/**
* AgentMain method, called when the agent is loaded after JVM startup.
*
* @param arguments Agent arguments
* @param instrumentation Instrumentation instance
*/
public static void agentmain(String arguments, Instrumentation instrumentation) {
logger.info("Superstream Agent initialized (dynamic attach)");
install(instrumentation);
}
/**
* Install the agent instrumentation.
*
* @param instrumentation Instrumentation instance
*/
private static void install(Instrumentation instrumentation) {
// Intercept KafkaProducer constructor
new AgentBuilder.Default()
.disableClassFormatChanges()
.type(ElementMatchers.named("org.apache.kafka.clients.producer.KafkaProducer"))
.transform((builder, typeDescription, classLoader, module, protectionDomain) ->
builder.visit(Advice.to(KafkaProducerInterceptor.class)
.on(ElementMatchers.isConstructor())))
.installOn(instrumentation);
logger.info("Superstream Agent successfully installed instrumentation");
}
} |
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/core/ClientReporter.java | package ai.superstream.core;
import ai.superstream.model.ClientMessage;
import ai.superstream.util.NetworkUtils;
import ai.superstream.util.SuperstreamLogger;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* Reports client information to the superstream.clients topic.
*/
public class ClientReporter {
    private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(ClientReporter.class);
    private static final String CLIENTS_TOPIC = "superstream.clients";
    private static final ObjectMapper objectMapper = new ObjectMapper();
    private static final String CLIENT_VERSION = getClientVersion();
    private static final String LANGUAGE = "Java";
    private static final String CLIENT_TYPE = "producer"; // for now support only producers

    /**
     * Report client information to the superstream.clients topic.
     *
     * @param bootstrapServers The Kafka bootstrap servers
     * @param originalClientProperties The original client properties; security and
     *        connection settings are copied from here onto the internal reporter producer
     * @param superstreamClusterId The superstream cluster ID
     * @param active Whether the superstream optimization is active
     * @param clientId The client ID
     * @param originalConfiguration The original configuration
     * @param optimizedConfiguration The optimized configuration
     * @return True if the message was sent successfully, false otherwise
     */
    public boolean reportClient(String bootstrapServers, Properties originalClientProperties, int superstreamClusterId, boolean active,
                                String clientId, Map<String, Object> originalConfiguration,
                                Map<String, Object> optimizedConfiguration) {
        Properties properties = new Properties();
        // Copy all authentication-related and essential properties from the original client
        copyAuthenticationProperties(originalClientProperties, properties);
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // The "superstreamlib-" client.id prefix marks this producer as internal so the
        // interceptor does not try to optimize it.
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, "superstreamlib-client-reporter");
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "zstd");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); // 16KB batch size
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1000); // 1s linger
        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
            // Create the client message
            ClientMessage message = new ClientMessage(
                    superstreamClusterId,
                    active,
                    clientId,
                    NetworkUtils.getLocalIpAddress(),
                    CLIENT_VERSION,
                    LANGUAGE,
                    CLIENT_TYPE,
                    getCompleteProducerConfig(originalConfiguration),
                    optimizedConfiguration
            );
            // Convert the message to JSON
            String json = objectMapper.writeValueAsString(message);
            // Send the message and wait up to 5 seconds for the broker ack
            ProducerRecord<String, String> record = new ProducerRecord<>(CLIENTS_TOPIC, json);
            producer.send(record).get(5, TimeUnit.SECONDS);
            logger.info("Successfully reported client information to {}", CLIENTS_TOPIC);
            return true;
        } catch (InterruptedException e) {
            // Re-assert the interrupt flag so callers can observe it
            Thread.currentThread().interrupt();
            logger.error("Interrupted while reporting client information", e);
            return false;
        } catch (ExecutionException e) {
            logger.error("Failed to report client information", e);
            return false;
        } catch (TimeoutException e) {
            logger.error("Timed out while reporting client information", e);
            return false;
        } catch (Exception e) {
            logger.error("Error reporting client information", e);
            return false;
        }
    }

    /**
     * Get the complete producer configuration including default values.
     *
     * Reads ProducerConfig's internal ConfigDef via reflection to obtain every
     * default, overlays the explicitly set values, and strips secrets before the
     * result is reported.
     * NOTE(review): relies on Kafka-internal field names ("CONFIG", "configKeys",
     * "defaultValue") — may break across Kafka client versions; on any failure it
     * falls back to the explicit config only.
     *
     * @param explicitConfig The explicitly set configuration
     * @return A map containing all configurations including defaults
     */
    private Map<String, Object> getCompleteProducerConfig(Map<String, Object> explicitConfig) {
        Map<String, Object> completeConfig = new HashMap<>();
        try {
            // Get the ProducerConfig class via reflection
            Class<?> producerConfigClass = Class.forName("org.apache.kafka.clients.producer.ProducerConfig");
            // Get access to the CONFIG static field which contains all default configurations
            Field configField = producerConfigClass.getDeclaredField("CONFIG");
            configField.setAccessible(true);
            Object configDef = configField.get(null);
            // Get the map of ConfigKey objects
            Field configKeysField = configDef.getClass().getDeclaredField("configKeys");
            configKeysField.setAccessible(true);
            @SuppressWarnings("unchecked")
            Map<String, Object> configKeys = (Map<String, Object>) configKeysField.get(configDef);
            // For each config key, extract the default value
            for (Map.Entry<String, Object> entry : configKeys.entrySet()) {
                String configName = entry.getKey();
                Object configKey = entry.getValue();
                // Get the default value from the ConfigKey
                Field defaultValueField = configKey.getClass().getDeclaredField("defaultValue");
                defaultValueField.setAccessible(true);
                Object defaultValue = defaultValueField.get(configKey);
                if (defaultValue != null) {
                    completeConfig.put(configName, defaultValue);
                }
            }
            // Override defaults with explicitly set configurations
            completeConfig.putAll(explicitConfig);
            // Remove sensitive authentication information so secrets are never reported
            completeConfig.remove("ssl.keystore.password");
            completeConfig.remove("ssl.key.password");
            completeConfig.remove("ssl.truststore.password");
            completeConfig.remove("sasl.jaas.config");
            completeConfig.remove("sasl.client.callback.handler.class");
            completeConfig.remove("sasl.login.callback.handler.class");
        } catch (Exception e) {
            logger.warn("Failed to extract default producer configs: " + e.getMessage(), e);
        }
        // If we couldn't get any defaults, just use the explicit config
        if (completeConfig.isEmpty()) {
            return new HashMap<>(explicitConfig);
        }
        return completeConfig;
    }

    // Helper method to copy authentication, security and connection-tuning
    // properties from the user's client config onto the internal reporter producer.
    private void copyAuthenticationProperties(Properties source, Properties destination) {
        // Authentication-related properties
        String[] authProps = {
                // Security protocol
                "security.protocol",
                // SSL properties
                "ssl.truststore.location", "ssl.truststore.password",
                "ssl.keystore.location", "ssl.keystore.password",
                "ssl.key.password", "ssl.endpoint.identification.algorithm",
                "ssl.truststore.type", "ssl.keystore.type", "ssl.secure.random.implementation",
                "ssl.enabled.protocols", "ssl.cipher.suites",
                // SASL properties
                "sasl.mechanism", "sasl.jaas.config",
                "sasl.client.callback.handler.class", "sasl.login.callback.handler.class",
                "sasl.login.class", "sasl.kerberos.service.name",
                "sasl.kerberos.kinit.cmd", "sasl.kerberos.ticket.renew.window.factor",
                "sasl.kerberos.ticket.renew.jitter", "sasl.kerberos.min.time.before.relogin",
                "sasl.login.refresh.window.factor", "sasl.login.refresh.window.jitter",
                "sasl.login.refresh.min.period.seconds", "sasl.login.refresh.buffer.seconds",
                // Other important properties to preserve
                "request.timeout.ms", "retry.backoff.ms", "connections.max.idle.ms",
                "reconnect.backoff.ms", "reconnect.backoff.max.ms"
        };
        // Copy all authentication properties if they exist in the source
        for (String prop : authProps) {
            if (source.containsKey(prop)) {
                destination.put(prop, source.get(prop));
            }
        }
    }

    /**
     * Get the version of the Superstream Clients library.
     *
     * Tries the jar manifest's Implementation-Version first, then the bundled
     * /superstream-version.properties resource, then a hard-coded fallback.
     *
     * @return The version string
     */
    private static String getClientVersion() {
        // Option 1: Get version from package information
        Package pkg = ClientReporter.class.getPackage();
        String version = pkg.getImplementationVersion();
        // Option 2: If option 1 returns null (e.g., when running from IDE), use a fallback
        if (version == null) {
            // Try to read from a properties file
            try (InputStream input = ClientReporter.class.getResourceAsStream("/superstream-version.properties")) {
                if (input != null) {
                    Properties props = new Properties();
                    props.load(input);
                    version = props.getProperty("version");
                }
            } catch (IOException e) {
                // Ignore: fall through to the hardcoded default below
            }
            // If still null, use a hardcoded fallback
            if (version == null) {
                version = "1.0.0"; // Default version if not found
            }
        }
        return version;
    }
}
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/core/ConfigurationOptimizer.java | package ai.superstream.core;
import ai.superstream.model.MetadataMessage;
import ai.superstream.model.TopicConfiguration;
import ai.superstream.util.SuperstreamLogger;
import java.util.*;
import java.util.stream.Collectors;
/**
* Optimizes Kafka producer configurations based on metadata.
*/
public class ConfigurationOptimizer {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(ConfigurationOptimizer.class);
private static final String LATENCY_SENSITIVE_ENV_VAR = "SUPERSTREAM_LATENCY_SENSITIVE";
/**
* Get the optimal configuration for a set of topics.
*
* @param metadataMessage The metadata message
* @param applicationTopics The list of topics that the application might produce to
* @return The optimal configuration, or an empty map if no optimization is possible
*/
public Map<String, Object> getOptimalConfiguration(MetadataMessage metadataMessage, List<String> applicationTopics) {
// Check if the application is latency-sensitive
boolean isLatencySensitive = isLatencySensitive();
if (isLatencySensitive) {
logger.info("Application is marked as latency-sensitive, linger.ms will not be modified");
}
// Get all matching topic configurations
List<TopicConfiguration> matchingConfigurations = metadataMessage.getTopicsConfiguration().stream()
.filter(config -> applicationTopics.contains(config.getTopicName()))
.collect(Collectors.toList());
Map<String, Object> optimalConfiguration;
if (matchingConfigurations.isEmpty()) {
if (applicationTopics.isEmpty()) {
logger.info("SUPERSTREAM_TOPICS_LIST environment variable contains no topics. Applying default optimizations.");
} else {
logger.info("No matching topic configurations found for the application topics. Applying default optimizations.");
}
// Apply default optimizations when no matching topics found
optimalConfiguration = new HashMap<>();
optimalConfiguration.put("compression.type", "zstd");
optimalConfiguration.put("batch.size", 16384); // 16KB
// Only add linger if not latency-sensitive
if (!isLatencySensitive) {
optimalConfiguration.put("linger.ms", 5000); // 5 seconds default
logger.info("Default optimizations will be applied: compression.type=zstd, batch.size=16384, linger.ms=5000");
} else {
logger.info("Default optimizations will be applied: compression.type=zstd, batch.size=16384 (linger.ms unchanged)");
}
return optimalConfiguration;
}
// Find the most impactful topic
TopicConfiguration mostImpactfulTopic = matchingConfigurations.stream()
.max(Comparator.comparing(TopicConfiguration::calculateImpactScore))
.orElse(null);
optimalConfiguration = new HashMap<>(mostImpactfulTopic.getOptimizedConfiguration());
// If latency sensitive, remove linger.ms setting
if (isLatencySensitive && optimalConfiguration.containsKey("linger.ms")) {
optimalConfiguration.remove("linger.ms");
logger.info("Ignore linger.ms from optimizations due to latency-sensitive configuration");
}
return optimalConfiguration;
}
/**
 * Apply the optimal configuration to the producer properties.
 * <p>
 * linger.ms gets special treatment: it is skipped entirely for
 * latency-sensitive applications, and an existing larger value is never
 * lowered. A malformed value for any single key is logged and skipped so it
 * cannot abort the rest of the optimization pass.
 *
 * @param properties The producer properties to modify
 * @param optimalConfiguration The optimal configuration to apply
 * @return The list of configuration keys that were modified
 */
public List<String> applyOptimalConfiguration(Properties properties, Map<String, Object> optimalConfiguration) {
    if (optimalConfiguration == null || optimalConfiguration.isEmpty()) {
        return Collections.emptyList();
    }
    List<String> modifiedKeys = new ArrayList<>();
    for (Map.Entry<String, Object> entry : optimalConfiguration.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        // Special handling for linger.ms
        if ("linger.ms".equals(key)) {
            // Skip linger.ms if the application is latency-sensitive
            if (isLatencySensitive()) {
                logger.info("Skipping linger.ms optimization due to latency-sensitive configuration");
                continue;
            }
            int optimalLinger;
            if (value instanceof Number) {
                optimalLinger = ((Number) value).intValue();
            } else {
                try {
                    optimalLinger = Integer.parseInt(value.toString());
                } catch (NumberFormatException e) {
                    // Previously an unparsable optimal value threw out of the loop and
                    // aborted the whole pass; skip just this key instead.
                    logger.warn("Invalid optimal linger.ms value: {}. Skipping this parameter.", value);
                    continue;
                }
            }
            // Check if there's an existing linger setting
            Object existingValue = properties.get(key);
            if (existingValue != null) {
                int existingLinger;
                if (existingValue instanceof Number) {
                    existingLinger = ((Number) existingValue).intValue();
                } else {
                    try {
                        existingLinger = Integer.parseInt(existingValue.toString());
                    } catch (NumberFormatException e) {
                        logger.warn("Invalid existing linger.ms value: {}. Will use optimal value.", existingValue);
                        existingLinger = 0;
                    }
                }
                // Use the greater of optimal and existing linger values
                if (existingLinger > optimalLinger) {
                    logger.info("Keeping existing linger.ms value {} as it's greater than optimal value {}",
                            existingLinger, optimalLinger);
                    continue; // Skip this key, keeping the existing value
                }
            }
        }
        // Validate the configuration before applying
        if (!isValidConfiguration(key, value)) {
            logger.warn("Invalid configuration value for {}: {}. Skipping this parameter.", key, value);
            continue;
        }
        // Store the original value for logging
        Object originalValue = properties.get(key);
        // Apply the optimization
        properties.put(key, value);
        modifiedKeys.add(key);
        if (originalValue == null) {
            logger.info("Setting configuration: {}={} (was not previously set)", key, value);
        } else {
            logger.info("Overriding configuration: {}={} (was: {})", key, value, originalValue);
        }
    }
    return modifiedKeys;
}
/**
 * Check whether a configuration value is acceptable before it is applied.
 *
 * @param key the configuration key being validated
 * @param value the candidate value
 * @return true when the value may be applied, false otherwise
 */
private boolean isValidConfiguration(String key, Object value) {
    try {
        if ("compression.type".equals(key)) {
            // Kafka only understands these codec names for compression.type.
            switch (value.toString()) {
                case "none":
                case "gzip":
                case "snappy":
                case "lz4":
                case "zstd":
                    return true;
                default:
                    return false;
            }
        }
        // Add validation for other key types as needed
        return true;
    } catch (Exception e) {
        logger.warn("Error validating configuration {}: {}", key, value, e);
        return false;
    }
}
/**
 * Determine if the application is latency-sensitive based on environment variable.
 *
 * @return true if the application is latency-sensitive, false otherwise
 */
private boolean isLatencySensitive() {
    String raw = System.getenv(LATENCY_SENSITIVE_ENV_VAR);
    if (raw == null) {
        return false; // Default to not latency-sensitive
    }
    String trimmed = raw.trim();
    // Blank values also mean "not sensitive"; anything else goes through
    // Boolean.parseBoolean, so only a literal "true" enables the flag.
    return !trimmed.isEmpty() && Boolean.parseBoolean(trimmed);
}
} |
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/core/MetadataConsumer.java | package ai.superstream.core;
import ai.superstream.model.MetadataMessage;
import ai.superstream.util.SuperstreamLogger;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.io.IOException;
import java.time.Duration;
import java.util.*;
/**
* Consumes messages from the superstream.metadata_v1 topic.
*/
public class MetadataConsumer {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(MetadataConsumer.class);
private static final String METADATA_TOPIC = "superstream.metadata_v1";
private static final ObjectMapper objectMapper = new ObjectMapper();
/**
 * Get the most recent metadata message from the Kafka cluster.
 * <p>
 * Spins up a short-lived consumer with a unique group id, reads the last
 * record of partition 0 of the superstream.metadata_v1 topic, and parses it
 * as JSON. All failures are logged and surfaced as a null return.
 *
 * @param bootstrapServers The Kafka bootstrap servers
 * @param originalClientProperties The caller's client properties; security-related
 *        settings are copied from here so this consumer can reach the same cluster
 * @return The metadata message, or null if there was an error
 */
public MetadataMessage getMetadataMessage(String bootstrapServers, Properties originalClientProperties) {
Properties properties = new Properties();
// Copy all authentication-related and essential properties from the original client
copyAuthenticationProperties(originalClientProperties, properties);
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
// Random group id: each lookup starts fresh and never joins an existing group.
properties.put(ConsumerConfig.GROUP_ID_CONFIG, "superstream-metadata-consumer-" + UUID.randomUUID());
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "superstreamlib-metadata-consumer");
try (Consumer<String, String> consumer = new KafkaConsumer<>(properties)) {
// Check if the metadata topic exists
Set<String> topics = consumer.listTopics().keySet();
if (!topics.contains(METADATA_TOPIC)) {
logger.warn("The {} topic does not exist on the Kafka cluster at {}", METADATA_TOPIC, bootstrapServers);
return null;
}
// Manual assignment of partition 0 — NOTE(review): assumes the metadata
// topic is single-partition; confirm against the topic's definition.
TopicPartition partition = new TopicPartition(METADATA_TOPIC, 0);
consumer.assign(Collections.singletonList(partition));
// Seek to the end and get the current offset
consumer.seekToEnd(Collections.singletonList(partition));
long endOffset = consumer.position(partition);
if (endOffset == 0) {
logger.warn("The {} topic is empty", METADATA_TOPIC);
return null;
}
// Seek to the last message
consumer.seek(partition, endOffset - 1);
// Poll for the message (single attempt, 5 second timeout)
ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
if (records.isEmpty()) {
logger.warn("Failed to retrieve a message from the {} topic", METADATA_TOPIC);
return null;
}
// Parse the first polled record (the last record of the partition)
String json = records.iterator().next().value();
return objectMapper.readValue(json, MetadataMessage.class);
} catch (IOException e) {
logger.error("Failed to parse the metadata message", e);
return null;
} catch (Exception e) {
logger.error("Failed to retrieve the metadata message", e);
return null;
}
}
// Copies connection-security and client-tuning settings from the caller's
// Properties into the internal consumer's Properties, so the metadata lookup
// authenticates exactly like the application's own client.
private void copyAuthenticationProperties(Properties source, Properties destination) {
    String[] preservedKeys = {
            // Security protocol
            "security.protocol",
            // SSL properties
            "ssl.truststore.location", "ssl.truststore.password",
            "ssl.keystore.location", "ssl.keystore.password",
            "ssl.key.password", "ssl.endpoint.identification.algorithm",
            "ssl.truststore.type", "ssl.keystore.type", "ssl.secure.random.implementation",
            "ssl.enabled.protocols", "ssl.cipher.suites",
            // SASL properties
            "sasl.mechanism", "sasl.jaas.config",
            "sasl.client.callback.handler.class", "sasl.login.callback.handler.class",
            "sasl.login.class", "sasl.kerberos.service.name",
            "sasl.kerberos.kinit.cmd", "sasl.kerberos.ticket.renew.window.factor",
            "sasl.kerberos.ticket.renew.jitter", "sasl.kerberos.min.time.before.relogin",
            "sasl.login.refresh.window.factor", "sasl.login.refresh.window.jitter",
            "sasl.login.refresh.min.period.seconds", "sasl.login.refresh.buffer.seconds",
            // Other important properties to preserve
            "request.timeout.ms", "retry.backoff.ms", "connections.max.idle.ms",
            "reconnect.backoff.ms", "reconnect.backoff.max.ms"
    };
    for (String key : preservedKeys) {
        // Properties (a Hashtable) never maps a key to null, so a non-null get()
        // is equivalent to the containsKey() check.
        Object value = source.get(key);
        if (value != null) {
            destination.put(key, value);
        }
    }
}
} |
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/core/SuperstreamManager.java | package ai.superstream.core;
import ai.superstream.model.MetadataMessage;
import ai.superstream.util.SuperstreamLogger;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* Main manager class for the Superstream library.
*/
public class SuperstreamManager {
private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(SuperstreamManager.class);
private static final String TOPICS_ENV_VAR = "SUPERSTREAM_TOPICS_LIST";
private static final String DISABLED_ENV_VAR = "SUPERSTREAM_DISABLED";
private static final ThreadLocal<Boolean> OPTIMIZATION_IN_PROGRESS = new ThreadLocal<>();
private static volatile SuperstreamManager instance;
private final MetadataConsumer metadataConsumer;
private final ClientReporter clientReporter;
private final ConfigurationOptimizer configurationOptimizer;
private final Map<String, MetadataMessage> metadataCache;
private final boolean disabled;
/**
 * Private constructor for the singleton: wires up the collaborators and
 * reads the SUPERSTREAM_DISABLED kill switch from the environment.
 */
private SuperstreamManager() {
this.metadataConsumer = new MetadataConsumer();
this.clientReporter = new ClientReporter();
this.configurationOptimizer = new ConfigurationOptimizer();
this.metadataCache = new ConcurrentHashMap<>();
// Boolean.parseBoolean: only a literal "true" (case-insensitive) disables.
this.disabled = Boolean.parseBoolean(System.getenv(DISABLED_ENV_VAR));
if (disabled) {
logger.info("Superstream optimization is disabled via environment variable");
}
}
/**
 * Check if optimization is already in progress for the current thread.
 *
 * @return true if optimization is in progress, false otherwise
 */
public static boolean isOptimizationInProgress() {
    Boolean flag = OPTIMIZATION_IN_PROGRESS.get();
    // Unset (null) and explicit FALSE both mean "not in progress".
    return flag != null && flag;
}
/**
 * Set the optimization in progress flag for the current thread.
 *
 * @param inProgress true if optimization is in progress, false otherwise
 */
public static void setOptimizationInProgress(boolean inProgress) {
    if (!inProgress) {
        // Remove rather than store FALSE so the thread-local leaves no entry behind.
        OPTIMIZATION_IN_PROGRESS.remove();
        return;
    }
    OPTIMIZATION_IN_PROGRESS.set(Boolean.TRUE);
}
/**
 * Get the singleton instance of the SuperstreamManager.
 * <p>
 * Double-checked locking on the volatile {@code instance} field; the local
 * copy means the common already-initialized path performs a single volatile
 * read instead of two.
 *
 * @return The SuperstreamManager instance
 */
public static SuperstreamManager getInstance() {
    SuperstreamManager result = instance;
    if (result == null) {
        synchronized (SuperstreamManager.class) {
            result = instance;
            if (result == null) {
                instance = result = new SuperstreamManager();
            }
        }
    }
    return result;
}
/**
 * Convert a {@link Properties} object to a String-keyed map.
 * <p>
 * {@code stringPropertyNames()} alone would silently drop entries whose value
 * is not a String (this library itself puts Integer values such as batch.size
 * into producer Properties), so entries from the backing table are overlaid
 * on top of the defaults-aware String view.
 *
 * @param properties the properties to convert (may carry defaults)
 * @return a map containing every String-keyed entry, values kept as-is
 */
public static Map<String, Object> convertPropertiesToMap(Properties properties) {
    Map<String, Object> map = new HashMap<>();
    // String-valued entries, including those inherited from the defaults chain.
    for (String name : properties.stringPropertyNames()) {
        map.put(name, properties.getProperty(name));
    }
    // Directly-stored entries with non-String values, invisible to getProperty().
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
        if (entry.getKey() instanceof String) {
            map.put((String) entry.getKey(), entry.getValue());
        }
    }
    return map;
}
/**
 * Optimize the producer properties for a given Kafka cluster.
 * <p>
 * Mutates {@code properties} in place with the optimal configuration for the
 * cluster, then reports the before/after configuration. Guarded by a
 * thread-local flag because the reporting/metadata clients created during
 * optimization would otherwise re-enter this method.
 *
 * @param bootstrapServers The Kafka bootstrap servers
 * @param clientId The client ID
 * @param properties The producer properties to optimize (modified in place)
 * @return True if the optimization was successful, false otherwise
 */
public boolean optimizeProducer(String bootstrapServers, String clientId, Properties properties) {
if (disabled) {
return false;
}
// Skip if already optimizing (prevents infinite recursion)
if (isOptimizationInProgress()) {
logger.debug("Skipping optimization for producer {} as optimization is already in progress", clientId);
return false;
}
try {
// Mark optimization as in progress for this thread
setOptimizationInProgress(true);
// Get or fetch the metadata message
MetadataMessage metadataMessage = getOrFetchMetadataMessage(bootstrapServers, properties);
if (metadataMessage == null) {
logger.warn("No metadata message available for {}, skipping optimization", bootstrapServers);
return false;
}
// Check if optimization is active
if (!metadataMessage.isActive()) {
logger.info("Superstream optimization is not active for this kafka cluster, please head to the Superstream console and activate it.");
return false;
}
// Get the application topics
List<String> applicationTopics = getApplicationTopics();
// Get the optimal configuration
Map<String, Object> optimalConfiguration = configurationOptimizer.getOptimalConfiguration(
metadataMessage, applicationTopics);
// Snapshot the pre-optimization configuration for the report's "before" side.
Properties originalProperties = new Properties();
originalProperties.putAll(properties);
// Apply the optimal configuration
List<String> modifiedKeys = configurationOptimizer.applyOptimalConfiguration(properties, optimalConfiguration);
if (modifiedKeys.isEmpty()) {
logger.info("No configuration parameters were modified");
// Still report, with an empty optimized map, so the console sees the client.
reportClientInformation(bootstrapServers, properties, metadataMessage, clientId, originalProperties, Collections.emptyMap());
return false;
}
// Extract only the keys that changed for the report's "after" side.
Map<String, Object> optimizedProperties = new HashMap<>();
for (String key : modifiedKeys) {
optimizedProperties.put(key, properties.get(key));
}
// Report client information
reportClientInformation(
bootstrapServers,
properties,
metadataMessage,
clientId,
originalProperties,
optimizedProperties
);
logger.info("Successfully optimized producer configuration for {}", clientId);
return true;
} catch (Exception e) {
logger.error("Failed to optimize producer configuration", e);
return false;
} finally {
// Always clear the flag when done
setOptimizationInProgress(false);
}
}
/**
 * Get the metadata message for a given Kafka cluster, caching successful fetches.
 *
 * @param bootstrapServers The Kafka bootstrap servers (used as the cache key)
 * @param originalProperties The caller's client properties, used for authentication
 * @return The metadata message, or null if it couldn't be retrieved
 */
private MetadataMessage getOrFetchMetadataMessage(String bootstrapServers, Properties originalProperties) {
    // computeIfAbsent performs a single atomic lookup-or-load on the
    // ConcurrentHashMap, replacing the racy containsKey()/get()/put() sequence
    // that could fetch twice under concurrency. A null result from the consumer
    // leaves the cache unpopulated, so failed fetches are retried later — the
    // same behavior the explicit null check had.
    return metadataCache.computeIfAbsent(bootstrapServers,
            servers -> metadataConsumer.getMetadataMessage(servers, originalProperties));
}
/**
 * Get the list of application topics from the environment variable.
 *
 * @return The list of application topics (empty when the variable is unset or blank)
 */
private List<String> getApplicationTopics() {
    String topicsString = System.getenv(TOPICS_ENV_VAR);
    if (topicsString == null || topicsString.trim().isEmpty()) {
        return Collections.emptyList();
    }
    // Comma-separated list; surrounding whitespace is trimmed and empty
    // segments (e.g. from "a,,b") are dropped.
    List<String> topics = new ArrayList<>();
    for (String part : topicsString.split(",")) {
        String topic = part.trim();
        if (!topic.isEmpty()) {
            topics.add(topic);
        }
    }
    return topics;
}
/**
 * Report client information to the superstream.clients topic. Failures are
 * logged and swallowed so a reporting problem can never break the producer.
 *
 * @param bootstrapServers The Kafka bootstrap servers
 * @param originalProperties The caller's client properties (used by the reporter for authentication)
 * @param metadataMessage The metadata message for this cluster
 * @param clientId The client ID
 * @param originalConfiguration Snapshot of the configuration before optimization
 * @param optimizedConfiguration The configuration entries that were changed
 */
private void reportClientInformation(String bootstrapServers, Properties originalProperties, MetadataMessage metadataMessage,
String clientId, Properties originalConfiguration,
Map<String, Object> optimizedConfiguration) {
try {
// Properties -> Map conversion so the payload serializes cleanly as JSON.
Map<String, Object> originalConfiguration1 = convertPropertiesToMap(originalConfiguration);
boolean success = clientReporter.reportClient(
bootstrapServers,
originalProperties,
metadataMessage.getSuperstreamClusterId(),
metadataMessage.isActive(),
clientId,
originalConfiguration1,
optimizedConfiguration
);
if (!success) {
logger.warn("Failed to report client information to the superstream.clients topic");
}
} catch (Exception e) {
logger.error("Error reporting client information", e);
}
}
} |
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/model/ClientMessage.java | package ai.superstream.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Map;
import java.util.Objects;
/**
* Represents a message to be sent to the superstream.clients topic.
*/
public class ClientMessage {
private int superstreamClusterId;
private boolean active;
private String clientId;
private String ipAddress;
private String clientVersion;
private String language;
private String clientType;
private Map<String, Object> originalConfiguration;
private Map<String, Object> optimizedConfiguration;
public ClientMessage() {
// Default constructor for Jackson
}
public ClientMessage(int superstreamClusterId, boolean active, String clientId, String ipAddress, String clientVersion, String language, String clientType,
Map<String, Object> originalConfiguration, Map<String, Object> optimizedConfiguration) {
this.superstreamClusterId = superstreamClusterId;
this.active = active;
this.clientId = clientId;
this.ipAddress = ipAddress;
this.clientVersion = clientVersion;
this.language = language;
this.clientType = clientType;
this.originalConfiguration = originalConfiguration;
this.optimizedConfiguration = optimizedConfiguration;
}
@JsonProperty("superstream_cluster_id")
public int getSuperstreamClusterId() {
return superstreamClusterId;
}
@JsonProperty("superstream_cluster_id")
public void setSuperstreamClusterId(int superstreamClusterId) {
this.superstreamClusterId = superstreamClusterId;
}
@JsonProperty("active")
public boolean isActive() {
return active;
}
@JsonProperty("active")
public void setActive(boolean active) {
this.active = active;
}
@JsonProperty("client_id")
public String getClientId() {
return clientId;
}
@JsonProperty("client_id")
public void setClientId(String clientId) {
this.clientId = clientId;
}
@JsonProperty("ip_address")
public String getIpAddress() {
return ipAddress;
}
@JsonProperty("ip_address")
public void setIpAddress(String ipAddress) {
this.ipAddress = ipAddress;
}
@JsonProperty("version")
public String getClientVersion() {
return clientVersion;
}
@JsonProperty("version")
public void setClientVersion(String clientVersion) {
this.clientVersion = clientVersion;
}
@JsonProperty("language")
public String getLanguage() {
return language;
}
@JsonProperty("language")
public void setLanguage(String language) {
this.language = language;
}
@JsonProperty("client_type")
public String getClientType() {
return clientType;
}
@JsonProperty("client_type")
public void setClientType(String clientType) {
this.clientType = clientType;
}
@JsonProperty("original_configuration")
public Map<String, Object> getOriginalConfiguration() {
return originalConfiguration;
}
@JsonProperty("original_configuration")
public void setOriginalConfiguration(Map<String, Object> originalConfiguration) {
this.originalConfiguration = originalConfiguration;
}
@JsonProperty("optimized_configuration")
public Map<String, Object> getOptimizedConfiguration() {
return optimizedConfiguration;
}
@JsonProperty("optimized_configuration")
public void setOptimizedConfiguration(Map<String, Object> optimizedConfiguration) {
this.optimizedConfiguration = optimizedConfiguration;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClientMessage that = (ClientMessage) o;
return superstreamClusterId == that.superstreamClusterId &&
active == that.active &&
Objects.equals(clientId, that.clientId) &&
Objects.equals(ipAddress, that.ipAddress) &&
Objects.equals(clientVersion, that.clientVersion) &&
Objects.equals(language, that.language) &&
Objects.equals(clientType, that.clientType) &&
Objects.equals(originalConfiguration, that.originalConfiguration) &&
Objects.equals(optimizedConfiguration, that.optimizedConfiguration);
}
@Override
public int hashCode() {
return Objects.hash(superstreamClusterId, active, clientId, ipAddress, clientVersion, language, clientType, originalConfiguration, optimizedConfiguration);
}
@Override
public String toString() {
return "ClientMessage{" +
"superstream_cluster_id=" + superstreamClusterId +
", active=" + active +
", client_id='" + clientId + '\'' +
", ip_address='" + ipAddress + '\'' +
", version='" + clientVersion + '\'' +
", language='" + language + '\'' +
", client_type='" + clientType + '\'' +
", original_configuration=" + originalConfiguration +
", optimized_configuration=" + optimizedConfiguration +
'}';
}
} |
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/model/MetadataMessage.java | package ai.superstream.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Objects;
/**
* Represents a message from the superstream.metadata_v1 topic.
*/
public class MetadataMessage {
private int superstreamClusterId;
private boolean active;
private List<TopicConfiguration> topicsConfiguration;
public MetadataMessage() {
// Default constructor for Jackson
}
public MetadataMessage(int superstreamClusterId, boolean active, List<TopicConfiguration> topicsConfiguration) {
this.superstreamClusterId = superstreamClusterId;
this.active = active;
this.topicsConfiguration = topicsConfiguration;
}
@JsonProperty("superstream_cluster_id")
public int getSuperstreamClusterId() {
return superstreamClusterId;
}
@JsonProperty("superstream_cluster_id")
public void setSuperstreamClusterId(int superstreamClusterId) {
this.superstreamClusterId = superstreamClusterId;
}
public boolean isActive() {
return active;
}
public void setActive(boolean active) {
this.active = active;
}
@JsonProperty("topics_configuration")
public List<TopicConfiguration> getTopicsConfiguration() {
return topicsConfiguration;
}
@JsonProperty("topics_configuration")
public void setTopicsConfiguration(List<TopicConfiguration> topicsConfiguration) {
this.topicsConfiguration = topicsConfiguration;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MetadataMessage that = (MetadataMessage) o;
return superstreamClusterId == that.superstreamClusterId &&
active == that.active &&
Objects.equals(topicsConfiguration, that.topicsConfiguration);
}
@Override
public int hashCode() {
return Objects.hash(superstreamClusterId, active, topicsConfiguration);
}
@Override
public String toString() {
return "MetadataMessage{" +
"superstream_cluster_id=" + superstreamClusterId +
", active=" + active +
", topics_configuration=" + topicsConfiguration +
'}';
}
} |
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/util/NetworkUtils.java | package ai.superstream.util;
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Enumeration;
/**
 * Utility class for network-related operations.
 */
public class NetworkUtils {
    private static final SuperstreamLogger logger = SuperstreamLogger.getLogger(NetworkUtils.class);

    // Resolved lazily and reused; interface enumeration is comparatively slow.
    private static String cachedIpAddress = null;

    private NetworkUtils() {
        // Utility class: no instances.
    }

    /**
     * Get the local IP address.
     * <p>
     * Prefers an IPv4 address of the first non-loopback interface that is up,
     * falling back to {@link InetAddress#getLocalHost()}. Successful lookups
     * are cached; failures return "unknown" without caching, so a later call
     * may still succeed.
     *
     * @return The local IP address, or "unknown" if it can't be determined
     */
    public static String getLocalIpAddress() {
        if (cachedIpAddress != null) {
            return cachedIpAddress;
        }
        try {
            // Try to get the primary network interface's IP address
            Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
            while (interfaces.hasMoreElements()) {
                NetworkInterface networkInterface = interfaces.nextElement();
                if (networkInterface.isLoopback() || !networkInterface.isUp()) {
                    continue;
                }
                Enumeration<InetAddress> addresses = networkInterface.getInetAddresses();
                while (addresses.hasMoreElements()) {
                    InetAddress address = addresses.nextElement();
                    // instanceof is the reliable IPv4 test; checking the textual
                    // form for '.' can also match IPv4-mapped IPv6 addresses.
                    if (address instanceof Inet4Address) {
                        cachedIpAddress = address.getHostAddress();
                        return cachedIpAddress;
                    }
                }
            }
            // Fall back to the local host address
            InetAddress localHost = InetAddress.getLocalHost();
            cachedIpAddress = localHost.getHostAddress();
            return cachedIpAddress;
        } catch (SocketException | UnknownHostException e) {
            logger.error("Failed to determine local IP address", e);
            return "unknown";
        }
    }
}
|
0 | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream | java-sources/ai/superstream/superstream-clients-java/1.0.1-beta/ai/superstream/util/SuperstreamLogger.java | package ai.superstream.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Custom logger for the Superstream library.
* All logs will have the "superstream" prefix.
*/
public class SuperstreamLogger {
private static final String PREFIX = "superstream";
private final Logger logger;
private SuperstreamLogger(Class<?> clazz) {
this.logger = LoggerFactory.getLogger(clazz);
}
/**
* Get a logger for the specified class.
*
* @param clazz The class to get the logger for
* @return A new SuperstreamLogger instance
*/
public static SuperstreamLogger getLogger(Class<?> clazz) {
return new SuperstreamLogger(clazz);
}
/**
* Add the superstream prefix to a log message.
*
* @param message The original message
* @return The message with prefix
*/
private String withPrefix(String message) {
return "[" + PREFIX + "] " + message;
}
/**
* Log an info message with the superstream prefix.
*
* @param message The message to log
*/
public void info(String message) {
logger.info(withPrefix(message));
}
/**
* Log an info message with parameters and the superstream prefix.
*
* @param message The message to log
* @param args The parameters for the message
*/
public void info(String message, Object... args) {
logger.info(withPrefix(message), args);
}
/**
* Log a warning message with the superstream prefix.
*
* @param message The message to log
*/
public void warn(String message) {
logger.warn(withPrefix(message));
}
/**
* Log a warning message with parameters and the superstream prefix.
*
* @param message The message to log
* @param args The parameters for the message
*/
public void warn(String message, Object... args) {
logger.warn(withPrefix(message), args);
}
/**
* Log an error message with the superstream prefix.
*
* @param message The message to log
*/
public void error(String message) {
logger.error(withPrefix(message));
}
/**
* Log an error message with parameters and the superstream prefix.
*
* @param message The message to log
* @param args The parameters for the message
*/
public void error(String message, Object... args) {
logger.error(withPrefix(message), args);
}
/**
* Log an error message with an exception and the superstream prefix.
*
* @param message The message to log
* @param throwable The exception to log
*/
public void error(String message, Throwable throwable) {
logger.error(withPrefix(message), throwable);
}
/**
* Log a debug message with the superstream prefix.
*
* @param message The message to log
*/
public void debug(String message) {
logger.debug(withPrefix(message));
}
/**
* Log a debug message with parameters and the superstream prefix.
*
* @param message The message to log
* @param args The parameters for the message
*/
public void debug(String message, Object... args) {
logger.debug(withPrefix(message), args);
}
} |
0 | java-sources/ai/superstream/superstream-java | java-sources/ai/superstream/superstream-java/1.0.12-beta16/App.java | package example;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import ai.superstream.Superstream;
/**
 * Example application: produces 50,000 JSON messages, then consumes
 * indefinitely, with Superstream initialization applied to both clients
 * via {@code Superstream.initSuperstreamProps}. Credentials are masked
 * placeholders ("****").
 */
public class App
{
    public static void main( String[] args ) {
        try {
            Properties producerProperties = new Properties();
            // Producer Configs
            producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            // Common Configs
            producerProperties.put("security.protocol", "SASL_SSL");
            producerProperties.put("sasl.mechanism", "PLAIN");
            producerProperties.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required username='****' password='****';");
            producerProperties.put("client.dns.lookup", "use_all_dns_ips");
            producerProperties.put("bootstrap.servers", "****");
            producerProperties = Superstream.initSuperstreamProps(producerProperties, "producer");
            // Create a producer
            KafkaProducer<String, String> producer = new KafkaProducer<>(producerProperties);
            ObjectMapper mapper = new ObjectMapper();
            Map<String, Object> jsonMap = new HashMap<>();
            jsonMap.put("id", 23);
            jsonMap.put("age", 28);
            jsonMap.put("first", "John");
            jsonMap.put("last", "Bratslavsky");
            jsonMap.put("hello", "Bratslavsky");
            jsonMap.put("world", "Bratslavsky");
            String jsonString = mapper.writeValueAsString(jsonMap);
            // Produce some messages
            for (int i = 0; i < 50000; i++) {
                producer.send(new ProducerRecord<>("sample_topic", Integer.toString(i), jsonString));
            }
            producer.close();
            Properties consumerProperties = new Properties();
            // Consumer Configs
            consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group23");
            consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            // Common Configs
            consumerProperties.put("security.protocol", "SASL_SSL");
            consumerProperties.put("sasl.mechanism", "PLAIN");
            consumerProperties.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required username='****' password='****';");
            consumerProperties.put("client.dns.lookup", "use_all_dns_ips");
            consumerProperties.put("bootstrap.servers", "****");
            consumerProperties = Superstream.initSuperstreamProps(consumerProperties, "consumer");
            // Create a consumer
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);
            // Subscribe to topic
            // NOTE(review): the producer writes to "sample_topic" but the consumer
            // reads "javajava4" — confirm the mismatch is intentional in this example.
            consumer.subscribe(Collections.singletonList("javajava4"));
            // Plain int instead of Integer: avoids boxing a new object per record.
            int counter = 0;
            // Poll for new data
            try {
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                    for (ConsumerRecord<String, String> record : records) {
                        String val = record.value();
                        System.out.println(val);
                        counter++;
                        System.out.println(counter);
                    }
                }
            } finally {
                System.out.println(counter);
                consumer.close();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
|
0 | java-sources/ai/superstream/superstream-java/1.0.12-beta16/ai | java-sources/ai/superstream/superstream-java/1.0.12-beta16/ai/superstream/Consts.java | package ai.superstream;
/**
 * Shared constants for the Superstream Java SDK: version string, NATS subject
 * names, and configuration property keys.
 */
public class Consts {
// NOTE(review): the enclosing artifact version is 1.0.12-beta16 but this says 1.0.11 — confirm it is kept in sync with releases.
public static final String sdkVersion = "1.0.11";
// NATS subjects for client lifecycle and internal tasks; "%s" slots are filled per client/account at runtime.
public static final String clientReconnectionUpdateSubject = "internal_tasks.clientReconnectionUpdate";
public static final String clientTypeUpdateSubject = "internal.clientTypeUpdate";
public static final String clientRegisterSubject = "internal.registerClient";
// Property keys under which the original Kafka (de)serializers are stashed while Superstream wraps them.
public static final String originalSerializer = "original.serializer";
public static final String originalDeserializer = "original.deserializer";
// Token used when no authentication is configured.
public static final String superstreamDefaultToken = "no-auth";
public static final String superstreamErrorSubject = "internal.clientErrors";
public static final String superstreamUpdatesSubject = "internal.updates.%s";
public static final String superstreamClientsUpdateSubject = "internal_tasks.clientsUpdate.%s.%s";
public static final String superstreamLearningSubject = "internal.schema.learnSchema.%s";
public static final String superstreamRegisterSchemaSubject = "internal_tasks.schema.registerSchema.%s";
public static final String superstreamInternalUsername = "superstream_internal";
public static final String superstreamGetSchemaSubject = "internal.schema.getSchema.%s";
public static final Integer superstreamDefaultLearningFactor = 20;
// Keys read from client-supplied configuration properties.
public static final String superstreamLearningFactorKey = "superstream.learning.factor";
public static final String superstreamTagsKey = "superstream.tags";
public static final String superstreamHostKey = "superstream.host";
public static final String superstreamTokenKey = "superstream.token";
public static final String superstreamReductionEnabledKey = "superstream.reduction.enabled";
public static final String superstreamConnectionKey = "superstream.connection";
public static final String superstreamInnerConsumerKey = "superstream.inner.consumer";
public static final String superstreamMetadataTopic = "superstream.metadata";
public static final String clientStartSubject = "internal.startClient.%s";
}
|
0 | java-sources/ai/superstream/superstream-java/1.0.12-beta16/ai | java-sources/ai/superstream/superstream-java/1.0.12-beta16/ai/superstream/NatsAuthHandler.java | package ai.superstream;
import io.nats.client.AuthHandler;
import io.nats.client.NKey;
/**
 * NATS {@link AuthHandler} that authenticates with a user JWT plus an NKey
 * seed, signing server nonces with the derived NKey.
 */
public class NatsAuthHandler implements AuthHandler {
// The user JWT presented to the server (also returned as the connection ID).
private final String jwt;
// NKey derived from the seed; used only for nonce signing.
private final NKey nkey;
public NatsAuthHandler(String jwt, String nkeySeed) {
this.jwt = jwt;
this.nkey = NKey.fromSeed(nkeySeed.toCharArray());
}
@Override
public char[] getID() {
return jwt.toCharArray();
}
@Override
public byte[] sign(byte[] nonce) {
try {
return nkey.sign(nonce);
} catch (Exception e) {
// NOTE(review): the failure is swallowed and null returned, leaving the
// NATS client to fail the handshake later with a less specific error, and
// printStackTrace bypasses the SDK's logger — consider surfacing this.
e.printStackTrace();
return null;
}
}
@Override
public char[] getJWT() {
return jwt.toCharArray();
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.